{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 2113,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 1.9664676249027253,
      "epoch": 0.004733167672464797,
      "grad_norm": 2.9172720909118652,
      "learning_rate": 8.490566037735849e-07,
      "loss": 2.4508,
      "mean_token_accuracy": 0.5263931974768639,
      "num_tokens": 3879417.0,
      "step": 10
    },
    {
      "entropy": 2.0464825958013533,
      "epoch": 0.009466335344929595,
      "grad_norm": 0.7783076763153076,
      "learning_rate": 1.7924528301886793e-06,
      "loss": 2.2632,
      "mean_token_accuracy": 0.5380982637405396,
      "num_tokens": 7799425.0,
      "step": 20
    },
    {
      "entropy": 2.0156721025705338,
      "epoch": 0.014199503017394392,
      "grad_norm": 0.38891270756721497,
      "learning_rate": 2.7358490566037738e-06,
      "loss": 2.0189,
      "mean_token_accuracy": 0.568930983543396,
      "num_tokens": 11646570.0,
      "step": 30
    },
    {
      "entropy": 1.8222261607646941,
      "epoch": 0.01893267068985919,
      "grad_norm": 0.2623351812362671,
      "learning_rate": 3.679245283018868e-06,
      "loss": 1.8679,
      "mean_token_accuracy": 0.5922655925154686,
      "num_tokens": 15549095.0,
      "step": 40
    },
    {
      "entropy": 1.7781452298164369,
      "epoch": 0.023665838362323985,
      "grad_norm": 0.19355912506580353,
      "learning_rate": 4.622641509433963e-06,
      "loss": 1.7967,
      "mean_token_accuracy": 0.6030044049024582,
      "num_tokens": 19395583.0,
      "step": 50
    },
    {
      "entropy": 1.756221604347229,
      "epoch": 0.028399006034788784,
      "grad_norm": 0.17072290182113647,
      "learning_rate": 5.566037735849057e-06,
      "loss": 1.7705,
      "mean_token_accuracy": 0.6082265704870224,
      "num_tokens": 23287255.0,
      "step": 60
    },
    {
      "entropy": 1.721426972746849,
      "epoch": 0.033132173707253576,
      "grad_norm": 0.18896205723285675,
      "learning_rate": 6.5094339622641515e-06,
      "loss": 1.7402,
      "mean_token_accuracy": 0.6131706237792969,
      "num_tokens": 27155383.0,
      "step": 70
    },
    {
      "entropy": 1.720760390162468,
      "epoch": 0.03786534137971838,
      "grad_norm": 0.16523988544940948,
      "learning_rate": 7.452830188679246e-06,
      "loss": 1.7276,
      "mean_token_accuracy": 0.615737484395504,
      "num_tokens": 31071816.0,
      "step": 80
    },
    {
      "entropy": 1.696010684967041,
      "epoch": 0.042598509052183174,
      "grad_norm": 0.17002570629119873,
      "learning_rate": 8.39622641509434e-06,
      "loss": 1.7075,
      "mean_token_accuracy": 0.6187006324529648,
      "num_tokens": 34979450.0,
      "step": 90
    },
    {
      "entropy": 1.6817427426576614,
      "epoch": 0.04733167672464797,
      "grad_norm": 0.1671409159898758,
      "learning_rate": 9.339622641509435e-06,
      "loss": 1.689,
      "mean_token_accuracy": 0.6216033041477204,
      "num_tokens": 38828299.0,
      "step": 100
    },
    {
      "entropy": 1.6723841279745102,
      "epoch": 0.052064844397112765,
      "grad_norm": 0.1683770716190338,
      "learning_rate": 9.999944870161476e-06,
      "loss": 1.683,
      "mean_token_accuracy": 0.6222904548048973,
      "num_tokens": 42710858.0,
      "step": 110
    },
    {
      "entropy": 1.6607272058725357,
      "epoch": 0.05679801206957757,
      "grad_norm": 0.16289345920085907,
      "learning_rate": 9.998964817962902e-06,
      "loss": 1.6697,
      "mean_token_accuracy": 0.6255089312791824,
      "num_tokens": 46563041.0,
      "step": 120
    },
    {
      "entropy": 1.6530560374259948,
      "epoch": 0.06153117974204236,
      "grad_norm": 0.1647324562072754,
      "learning_rate": 9.996759934642046e-06,
      "loss": 1.66,
      "mean_token_accuracy": 0.6271017372608185,
      "num_tokens": 50473774.0,
      "step": 130
    },
    {
      "entropy": 1.646892574429512,
      "epoch": 0.06626434741450715,
      "grad_norm": 0.1683024764060974,
      "learning_rate": 9.993330760432702e-06,
      "loss": 1.6506,
      "mean_token_accuracy": 0.6288532495498658,
      "num_tokens": 54345703.0,
      "step": 140
    },
    {
      "entropy": 1.6421253114938736,
      "epoch": 0.07099751508697195,
      "grad_norm": 0.16962556540966034,
      "learning_rate": 9.98867813554068e-06,
      "loss": 1.649,
      "mean_token_accuracy": 0.6289903283119201,
      "num_tokens": 58224397.0,
      "step": 150
    },
    {
      "entropy": 1.6290255516767502,
      "epoch": 0.07573068275943676,
      "grad_norm": 0.16403813660144806,
      "learning_rate": 9.98280319993795e-06,
      "loss": 1.6341,
      "mean_token_accuracy": 0.6317014634609223,
      "num_tokens": 62114712.0,
      "step": 160
    },
    {
      "entropy": 1.6350149989128113,
      "epoch": 0.08046385043190155,
      "grad_norm": 0.16714808344841003,
      "learning_rate": 9.975707393083328e-06,
      "loss": 1.6379,
      "mean_token_accuracy": 0.6297940164804459,
      "num_tokens": 65991061.0,
      "step": 170
    },
    {
      "entropy": 1.6163844734430313,
      "epoch": 0.08519701810436635,
      "grad_norm": 0.17177364230155945,
      "learning_rate": 9.967392453569775e-06,
      "loss": 1.6222,
      "mean_token_accuracy": 0.6339128240942955,
      "num_tokens": 69856575.0,
      "step": 180
    },
    {
      "entropy": 1.6292197763919831,
      "epoch": 0.08993018577683115,
      "grad_norm": 0.16445177793502808,
      "learning_rate": 9.95786041869843e-06,
      "loss": 1.6316,
      "mean_token_accuracy": 0.6325667470693588,
      "num_tokens": 73718253.0,
      "step": 190
    },
    {
      "entropy": 1.6217930316925049,
      "epoch": 0.09466335344929594,
      "grad_norm": 0.15951167047023773,
      "learning_rate": 9.947113623979423e-06,
      "loss": 1.6253,
      "mean_token_accuracy": 0.6325555801391601,
      "num_tokens": 77599124.0,
      "step": 200
    },
    {
      "entropy": 1.6170487314462663,
      "epoch": 0.09939652112176074,
      "grad_norm": 0.15958699584007263,
      "learning_rate": 9.935154702559637e-06,
      "loss": 1.6246,
      "mean_token_accuracy": 0.6333600997924804,
      "num_tokens": 81539199.0,
      "step": 210
    },
    {
      "entropy": 1.6078982055187225,
      "epoch": 0.10412968879422553,
      "grad_norm": 0.16408687829971313,
      "learning_rate": 9.921986584577545e-06,
      "loss": 1.6135,
      "mean_token_accuracy": 0.6353577077388763,
      "num_tokens": 85404474.0,
      "step": 220
    },
    {
      "entropy": 1.6178746074438095,
      "epoch": 0.10886285646669033,
      "grad_norm": 0.16022811830043793,
      "learning_rate": 9.90761249644528e-06,
      "loss": 1.6173,
      "mean_token_accuracy": 0.6340486526489257,
      "num_tokens": 89310569.0,
      "step": 230
    },
    {
      "entropy": 1.6118096917867661,
      "epoch": 0.11359602413915514,
      "grad_norm": 0.16375407576560974,
      "learning_rate": 9.8920359600581e-06,
      "loss": 1.6195,
      "mean_token_accuracy": 0.6345633432269097,
      "num_tokens": 93225240.0,
      "step": 240
    },
    {
      "entropy": 1.6142515003681184,
      "epoch": 0.11832919181161992,
      "grad_norm": 0.16436107456684113,
      "learning_rate": 9.875260791931474e-06,
      "loss": 1.6154,
      "mean_token_accuracy": 0.6353823691606522,
      "num_tokens": 97106446.0,
      "step": 250
    },
    {
      "entropy": 1.6073731541633607,
      "epoch": 0.12306235948408473,
      "grad_norm": 0.16237056255340576,
      "learning_rate": 9.85729110226596e-06,
      "loss": 1.6087,
      "mean_token_accuracy": 0.6353139713406563,
      "num_tokens": 100977975.0,
      "step": 260
    },
    {
      "entropy": 1.6020812511444091,
      "epoch": 0.12779552715654952,
      "grad_norm": 0.15944795310497284,
      "learning_rate": 9.838131293940148e-06,
      "loss": 1.6071,
      "mean_token_accuracy": 0.6363679498434067,
      "num_tokens": 104876425.0,
      "step": 270
    },
    {
      "entropy": 1.6033865660429,
      "epoch": 0.1325286948290143,
      "grad_norm": 0.17549684643745422,
      "learning_rate": 9.817786061431874e-06,
      "loss": 1.6091,
      "mean_token_accuracy": 0.6354461714625359,
      "num_tokens": 108709842.0,
      "step": 280
    },
    {
      "entropy": 1.5905151784420013,
      "epoch": 0.13726186250147912,
      "grad_norm": 0.1742265522480011,
      "learning_rate": 9.79626038966799e-06,
      "loss": 1.596,
      "mean_token_accuracy": 0.6382940918207168,
      "num_tokens": 112609412.0,
      "step": 290
    },
    {
      "entropy": 1.5923225700855255,
      "epoch": 0.1419950301739439,
      "grad_norm": 0.16349110007286072,
      "learning_rate": 9.773559552802982e-06,
      "loss": 1.5926,
      "mean_token_accuracy": 0.6394994035363197,
      "num_tokens": 116498424.0,
      "step": 300
    },
    {
      "entropy": 1.5896567821502685,
      "epoch": 0.1467281978464087,
      "grad_norm": 0.1612998992204666,
      "learning_rate": 9.749689112926701e-06,
      "loss": 1.5973,
      "mean_token_accuracy": 0.6379290148615837,
      "num_tokens": 120371695.0,
      "step": 310
    },
    {
      "entropy": 1.596482390165329,
      "epoch": 0.15146136551887351,
      "grad_norm": 0.16588379442691803,
      "learning_rate": 9.724654918701568e-06,
      "loss": 1.6006,
      "mean_token_accuracy": 0.6373828530311585,
      "num_tokens": 124255939.0,
      "step": 320
    },
    {
      "entropy": 1.584341713786125,
      "epoch": 0.1561945331913383,
      "grad_norm": 0.16518791019916534,
      "learning_rate": 9.698463103929542e-06,
      "loss": 1.5887,
      "mean_token_accuracy": 0.6403212890028953,
      "num_tokens": 128177070.0,
      "step": 330
    },
    {
      "entropy": 1.5725542455911636,
      "epoch": 0.1609277008638031,
      "grad_norm": 0.16715767979621887,
      "learning_rate": 9.671120086049246e-06,
      "loss": 1.5799,
      "mean_token_accuracy": 0.6416747942566872,
      "num_tokens": 132121810.0,
      "step": 340
    },
    {
      "entropy": 1.5927457183599472,
      "epoch": 0.1656608685362679,
      "grad_norm": 0.17323796451091766,
      "learning_rate": 9.642632564563576e-06,
      "loss": 1.5939,
      "mean_token_accuracy": 0.6393081307411194,
      "num_tokens": 136012804.0,
      "step": 350
    },
    {
      "entropy": 1.5822071313858033,
      "epoch": 0.1703940362087327,
      "grad_norm": 0.16068372130393982,
      "learning_rate": 9.61300751939821e-06,
      "loss": 1.583,
      "mean_token_accuracy": 0.6409055680036545,
      "num_tokens": 139940284.0,
      "step": 360
    },
    {
      "entropy": 1.568736445903778,
      "epoch": 0.17512720388119749,
      "grad_norm": 0.16828852891921997,
      "learning_rate": 9.582252209191417e-06,
      "loss": 1.5788,
      "mean_token_accuracy": 0.6417408421635628,
      "num_tokens": 143849853.0,
      "step": 370
    },
    {
      "entropy": 1.5762336939573287,
      "epoch": 0.1798603715536623,
      "grad_norm": 0.1591760367155075,
      "learning_rate": 9.550374169515557e-06,
      "loss": 1.5863,
      "mean_token_accuracy": 0.6399773553013801,
      "num_tokens": 147708328.0,
      "step": 380
    },
    {
      "entropy": 1.5851975083351135,
      "epoch": 0.1845935392261271,
      "grad_norm": 0.16111813485622406,
      "learning_rate": 9.517381211030745e-06,
      "loss": 1.5859,
      "mean_token_accuracy": 0.6406419515609741,
      "num_tokens": 151541012.0,
      "step": 390
    },
    {
      "entropy": 1.5762753903865814,
      "epoch": 0.18932670689859188,
      "grad_norm": 0.1650865077972412,
      "learning_rate": 9.4832814175711e-06,
      "loss": 1.583,
      "mean_token_accuracy": 0.6416757836937904,
      "num_tokens": 155434817.0,
      "step": 400
    },
    {
      "entropy": 1.5776998460292817,
      "epoch": 0.19405987457105667,
      "grad_norm": 0.1527196615934372,
      "learning_rate": 9.448083144164078e-06,
      "loss": 1.5798,
      "mean_token_accuracy": 0.6412320747971535,
      "num_tokens": 159311737.0,
      "step": 410
    },
    {
      "entropy": 1.5809161812067032,
      "epoch": 0.19879304224352148,
      "grad_norm": 0.16029658913612366,
      "learning_rate": 9.411795014983338e-06,
      "loss": 1.5847,
      "mean_token_accuracy": 0.64069664478302,
      "num_tokens": 163185815.0,
      "step": 420
    },
    {
      "entropy": 1.582759228348732,
      "epoch": 0.20352620991598627,
      "grad_norm": 0.15568074584007263,
      "learning_rate": 9.374425921235684e-06,
      "loss": 1.5865,
      "mean_token_accuracy": 0.6403465285897255,
      "num_tokens": 167027904.0,
      "step": 430
    },
    {
      "entropy": 1.5735624372959136,
      "epoch": 0.20825937758845106,
      "grad_norm": 0.15200895071029663,
      "learning_rate": 9.33598501898256e-06,
      "loss": 1.5796,
      "mean_token_accuracy": 0.6411942645907402,
      "num_tokens": 170922541.0,
      "step": 440
    },
    {
      "entropy": 1.573996353149414,
      "epoch": 0.21299254526091588,
      "grad_norm": 0.15826307237148285,
      "learning_rate": 9.296481726896663e-06,
      "loss": 1.5784,
      "mean_token_accuracy": 0.6419912785291672,
      "num_tokens": 174791554.0,
      "step": 450
    },
    {
      "entropy": 1.573242038488388,
      "epoch": 0.21772571293338067,
      "grad_norm": 0.16425147652626038,
      "learning_rate": 9.25592572395421e-06,
      "loss": 1.5741,
      "mean_token_accuracy": 0.6426722049713135,
      "num_tokens": 178691330.0,
      "step": 460
    },
    {
      "entropy": 1.5621694713830947,
      "epoch": 0.22245888060584545,
      "grad_norm": 0.15976452827453613,
      "learning_rate": 9.214326947063424e-06,
      "loss": 1.569,
      "mean_token_accuracy": 0.6429914653301239,
      "num_tokens": 182540865.0,
      "step": 470
    },
    {
      "entropy": 1.5637736171483994,
      "epoch": 0.22719204827831027,
      "grad_norm": 0.15400023758411407,
      "learning_rate": 9.171695588629818e-06,
      "loss": 1.5669,
      "mean_token_accuracy": 0.6447574943304062,
      "num_tokens": 186445682.0,
      "step": 480
    },
    {
      "entropy": 1.5614915072917939,
      "epoch": 0.23192521595077506,
      "grad_norm": 0.1612849235534668,
      "learning_rate": 9.128042094058892e-06,
      "loss": 1.5661,
      "mean_token_accuracy": 0.6434317573904991,
      "num_tokens": 190298343.0,
      "step": 490
    },
    {
      "entropy": 1.57402603328228,
      "epoch": 0.23665838362323985,
      "grad_norm": 0.1595270037651062,
      "learning_rate": 9.083377159196824e-06,
      "loss": 1.5783,
      "mean_token_accuracy": 0.6416443169116974,
      "num_tokens": 194193930.0,
      "step": 500
    },
    {
      "entropy": 1.5727292507886887,
      "epoch": 0.24139155129570464,
      "grad_norm": 0.1542680561542511,
      "learning_rate": 9.037711727709812e-06,
      "loss": 1.5708,
      "mean_token_accuracy": 0.6426661461591721,
      "num_tokens": 198099362.0,
      "step": 510
    },
    {
      "entropy": 1.5556272625923158,
      "epoch": 0.24612471896816945,
      "grad_norm": 0.16021884977817535,
      "learning_rate": 8.991056988402682e-06,
      "loss": 1.5594,
      "mean_token_accuracy": 0.6437688574194909,
      "num_tokens": 201971348.0,
      "step": 520
    },
    {
      "entropy": 1.561934667825699,
      "epoch": 0.25085788664063424,
      "grad_norm": 0.160409614443779,
      "learning_rate": 8.943424372477455e-06,
      "loss": 1.5663,
      "mean_token_accuracy": 0.6441657856106758,
      "num_tokens": 205887553.0,
      "step": 530
    },
    {
      "entropy": 1.5650932669639588,
      "epoch": 0.25559105431309903,
      "grad_norm": 0.16672302782535553,
      "learning_rate": 8.894825550732491e-06,
      "loss": 1.5651,
      "mean_token_accuracy": 0.6445234566926956,
      "num_tokens": 209773471.0,
      "step": 540
    },
    {
      "entropy": 1.5703383475542068,
      "epoch": 0.2603242219855638,
      "grad_norm": 0.1647508144378662,
      "learning_rate": 8.845272430702962e-06,
      "loss": 1.5719,
      "mean_token_accuracy": 0.6425846204161644,
      "num_tokens": 213667761.0,
      "step": 550
    },
    {
      "entropy": 1.5690355956554414,
      "epoch": 0.2650573896580286,
      "grad_norm": 0.16149023175239563,
      "learning_rate": 8.79477715374329e-06,
      "loss": 1.5733,
      "mean_token_accuracy": 0.6426513850688934,
      "num_tokens": 217473103.0,
      "step": 560
    },
    {
      "entropy": 1.565832221508026,
      "epoch": 0.26979055733049345,
      "grad_norm": 0.1554195135831833,
      "learning_rate": 8.743352092052326e-06,
      "loss": 1.568,
      "mean_token_accuracy": 0.6430184543132782,
      "num_tokens": 221365501.0,
      "step": 570
    },
    {
      "entropy": 1.562074252963066,
      "epoch": 0.27452372500295824,
      "grad_norm": 0.16073721647262573,
      "learning_rate": 8.691009845641931e-06,
      "loss": 1.5701,
      "mean_token_accuracy": 0.6420939102768898,
      "num_tokens": 225235652.0,
      "step": 580
    },
    {
      "entropy": 1.5577911257743835,
      "epoch": 0.27925689267542303,
      "grad_norm": 0.16031447052955627,
      "learning_rate": 8.637763239249778e-06,
      "loss": 1.5627,
      "mean_token_accuracy": 0.6444457039237023,
      "num_tokens": 229134110.0,
      "step": 590
    },
    {
      "entropy": 1.556574183702469,
      "epoch": 0.2839900603478878,
      "grad_norm": 0.1577136367559433,
      "learning_rate": 8.583625319197063e-06,
      "loss": 1.5565,
      "mean_token_accuracy": 0.6455687090754509,
      "num_tokens": 233039906.0,
      "step": 600
    },
    {
      "entropy": 1.5509059250354766,
      "epoch": 0.2887232280203526,
      "grad_norm": 0.15498368442058563,
      "learning_rate": 8.528609350191938e-06,
      "loss": 1.558,
      "mean_token_accuracy": 0.6440837427973747,
      "num_tokens": 236877041.0,
      "step": 610
    },
    {
      "entropy": 1.5675470024347304,
      "epoch": 0.2934563956928174,
      "grad_norm": 0.16251349449157715,
      "learning_rate": 8.472728812079436e-06,
      "loss": 1.5698,
      "mean_token_accuracy": 0.6437425896525383,
      "num_tokens": 240760840.0,
      "step": 620
    },
    {
      "entropy": 1.5483269661664962,
      "epoch": 0.29818956336528224,
      "grad_norm": 0.16416430473327637,
      "learning_rate": 8.415997396538671e-06,
      "loss": 1.5516,
      "mean_token_accuracy": 0.6461272791028023,
      "num_tokens": 244646788.0,
      "step": 630
    },
    {
      "entropy": 1.5591329038143158,
      "epoch": 0.30292273103774703,
      "grad_norm": 0.15344582498073578,
      "learning_rate": 8.358429003728158e-06,
      "loss": 1.5631,
      "mean_token_accuracy": 0.6448241740465164,
      "num_tokens": 248505578.0,
      "step": 640
    },
    {
      "entropy": 1.5571163922548295,
      "epoch": 0.3076558987102118,
      "grad_norm": 0.16142602264881134,
      "learning_rate": 8.30003773888003e-06,
      "loss": 1.561,
      "mean_token_accuracy": 0.6446821197867394,
      "num_tokens": 252442387.0,
      "step": 650
    },
    {
      "entropy": 1.543628853559494,
      "epoch": 0.3123890663826766,
      "grad_norm": 0.16222691535949707,
      "learning_rate": 8.240837908844011e-06,
      "loss": 1.5465,
      "mean_token_accuracy": 0.6476170778274536,
      "num_tokens": 256347145.0,
      "step": 660
    },
    {
      "entropy": 1.5412938565015792,
      "epoch": 0.3171222340551414,
      "grad_norm": 0.1638648957014084,
      "learning_rate": 8.180844018582014e-06,
      "loss": 1.5483,
      "mean_token_accuracy": 0.6471631437540054,
      "num_tokens": 260255832.0,
      "step": 670
    },
    {
      "entropy": 1.5546195566654206,
      "epoch": 0.3218554017276062,
      "grad_norm": 0.1579921394586563,
      "learning_rate": 8.12007076761416e-06,
      "loss": 1.5539,
      "mean_token_accuracy": 0.6463906392455101,
      "num_tokens": 264145360.0,
      "step": 680
    },
    {
      "entropy": 1.538521057367325,
      "epoch": 0.32658856940007097,
      "grad_norm": 0.15421803295612335,
      "learning_rate": 8.05853304641716e-06,
      "loss": 1.5499,
      "mean_token_accuracy": 0.6467000633478165,
      "num_tokens": 268031581.0,
      "step": 690
    },
    {
      "entropy": 1.5507268905639648,
      "epoch": 0.3313217370725358,
      "grad_norm": 0.16604824364185333,
      "learning_rate": 7.996245932775883e-06,
      "loss": 1.5511,
      "mean_token_accuracy": 0.6466175228357315,
      "num_tokens": 271952881.0,
      "step": 700
    },
    {
      "entropy": 1.5473167181015015,
      "epoch": 0.3360549047450006,
      "grad_norm": 0.16262564063072205,
      "learning_rate": 7.933224688089059e-06,
      "loss": 1.5454,
      "mean_token_accuracy": 0.6477824598550797,
      "num_tokens": 275858331.0,
      "step": 710
    },
    {
      "entropy": 1.5481310993433,
      "epoch": 0.3407880724174654,
      "grad_norm": 0.1532575786113739,
      "learning_rate": 7.869484753629963e-06,
      "loss": 1.5541,
      "mean_token_accuracy": 0.6460428267717362,
      "num_tokens": 279739647.0,
      "step": 720
    },
    {
      "entropy": 1.5434869408607483,
      "epoch": 0.3455212400899302,
      "grad_norm": 0.1606515794992447,
      "learning_rate": 7.805041746763052e-06,
      "loss": 1.5495,
      "mean_token_accuracy": 0.6475459963083268,
      "num_tokens": 283656822.0,
      "step": 730
    },
    {
      "entropy": 1.550658145546913,
      "epoch": 0.35025440776239497,
      "grad_norm": 0.16676083207130432,
      "learning_rate": 7.739911457117437e-06,
      "loss": 1.5461,
      "mean_token_accuracy": 0.6471642255783081,
      "num_tokens": 287491081.0,
      "step": 740
    },
    {
      "entropy": 1.5445130974054337,
      "epoch": 0.35498757543485976,
      "grad_norm": 0.15253710746765137,
      "learning_rate": 7.674109842718163e-06,
      "loss": 1.5545,
      "mean_token_accuracy": 0.646132855117321,
      "num_tokens": 291367807.0,
      "step": 750
    },
    {
      "entropy": 1.5350563913583755,
      "epoch": 0.3597207431073246,
      "grad_norm": 0.16385188698768616,
      "learning_rate": 7.607653026076218e-06,
      "loss": 1.5395,
      "mean_token_accuracy": 0.6482170835137367,
      "num_tokens": 295210867.0,
      "step": 760
    },
    {
      "entropy": 1.5343231499195098,
      "epoch": 0.3644539107797894,
      "grad_norm": 0.1605066955089569,
      "learning_rate": 7.540557290238251e-06,
      "loss": 1.5386,
      "mean_token_accuracy": 0.648663455247879,
      "num_tokens": 299093246.0,
      "step": 770
    },
    {
      "entropy": 1.5489379942417145,
      "epoch": 0.3691870784522542,
      "grad_norm": 0.16125887632369995,
      "learning_rate": 7.4728390747969385e-06,
      "loss": 1.554,
      "mean_token_accuracy": 0.646386744081974,
      "num_tokens": 302979174.0,
      "step": 780
    },
    {
      "entropy": 1.5436400204896927,
      "epoch": 0.37392024612471897,
      "grad_norm": 0.16131755709648132,
      "learning_rate": 7.404514971863015e-06,
      "loss": 1.5547,
      "mean_token_accuracy": 0.6458986714482308,
      "num_tokens": 306861334.0,
      "step": 790
    },
    {
      "entropy": 1.5432285010814666,
      "epoch": 0.37865341379718376,
      "grad_norm": 0.15674395859241486,
      "learning_rate": 7.3356017219999236e-06,
      "loss": 1.543,
      "mean_token_accuracy": 0.6479764103889465,
      "num_tokens": 310763957.0,
      "step": 800
    },
    {
      "entropy": 1.5451003342866898,
      "epoch": 0.38338658146964855,
      "grad_norm": 0.1598990261554718,
      "learning_rate": 7.2661162101220895e-06,
      "loss": 1.5494,
      "mean_token_accuracy": 0.6463793829083443,
      "num_tokens": 314645121.0,
      "step": 810
    },
    {
      "entropy": 1.5456315219402312,
      "epoch": 0.38811974914211333,
      "grad_norm": 0.15369856357574463,
      "learning_rate": 7.196075461357831e-06,
      "loss": 1.5488,
      "mean_token_accuracy": 0.6465605661273003,
      "num_tokens": 318500691.0,
      "step": 820
    },
    {
      "entropy": 1.5482069969177246,
      "epoch": 0.3928529168145782,
      "grad_norm": 0.1560712456703186,
      "learning_rate": 7.125496636877922e-06,
      "loss": 1.5473,
      "mean_token_accuracy": 0.6471294403076172,
      "num_tokens": 322430307.0,
      "step": 830
    },
    {
      "entropy": 1.527587327361107,
      "epoch": 0.39758608448704297,
      "grad_norm": 0.154985710978508,
      "learning_rate": 7.054397029690802e-06,
      "loss": 1.5341,
      "mean_token_accuracy": 0.6498543590307235,
      "num_tokens": 326334568.0,
      "step": 840
    },
    {
      "entropy": 1.5514892965555191,
      "epoch": 0.40231925215950776,
      "grad_norm": 0.15923038125038147,
      "learning_rate": 6.982794060405502e-06,
      "loss": 1.5525,
      "mean_token_accuracy": 0.6457523703575134,
      "num_tokens": 330201587.0,
      "step": 850
    },
    {
      "entropy": 1.533634251356125,
      "epoch": 0.40705241983197255,
      "grad_norm": 0.16257378458976746,
      "learning_rate": 6.910705272963307e-06,
      "loss": 1.5412,
      "mean_token_accuracy": 0.6487752512097359,
      "num_tokens": 334119000.0,
      "step": 860
    },
    {
      "entropy": 1.5422323346138,
      "epoch": 0.41178558750443733,
      "grad_norm": 0.15663348138332367,
      "learning_rate": 6.8381483303391795e-06,
      "loss": 1.5437,
      "mean_token_accuracy": 0.6477985471487046,
      "num_tokens": 337973430.0,
      "step": 870
    },
    {
      "entropy": 1.5455356657505035,
      "epoch": 0.4165187551769021,
      "grad_norm": 0.15920013189315796,
      "learning_rate": 6.765141010214038e-06,
      "loss": 1.5461,
      "mean_token_accuracy": 0.6477194577455521,
      "num_tokens": 341848262.0,
      "step": 880
    },
    {
      "entropy": 1.5300615668296813,
      "epoch": 0.42125192284936697,
      "grad_norm": 0.15260560810565948,
      "learning_rate": 6.691701200618925e-06,
      "loss": 1.5355,
      "mean_token_accuracy": 0.6492922276258468,
      "num_tokens": 345741246.0,
      "step": 890
    },
    {
      "entropy": 1.5360501438379288,
      "epoch": 0.42598509052183176,
      "grad_norm": 0.1613568812608719,
      "learning_rate": 6.617846895552137e-06,
      "loss": 1.5409,
      "mean_token_accuracy": 0.6486483410000801,
      "num_tokens": 349581218.0,
      "step": 900
    },
    {
      "entropy": 1.5319293648004533,
      "epoch": 0.43071825819429654,
      "grad_norm": 0.16074907779693604,
      "learning_rate": 6.543596190570381e-06,
      "loss": 1.5406,
      "mean_token_accuracy": 0.6477162018418312,
      "num_tokens": 353439070.0,
      "step": 910
    },
    {
      "entropy": 1.5505404442548751,
      "epoch": 0.43545142586676133,
      "grad_norm": 0.1633734107017517,
      "learning_rate": 6.4689672783550715e-06,
      "loss": 1.551,
      "mean_token_accuracy": 0.6463420912623405,
      "num_tokens": 357338828.0,
      "step": 920
    },
    {
      "entropy": 1.5329742044210435,
      "epoch": 0.4401845935392261,
      "grad_norm": 0.1549346148967743,
      "learning_rate": 6.393978444254798e-06,
      "loss": 1.5368,
      "mean_token_accuracy": 0.6490220680832863,
      "num_tokens": 361226544.0,
      "step": 930
    },
    {
      "entropy": 1.5339823961257935,
      "epoch": 0.4449177612116909,
      "grad_norm": 0.15460601449012756,
      "learning_rate": 6.3186480618051215e-06,
      "loss": 1.5398,
      "mean_token_accuracy": 0.6480798035860061,
      "num_tokens": 365067789.0,
      "step": 940
    },
    {
      "entropy": 1.5356625199317933,
      "epoch": 0.4496509288841557,
      "grad_norm": 0.1592043936252594,
      "learning_rate": 6.242994588226731e-06,
      "loss": 1.5378,
      "mean_token_accuracy": 0.6489008024334908,
      "num_tokens": 368967569.0,
      "step": 950
    },
    {
      "entropy": 1.532603034377098,
      "epoch": 0.45438409655662054,
      "grad_norm": 0.15242810547351837,
      "learning_rate": 6.1670365599031215e-06,
      "loss": 1.5352,
      "mean_token_accuracy": 0.649759191274643,
      "num_tokens": 372857431.0,
      "step": 960
    },
    {
      "entropy": 1.5232465624809266,
      "epoch": 0.45911726422908533,
      "grad_norm": 0.1570185422897339,
      "learning_rate": 6.090792587838867e-06,
      "loss": 1.5277,
      "mean_token_accuracy": 0.6510468333959579,
      "num_tokens": 376736423.0,
      "step": 970
    },
    {
      "entropy": 1.5315758377313613,
      "epoch": 0.4638504319015501,
      "grad_norm": 0.15817788243293762,
      "learning_rate": 6.014281353099601e-06,
      "loss": 1.5306,
      "mean_token_accuracy": 0.6500695019960403,
      "num_tokens": 380611456.0,
      "step": 980
    },
    {
      "entropy": 1.5315613538026809,
      "epoch": 0.4685835995740149,
      "grad_norm": 0.16947001218795776,
      "learning_rate": 5.937521602234842e-06,
      "loss": 1.5368,
      "mean_token_accuracy": 0.6481669098138809,
      "num_tokens": 384494701.0,
      "step": 990
    },
    {
      "entropy": 1.541380015015602,
      "epoch": 0.4733167672464797,
      "grad_norm": 0.1462167650461197,
      "learning_rate": 5.8605321426847795e-06,
      "loss": 1.5436,
      "mean_token_accuracy": 0.6473192036151886,
      "num_tokens": 388320020.0,
      "step": 1000
    },
    {
      "entropy": 1.5256280928850174,
      "epoch": 0.4780499349189445,
      "grad_norm": 0.16574805974960327,
      "learning_rate": 5.783331838172116e-06,
      "loss": 1.5349,
      "mean_token_accuracy": 0.6488758757710457,
      "num_tokens": 392189228.0,
      "step": 1010
    },
    {
      "entropy": 1.527175721526146,
      "epoch": 0.4827831025914093,
      "grad_norm": 0.15633495151996613,
      "learning_rate": 5.705939604080147e-06,
      "loss": 1.5316,
      "mean_token_accuracy": 0.6492022335529327,
      "num_tokens": 396051728.0,
      "step": 1020
    },
    {
      "entropy": 1.5351572215557099,
      "epoch": 0.4875162702638741,
      "grad_norm": 0.17186181247234344,
      "learning_rate": 5.628374402818173e-06,
      "loss": 1.54,
      "mean_token_accuracy": 0.6493882149457931,
      "num_tokens": 399969073.0,
      "step": 1030
    },
    {
      "entropy": 1.5383628755807877,
      "epoch": 0.4922494379363389,
      "grad_norm": 0.1601661890745163,
      "learning_rate": 5.550655239175377e-06,
      "loss": 1.5395,
      "mean_token_accuracy": 0.6477264970541,
      "num_tokens": 403812928.0,
      "step": 1040
    },
    {
      "entropy": 1.5366960883140564,
      "epoch": 0.4969826056088037,
      "grad_norm": 0.16334351897239685,
      "learning_rate": 5.472801155664339e-06,
      "loss": 1.539,
      "mean_token_accuracy": 0.6490108296275139,
      "num_tokens": 407675957.0,
      "step": 1050
    },
    {
      "entropy": 1.5218679517507554,
      "epoch": 0.5017157732812685,
      "grad_norm": 0.15565453469753265,
      "learning_rate": 5.394831227855291e-06,
      "loss": 1.5295,
      "mean_token_accuracy": 0.64990915954113,
      "num_tokens": 411549219.0,
      "step": 1060
    },
    {
      "entropy": 1.5312676399946212,
      "epoch": 0.5064489409537333,
      "grad_norm": 0.16980209946632385,
      "learning_rate": 5.3167645597022855e-06,
      "loss": 1.5335,
      "mean_token_accuracy": 0.6499560788273812,
      "num_tokens": 415463466.0,
      "step": 1070
    },
    {
      "entropy": 1.5292291462421417,
      "epoch": 0.5111821086261981,
      "grad_norm": 0.1765119880437851,
      "learning_rate": 5.238620278862397e-06,
      "loss": 1.5346,
      "mean_token_accuracy": 0.649413350224495,
      "num_tokens": 419305289.0,
      "step": 1080
    },
    {
      "entropy": 1.5442948073148728,
      "epoch": 0.5159152762986629,
      "grad_norm": 0.1539527326822281,
      "learning_rate": 5.160417532009122e-06,
      "loss": 1.5398,
      "mean_token_accuracy": 0.6489475637674331,
      "num_tokens": 423200780.0,
      "step": 1090
    },
    {
      "entropy": 1.5311467558145524,
      "epoch": 0.5206484439711276,
      "grad_norm": 0.15670068562030792,
      "learning_rate": 5.082175480141126e-06,
      "loss": 1.5399,
      "mean_token_accuracy": 0.6483099237084389,
      "num_tokens": 427105709.0,
      "step": 1100
    },
    {
      "entropy": 1.5239627480506897,
      "epoch": 0.5253816116435924,
      "grad_norm": 0.15966466069221497,
      "learning_rate": 5.003913293887467e-06,
      "loss": 1.5261,
      "mean_token_accuracy": 0.6510903730988502,
      "num_tokens": 430984481.0,
      "step": 1110
    },
    {
      "entropy": 1.521809309720993,
      "epoch": 0.5301147793160572,
      "grad_norm": 0.15411856770515442,
      "learning_rate": 4.925650148810465e-06,
      "loss": 1.5239,
      "mean_token_accuracy": 0.6515027031302452,
      "num_tokens": 434868955.0,
      "step": 1120
    },
    {
      "entropy": 1.5308598577976227,
      "epoch": 0.5348479469885221,
      "grad_norm": 0.16343414783477783,
      "learning_rate": 4.847405220707378e-06,
      "loss": 1.537,
      "mean_token_accuracy": 0.6491450101137162,
      "num_tokens": 438789538.0,
      "step": 1130
    },
    {
      "entropy": 1.522998222708702,
      "epoch": 0.5395811146609869,
      "grad_norm": 0.1585935652256012,
      "learning_rate": 4.7691976809119835e-06,
      "loss": 1.5227,
      "mean_token_accuracy": 0.6509381130337715,
      "num_tokens": 442659205.0,
      "step": 1140
    },
    {
      "entropy": 1.5199230432510376,
      "epoch": 0.5443142823334517,
      "grad_norm": 0.15242497622966766,
      "learning_rate": 4.691046691597303e-06,
      "loss": 1.5277,
      "mean_token_accuracy": 0.6500678122043609,
      "num_tokens": 446546475.0,
      "step": 1150
    },
    {
      "entropy": 1.5328166007995605,
      "epoch": 0.5490474500059165,
      "grad_norm": 0.1544242948293686,
      "learning_rate": 4.612971401080521e-06,
      "loss": 1.5344,
      "mean_token_accuracy": 0.6496497511863708,
      "num_tokens": 450440495.0,
      "step": 1160
    },
    {
      "entropy": 1.5248778969049455,
      "epoch": 0.5537806176783813,
      "grad_norm": 0.15791839361190796,
      "learning_rate": 4.5349909391313384e-06,
      "loss": 1.529,
      "mean_token_accuracy": 0.6500067859888077,
      "num_tokens": 454368931.0,
      "step": 1170
    },
    {
      "entropy": 1.5288578271865845,
      "epoch": 0.5585137853508461,
      "grad_norm": 0.1565057933330536,
      "learning_rate": 4.457124412284849e-06,
      "loss": 1.5328,
      "mean_token_accuracy": 0.6499333620071411,
      "num_tokens": 458289128.0,
      "step": 1180
    },
    {
      "entropy": 1.5237576812505722,
      "epoch": 0.5632469530233108,
      "grad_norm": 0.16522431373596191,
      "learning_rate": 4.379390899160116e-06,
      "loss": 1.5242,
      "mean_token_accuracy": 0.6513824790716172,
      "num_tokens": 462166520.0,
      "step": 1190
    },
    {
      "entropy": 1.5189936101436614,
      "epoch": 0.5679801206957756,
      "grad_norm": 0.1568940430879593,
      "learning_rate": 4.301809445785582e-06,
      "loss": 1.5221,
      "mean_token_accuracy": 0.6510216951370239,
      "num_tokens": 466060024.0,
      "step": 1200
    },
    {
      "entropy": 1.52001011967659,
      "epoch": 0.5727132883682404,
      "grad_norm": 0.16081492602825165,
      "learning_rate": 4.224399060932477e-06,
      "loss": 1.5253,
      "mean_token_accuracy": 0.6509154736995697,
      "num_tokens": 469988704.0,
      "step": 1210
    },
    {
      "entropy": 1.518327683210373,
      "epoch": 0.5774464560407052,
      "grad_norm": 0.1593891978263855,
      "learning_rate": 4.147178711457343e-06,
      "loss": 1.5268,
      "mean_token_accuracy": 0.6499305665493011,
      "num_tokens": 473843546.0,
      "step": 1220
    },
    {
      "entropy": 1.5157315611839295,
      "epoch": 0.58217962371317,
      "grad_norm": 0.1540728062391281,
      "learning_rate": 4.070167317654829e-06,
      "loss": 1.5215,
      "mean_token_accuracy": 0.6516207203269004,
      "num_tokens": 477705517.0,
      "step": 1230
    },
    {
      "entropy": 1.5173570811748505,
      "epoch": 0.5869127913856348,
      "grad_norm": 0.1539110541343689,
      "learning_rate": 3.9933837486219065e-06,
      "loss": 1.5212,
      "mean_token_accuracy": 0.6517001137137413,
      "num_tokens": 481587746.0,
      "step": 1240
    },
    {
      "entropy": 1.519597727060318,
      "epoch": 0.5916459590580996,
      "grad_norm": 0.1510864943265915,
      "learning_rate": 3.916846817634618e-06,
      "loss": 1.5254,
      "mean_token_accuracy": 0.6502514213323594,
      "num_tokens": 485420622.0,
      "step": 1250
    },
    {
      "entropy": 1.5250845789909362,
      "epoch": 0.5963791267305645,
      "grad_norm": 0.15542279183864594,
      "learning_rate": 3.840575277538495e-06,
      "loss": 1.5268,
      "mean_token_accuracy": 0.6506903722882271,
      "num_tokens": 489322234.0,
      "step": 1260
    },
    {
      "entropy": 1.530037170648575,
      "epoch": 0.6011122944030293,
      "grad_norm": 0.16025404632091522,
      "learning_rate": 3.764587816153813e-06,
      "loss": 1.5328,
      "mean_token_accuracy": 0.6497552871704102,
      "num_tokens": 493213867.0,
      "step": 1270
    },
    {
      "entropy": 1.5264522224664687,
      "epoch": 0.6058454620754941,
      "grad_norm": 0.1563606560230255,
      "learning_rate": 3.688903051696737e-06,
      "loss": 1.5271,
      "mean_token_accuracy": 0.6511508211493492,
      "num_tokens": 497079317.0,
      "step": 1280
    },
    {
      "entropy": 1.517073431611061,
      "epoch": 0.6105786297479588,
      "grad_norm": 0.15244126319885254,
      "learning_rate": 3.61353952821756e-06,
      "loss": 1.519,
      "mean_token_accuracy": 0.6525383740663528,
      "num_tokens": 501003584.0,
      "step": 1290
    },
    {
      "entropy": 1.520869755744934,
      "epoch": 0.6153117974204236,
      "grad_norm": 0.15403762459754944,
      "learning_rate": 3.5385157110570917e-06,
      "loss": 1.5296,
      "mean_token_accuracy": 0.6494565770030022,
      "num_tokens": 504882648.0,
      "step": 1300
    },
    {
      "entropy": 1.5278783291578293,
      "epoch": 0.6200449650928884,
      "grad_norm": 0.1558082103729248,
      "learning_rate": 3.463849982322326e-06,
      "loss": 1.532,
      "mean_token_accuracy": 0.6506448686122894,
      "num_tokens": 508796374.0,
      "step": 1310
    },
    {
      "entropy": 1.5135444432497025,
      "epoch": 0.6247781327653532,
      "grad_norm": 0.16050703823566437,
      "learning_rate": 3.389560636382531e-06,
      "loss": 1.5184,
      "mean_token_accuracy": 0.6528948619961739,
      "num_tokens": 512676763.0,
      "step": 1320
    },
    {
      "entropy": 1.5228564977645873,
      "epoch": 0.629511300437818,
      "grad_norm": 0.1465277224779129,
      "learning_rate": 3.315665875386807e-06,
      "loss": 1.5259,
      "mean_token_accuracy": 0.6503187119960785,
      "num_tokens": 516588257.0,
      "step": 1330
    },
    {
      "entropy": 1.5219411462545396,
      "epoch": 0.6342444681102828,
      "grad_norm": 0.15017631649971008,
      "learning_rate": 3.2421838048042516e-06,
      "loss": 1.5266,
      "mean_token_accuracy": 0.6507244795560837,
      "num_tokens": 520496855.0,
      "step": 1340
    },
    {
      "entropy": 1.5131709426641464,
      "epoch": 0.6389776357827476,
      "grad_norm": 0.1609673649072647,
      "learning_rate": 3.169132428987819e-06,
      "loss": 1.5171,
      "mean_token_accuracy": 0.652211906015873,
      "num_tokens": 524368593.0,
      "step": 1350
    },
    {
      "entropy": 1.5158830434083939,
      "epoch": 0.6437108034552124,
      "grad_norm": 0.1541507989168167,
      "learning_rate": 3.0965296467629413e-06,
      "loss": 1.5207,
      "mean_token_accuracy": 0.6517833650112153,
      "num_tokens": 528292613.0,
      "step": 1360
    },
    {
      "entropy": 1.5173752516508103,
      "epoch": 0.6484439711276772,
      "grad_norm": 0.14874839782714844,
      "learning_rate": 3.024393247042018e-06,
      "loss": 1.5186,
      "mean_token_accuracy": 0.6526758641004562,
      "num_tokens": 532197090.0,
      "step": 1370
    },
    {
      "entropy": 1.5261268466711044,
      "epoch": 0.6531771388001419,
      "grad_norm": 0.1556578129529953,
      "learning_rate": 2.9527409044658286e-06,
      "loss": 1.529,
      "mean_token_accuracy": 0.6497259110212326,
      "num_tokens": 536037282.0,
      "step": 1380
    },
    {
      "entropy": 1.5166011482477189,
      "epoch": 0.6579103064726068,
      "grad_norm": 0.15171046555042267,
      "learning_rate": 2.881590175072948e-06,
      "loss": 1.5189,
      "mean_token_accuracy": 0.6521017536520958,
      "num_tokens": 539892183.0,
      "step": 1390
    },
    {
      "entropy": 1.5153537422418595,
      "epoch": 0.6626434741450716,
      "grad_norm": 0.1611650586128235,
      "learning_rate": 2.8109584919982145e-06,
      "loss": 1.5221,
      "mean_token_accuracy": 0.6510555073618889,
      "num_tokens": 543786829.0,
      "step": 1400
    },
    {
      "entropy": 1.5176646322011949,
      "epoch": 0.6673766418175364,
      "grad_norm": 0.16072982549667358,
      "learning_rate": 2.7408631612013228e-06,
      "loss": 1.5198,
      "mean_token_accuracy": 0.6510667949914932,
      "num_tokens": 547697306.0,
      "step": 1410
    },
    {
      "entropy": 1.516287124156952,
      "epoch": 0.6721098094900012,
      "grad_norm": 0.15958984196186066,
      "learning_rate": 2.671321357226566e-06,
      "loss": 1.5191,
      "mean_token_accuracy": 0.6518496558070183,
      "num_tokens": 551553208.0,
      "step": 1420
    },
    {
      "entropy": 1.5146547496318816,
      "epoch": 0.676842977162466,
      "grad_norm": 0.156447172164917,
      "learning_rate": 2.602350118994782e-06,
      "loss": 1.5178,
      "mean_token_accuracy": 0.6530700176954269,
      "num_tokens": 555450956.0,
      "step": 1430
    },
    {
      "entropy": 1.5136778473854064,
      "epoch": 0.6815761448349308,
      "grad_norm": 0.15450631082057953,
      "learning_rate": 2.5339663456285302e-06,
      "loss": 1.5173,
      "mean_token_accuracy": 0.6531704932451248,
      "num_tokens": 559347478.0,
      "step": 1440
    },
    {
      "entropy": 1.5194635063409805,
      "epoch": 0.6863093125073956,
      "grad_norm": 0.1552649736404419,
      "learning_rate": 2.466186792311522e-06,
      "loss": 1.5282,
      "mean_token_accuracy": 0.6514062762260437,
      "num_tokens": 563180332.0,
      "step": 1450
    },
    {
      "entropy": 1.5103699028491975,
      "epoch": 0.6910424801798604,
      "grad_norm": 0.15950508415699005,
      "learning_rate": 2.399028066183306e-06,
      "loss": 1.515,
      "mean_token_accuracy": 0.6529736325144768,
      "num_tokens": 567111777.0,
      "step": 1460
    },
    {
      "entropy": 1.515543583035469,
      "epoch": 0.6957756478523252,
      "grad_norm": 0.1573014110326767,
      "learning_rate": 2.332506622270256e-06,
      "loss": 1.5143,
      "mean_token_accuracy": 0.6537506148219109,
      "num_tokens": 570991505.0,
      "step": 1470
    },
    {
      "entropy": 1.5154441595077515,
      "epoch": 0.7005088155247899,
      "grad_norm": 0.1603464037179947,
      "learning_rate": 2.2666387594537896e-06,
      "loss": 1.5204,
      "mean_token_accuracy": 0.652016893029213,
      "num_tokens": 574883883.0,
      "step": 1480
    },
    {
      "entropy": 1.5132777452468873,
      "epoch": 0.7052419831972547,
      "grad_norm": 0.15662123262882233,
      "learning_rate": 2.201440616476885e-06,
      "loss": 1.5177,
      "mean_token_accuracy": 0.6527584567666054,
      "num_tokens": 578724891.0,
      "step": 1490
    },
    {
      "entropy": 1.5139577269554139,
      "epoch": 0.7099751508697195,
      "grad_norm": 0.15398600697517395,
      "learning_rate": 2.136928167989803e-06,
      "loss": 1.5188,
      "mean_token_accuracy": 0.6523755803704262,
      "num_tokens": 582630649.0,
      "step": 1500
    },
    {
      "entropy": 1.5097831279039382,
      "epoch": 0.7147083185421843,
      "grad_norm": 0.1604987233877182,
      "learning_rate": 2.073117220636027e-06,
      "loss": 1.517,
      "mean_token_accuracy": 0.653074149787426,
      "num_tokens": 586509280.0,
      "step": 1510
    },
    {
      "entropy": 1.5180349677801133,
      "epoch": 0.7194414862146492,
      "grad_norm": 0.15329141914844513,
      "learning_rate": 2.0100234091793778e-06,
      "loss": 1.5201,
      "mean_token_accuracy": 0.6520388692617416,
      "num_tokens": 590387510.0,
      "step": 1520
    },
    {
      "entropy": 1.5136069297790526,
      "epoch": 0.724174653887114,
      "grad_norm": 0.15793868899345398,
      "learning_rate": 1.9476621926732227e-06,
      "loss": 1.5221,
      "mean_token_accuracy": 0.6520983532071114,
      "num_tokens": 594314494.0,
      "step": 1530
    },
    {
      "entropy": 1.5157446891069413,
      "epoch": 0.7289078215595788,
      "grad_norm": 0.15694737434387207,
      "learning_rate": 1.8860488506727409e-06,
      "loss": 1.5175,
      "mean_token_accuracy": 0.6533299177885056,
      "num_tokens": 598258201.0,
      "step": 1540
    },
    {
      "entropy": 1.522775200009346,
      "epoch": 0.7336409892320436,
      "grad_norm": 0.15210026502609253,
      "learning_rate": 1.8251984794911887e-06,
      "loss": 1.526,
      "mean_token_accuracy": 0.6508068576455116,
      "num_tokens": 602110882.0,
      "step": 1550
    },
    {
      "entropy": 1.5086775600910187,
      "epoch": 0.7383741569045084,
      "grad_norm": 0.15608255565166473,
      "learning_rate": 1.765125988501034e-06,
      "loss": 1.513,
      "mean_token_accuracy": 0.6531857639551163,
      "num_tokens": 606013390.0,
      "step": 1560
    },
    {
      "entropy": 1.5057473361492157,
      "epoch": 0.7431073245769731,
      "grad_norm": 0.1536129117012024,
      "learning_rate": 1.7058460964809086e-06,
      "loss": 1.5105,
      "mean_token_accuracy": 0.6533811703324318,
      "num_tokens": 609866962.0,
      "step": 1570
    },
    {
      "entropy": 1.5125353664159775,
      "epoch": 0.7478404922494379,
      "grad_norm": 0.15434998273849487,
      "learning_rate": 1.6473733280092658e-06,
      "loss": 1.5163,
      "mean_token_accuracy": 0.6529958829283714,
      "num_tokens": 613748468.0,
      "step": 1580
    },
    {
      "entropy": 1.5094835668802262,
      "epoch": 0.7525736599219027,
      "grad_norm": 0.16267992556095123,
      "learning_rate": 1.589722009905606e-06,
      "loss": 1.512,
      "mean_token_accuracy": 0.6539744392037392,
      "num_tokens": 617586252.0,
      "step": 1590
    },
    {
      "entropy": 1.5102200895547866,
      "epoch": 0.7573068275943675,
      "grad_norm": 0.15085026621818542,
      "learning_rate": 1.5329062677201594e-06,
      "loss": 1.5152,
      "mean_token_accuracy": 0.652720046043396,
      "num_tokens": 621473159.0,
      "step": 1600
    },
    {
      "entropy": 1.5119531810283662,
      "epoch": 0.7620399952668323,
      "grad_norm": 0.15508662164211273,
      "learning_rate": 1.4769400222728974e-06,
      "loss": 1.5176,
      "mean_token_accuracy": 0.6533014222979545,
      "num_tokens": 625366303.0,
      "step": 1610
    },
    {
      "entropy": 1.5167303442955018,
      "epoch": 0.7667731629392971,
      "grad_norm": 0.14820463955402374,
      "learning_rate": 1.4218369862426896e-06,
      "loss": 1.5186,
      "mean_token_accuracy": 0.6529193088412285,
      "num_tokens": 629214314.0,
      "step": 1620
    },
    {
      "entropy": 1.510753783583641,
      "epoch": 0.7715063306117619,
      "grad_norm": 0.1586351990699768,
      "learning_rate": 1.3676106608074602e-06,
      "loss": 1.5144,
      "mean_token_accuracy": 0.653745847940445,
      "num_tokens": 633129219.0,
      "step": 1630
    },
    {
      "entropy": 1.5246556103229523,
      "epoch": 0.7762394982842267,
      "grad_norm": 0.15720601379871368,
      "learning_rate": 1.3142743323361818e-06,
      "loss": 1.5293,
      "mean_token_accuracy": 0.6503741011023522,
      "num_tokens": 636956832.0,
      "step": 1640
    },
    {
      "entropy": 1.518925702571869,
      "epoch": 0.7809726659566916,
      "grad_norm": 0.15746639668941498,
      "learning_rate": 1.2618410691334805e-06,
      "loss": 1.5244,
      "mean_token_accuracy": 0.6520501941442489,
      "num_tokens": 640891051.0,
      "step": 1650
    },
    {
      "entropy": 1.51315179169178,
      "epoch": 0.7857058336291564,
      "grad_norm": 0.15106885135173798,
      "learning_rate": 1.2103237182376848e-06,
      "loss": 1.5184,
      "mean_token_accuracy": 0.6530534133315087,
      "num_tokens": 644765813.0,
      "step": 1660
    },
    {
      "entropy": 1.5101027220487595,
      "epoch": 0.7904390013016211,
      "grad_norm": 0.15605495870113373,
      "learning_rate": 1.159734902273078e-06,
      "loss": 1.5137,
      "mean_token_accuracy": 0.6541712284088135,
      "num_tokens": 648685831.0,
      "step": 1670
    },
    {
      "entropy": 1.5037204265594482,
      "epoch": 0.7951721689740859,
      "grad_norm": 0.15608017146587372,
      "learning_rate": 1.1100870163571486e-06,
      "loss": 1.5085,
      "mean_token_accuracy": 0.6543751254677772,
      "num_tokens": 652549548.0,
      "step": 1680
    },
    {
      "entropy": 1.5166430234909059,
      "epoch": 0.7999053366465507,
      "grad_norm": 0.15508434176445007,
      "learning_rate": 1.061392225063571e-06,
      "loss": 1.524,
      "mean_token_accuracy": 0.6517237603664399,
      "num_tokens": 656455494.0,
      "step": 1690
    },
    {
      "entropy": 1.5094672977924346,
      "epoch": 0.8046385043190155,
      "grad_norm": 0.1599401831626892,
      "learning_rate": 1.0136624594416828e-06,
      "loss": 1.5145,
      "mean_token_accuracy": 0.6521976351737976,
      "num_tokens": 660321875.0,
      "step": 1700
    },
    {
      "entropy": 1.524332845211029,
      "epoch": 0.8093716719914803,
      "grad_norm": 0.15658311545848846,
      "learning_rate": 9.669094140931678e-07,
      "loss": 1.5249,
      "mean_token_accuracy": 0.651316037774086,
      "num_tokens": 664173208.0,
      "step": 1710
    },
    {
      "entropy": 1.5061070144176483,
      "epoch": 0.8141048396639451,
      "grad_norm": 0.15877439081668854,
      "learning_rate": 9.211445443066896e-07,
      "loss": 1.5095,
      "mean_token_accuracy": 0.6547744259238243,
      "num_tokens": 668090711.0,
      "step": 1720
    },
    {
      "entropy": 1.5110326170921327,
      "epoch": 0.8188380073364099,
      "grad_norm": 0.15988118946552277,
      "learning_rate": 8.763790632511482e-07,
      "loss": 1.52,
      "mean_token_accuracy": 0.6521027386188507,
      "num_tokens": 671930932.0,
      "step": 1730
    },
    {
      "entropy": 1.5050281405448913,
      "epoch": 0.8235711750088747,
      "grad_norm": 0.15555670857429504,
      "learning_rate": 8.326239392282586e-07,
      "loss": 1.5105,
      "mean_token_accuracy": 0.6531902849674225,
      "num_tokens": 675792125.0,
      "step": 1740
    },
    {
      "entropy": 1.5129504561424256,
      "epoch": 0.8283043426813395,
      "grad_norm": 0.16341622173786163,
      "learning_rate": 7.898898929851406e-07,
      "loss": 1.5162,
      "mean_token_accuracy": 0.6542815431952477,
      "num_tokens": 679648075.0,
      "step": 1750
    },
    {
      "entropy": 1.5132198542356492,
      "epoch": 0.8330375103538042,
      "grad_norm": 0.1575479656457901,
      "learning_rate": 7.481873950875468e-07,
      "loss": 1.5167,
      "mean_token_accuracy": 0.6529857292771339,
      "num_tokens": 683496935.0,
      "step": 1760
    },
    {
      "entropy": 1.514217236638069,
      "epoch": 0.837770678026269,
      "grad_norm": 0.16133052110671997,
      "learning_rate": 7.075266633543959e-07,
      "loss": 1.5176,
      "mean_token_accuracy": 0.6526405304670334,
      "num_tokens": 687412196.0,
      "step": 1770
    },
    {
      "entropy": 1.5245876908302307,
      "epoch": 0.8425038456987339,
      "grad_norm": 0.1658501774072647,
      "learning_rate": 6.679176603542398e-07,
      "loss": 1.5267,
      "mean_token_accuracy": 0.6507634833455086,
      "num_tokens": 691261759.0,
      "step": 1780
    },
    {
      "entropy": 1.520870679616928,
      "epoch": 0.8472370133711987,
      "grad_norm": 0.15542222559452057,
      "learning_rate": 6.29370090964262e-07,
      "loss": 1.5237,
      "mean_token_accuracy": 0.6512942254543305,
      "num_tokens": 695146169.0,
      "step": 1790
    },
    {
      "entropy": 1.5049445867538451,
      "epoch": 0.8519701810436635,
      "grad_norm": 0.15088842809200287,
      "learning_rate": 5.918933999924143e-07,
      "loss": 1.507,
      "mean_token_accuracy": 0.6546556517481804,
      "num_tokens": 699030254.0,
      "step": 1800
    },
    {
      "entropy": 1.5152593225240707,
      "epoch": 0.8567033487161283,
      "grad_norm": 0.15672406554222107,
      "learning_rate": 5.55496769863288e-07,
      "loss": 1.5197,
      "mean_token_accuracy": 0.652678707242012,
      "num_tokens": 702916640.0,
      "step": 1810
    },
    {
      "entropy": 1.5112317591905593,
      "epoch": 0.8614365163885931,
      "grad_norm": 0.15444321930408478,
      "learning_rate": 5.201891183682545e-07,
      "loss": 1.5167,
      "mean_token_accuracy": 0.6519044652581215,
      "num_tokens": 706729238.0,
      "step": 1820
    },
    {
      "entropy": 1.5071990102529527,
      "epoch": 0.8661696840610579,
      "grad_norm": 0.1569969356060028,
      "learning_rate": 4.859790964804617e-07,
      "loss": 1.5104,
      "mean_token_accuracy": 0.6534604758024216,
      "num_tokens": 710608784.0,
      "step": 1830
    },
    {
      "entropy": 1.5036645025014876,
      "epoch": 0.8709028517335227,
      "grad_norm": 0.16283676028251648,
      "learning_rate": 4.5287508623519773e-07,
      "loss": 1.5068,
      "mean_token_accuracy": 0.6550710022449493,
      "num_tokens": 714514707.0,
      "step": 1840
    },
    {
      "entropy": 1.5189965814352036,
      "epoch": 0.8756360194059875,
      "grad_norm": 0.15639755129814148,
      "learning_rate": 4.2088519867614585e-07,
      "loss": 1.5222,
      "mean_token_accuracy": 0.6517624288797379,
      "num_tokens": 718366975.0,
      "step": 1850
    },
    {
      "entropy": 1.5078352093696594,
      "epoch": 0.8803691870784522,
      "grad_norm": 0.1577681601047516,
      "learning_rate": 3.900172718680484e-07,
      "loss": 1.5131,
      "mean_token_accuracy": 0.6530882626771927,
      "num_tokens": 722251285.0,
      "step": 1860
    },
    {
      "entropy": 1.5013979017734527,
      "epoch": 0.885102354750917,
      "grad_norm": 0.16839657723903656,
      "learning_rate": 3.6027886897624376e-07,
      "loss": 1.5079,
      "mean_token_accuracy": 0.6545991912484169,
      "num_tokens": 726162080.0,
      "step": 1870
    },
    {
      "entropy": 1.5258357018232345,
      "epoch": 0.8898355224233818,
      "grad_norm": 0.1610095053911209,
      "learning_rate": 3.3167727641356064e-07,
      "loss": 1.529,
      "mean_token_accuracy": 0.6494020417332649,
      "num_tokens": 730048788.0,
      "step": 1880
    },
    {
      "entropy": 1.5098802983760833,
      "epoch": 0.8945686900958466,
      "grad_norm": 0.15612637996673584,
      "learning_rate": 3.0421950205502813e-07,
      "loss": 1.5136,
      "mean_token_accuracy": 0.6532332092523575,
      "num_tokens": 733943282.0,
      "step": 1890
    },
    {
      "entropy": 1.5100417882204056,
      "epoch": 0.8993018577683114,
      "grad_norm": 0.154027059674263,
      "learning_rate": 2.779122735208267e-07,
      "loss": 1.5139,
      "mean_token_accuracy": 0.6538966566324234,
      "num_tokens": 737803762.0,
      "step": 1900
    },
    {
      "entropy": 1.511214417219162,
      "epoch": 0.9040350254407762,
      "grad_norm": 0.1588786244392395,
      "learning_rate": 2.527620365279021e-07,
      "loss": 1.5141,
      "mean_token_accuracy": 0.6533711299300193,
      "num_tokens": 741703649.0,
      "step": 1910
    },
    {
      "entropy": 1.5116032123565675,
      "epoch": 0.9087681931132411,
      "grad_norm": 0.15595540404319763,
      "learning_rate": 2.2877495331066312e-07,
      "loss": 1.5156,
      "mean_token_accuracy": 0.6534523665904999,
      "num_tokens": 745612891.0,
      "step": 1920
    },
    {
      "entropy": 1.5098071992397308,
      "epoch": 0.9135013607857059,
      "grad_norm": 0.160483717918396,
      "learning_rate": 2.0595690111112398e-07,
      "loss": 1.5142,
      "mean_token_accuracy": 0.6534514829516411,
      "num_tokens": 749494675.0,
      "step": 1930
    },
    {
      "entropy": 1.5071230858564377,
      "epoch": 0.9182345284581707,
      "grad_norm": 0.15629717707633972,
      "learning_rate": 1.8431347073888217e-07,
      "loss": 1.5111,
      "mean_token_accuracy": 0.6530539333820343,
      "num_tokens": 753341723.0,
      "step": 1940
    },
    {
      "entropy": 1.5019009202718734,
      "epoch": 0.9229676961306355,
      "grad_norm": 0.15971773862838745,
      "learning_rate": 1.638499652012754e-07,
      "loss": 1.5059,
      "mean_token_accuracy": 0.6544443607330322,
      "num_tokens": 757233014.0,
      "step": 1950
    },
    {
      "entropy": 1.5118261843919754,
      "epoch": 0.9277008638031002,
      "grad_norm": 0.15779262781143188,
      "learning_rate": 1.44571398404057e-07,
      "loss": 1.515,
      "mean_token_accuracy": 0.6532472312450409,
      "num_tokens": 761115255.0,
      "step": 1960
    },
    {
      "entropy": 1.5036541551351548,
      "epoch": 0.932434031475565,
      "grad_norm": 0.1535780280828476,
      "learning_rate": 1.2648249392289925e-07,
      "loss": 1.5061,
      "mean_token_accuracy": 0.6545781284570694,
      "num_tokens": 765028626.0,
      "step": 1970
    },
    {
      "entropy": 1.50391583442688,
      "epoch": 0.9371671991480298,
      "grad_norm": 0.15700839459896088,
      "learning_rate": 1.095876838460408e-07,
      "loss": 1.506,
      "mean_token_accuracy": 0.6557769432663918,
      "num_tokens": 768939705.0,
      "step": 1980
    },
    {
      "entropy": 1.5056363612413406,
      "epoch": 0.9419003668204946,
      "grad_norm": 0.15855862200260162,
      "learning_rate": 9.38911076883503e-08,
      "loss": 1.5082,
      "mean_token_accuracy": 0.6543473988771439,
      "num_tokens": 772833689.0,
      "step": 1990
    },
    {
      "entropy": 1.525366762280464,
      "epoch": 0.9466335344929594,
      "grad_norm": 0.15759994089603424,
      "learning_rate": 7.939661137707167e-08,
      "loss": 1.5292,
      "mean_token_accuracy": 0.650723172724247,
      "num_tokens": 776737000.0,
      "step": 2000
    },
    {
      "entropy": 1.5107537627220153,
      "epoch": 0.9513667021654242,
      "grad_norm": 0.14716793596744537,
      "learning_rate": 6.610774630951167e-08,
      "loss": 1.514,
      "mean_token_accuracy": 0.6538170203566551,
      "num_tokens": 780623496.0,
      "step": 2010
    },
    {
      "entropy": 1.510766288638115,
      "epoch": 0.956099869837889,
      "grad_norm": 0.15204967558383942,
      "learning_rate": 5.402776848288216e-08,
      "loss": 1.5127,
      "mean_token_accuracy": 0.6531319335103035,
      "num_tokens": 784511197.0,
      "step": 2020
    },
    {
      "entropy": 1.5045825749635697,
      "epoch": 0.9608330375103538,
      "grad_norm": 0.162211611866951,
      "learning_rate": 4.315963769652931e-08,
      "loss": 1.5063,
      "mean_token_accuracy": 0.6537961974740029,
      "num_tokens": 788407251.0,
      "step": 2030
    },
    {
      "entropy": 1.5063252121210098,
      "epoch": 0.9655662051828185,
      "grad_norm": 0.15293368697166443,
      "learning_rate": 3.350601682673094e-08,
      "loss": 1.5086,
      "mean_token_accuracy": 0.654248197376728,
      "num_tokens": 792249083.0,
      "step": 2040
    },
    {
      "entropy": 1.5172866135835648,
      "epoch": 0.9702993728552834,
      "grad_norm": 0.15927255153656006,
      "learning_rate": 2.5069271174247844e-08,
      "loss": 1.5187,
      "mean_token_accuracy": 0.6526034936308861,
      "num_tokens": 796138376.0,
      "step": 2050
    },
    {
      "entropy": 1.515240240097046,
      "epoch": 0.9750325405277482,
      "grad_norm": 0.15734337270259857,
      "learning_rate": 1.785146788478298e-08,
      "loss": 1.5168,
      "mean_token_accuracy": 0.6522197261452675,
      "num_tokens": 800048611.0,
      "step": 2060
    },
    {
      "entropy": 1.505976414680481,
      "epoch": 0.979765708200213,
      "grad_norm": 0.15182293951511383,
      "learning_rate": 1.1854375442498234e-08,
      "loss": 1.5083,
      "mean_token_accuracy": 0.653746984899044,
      "num_tokens": 803953901.0,
      "step": 2070
    },
    {
      "entropy": 1.5067311346530914,
      "epoch": 0.9844988758726778,
      "grad_norm": 0.15344145894050598,
      "learning_rate": 7.0794632367032765e-09,
      "loss": 1.5107,
      "mean_token_accuracy": 0.6533343732357025,
      "num_tokens": 807795934.0,
      "step": 2080
    },
    {
      "entropy": 1.5039544194936751,
      "epoch": 0.9892320435451426,
      "grad_norm": 0.15625813603401184,
      "learning_rate": 3.5279012018324397e-09,
      "loss": 1.5083,
      "mean_token_accuracy": 0.6547796651721001,
      "num_tokens": 811672777.0,
      "step": 2090
    },
    {
      "entropy": 1.5102417439222335,
      "epoch": 0.9939652112176074,
      "grad_norm": 0.14735041558742523,
      "learning_rate": 1.2005595307906792e-09,
      "loss": 1.5121,
      "mean_token_accuracy": 0.6533789873123169,
      "num_tokens": 815540486.0,
      "step": 2100
    },
    {
      "entropy": 1.5077374666929244,
      "epoch": 0.9986983788900722,
      "grad_norm": 0.15531828999519348,
      "learning_rate": 9.800846173968481e-11,
      "loss": 1.5117,
      "mean_token_accuracy": 0.6538635641336441,
      "num_tokens": 819362988.0,
      "step": 2110
    }
  ],
  "logging_steps": 10,
  "max_steps": 2113,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.6414323458230452e+19,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}