{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.997134670487107,
  "eval_steps": 50,
  "global_step": 1919,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14,
      "grad_norm": 0.1376492828130722,
      "learning_rate": 2.5e-05,
      "loss": 0.9919,
      "step": 25
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.14622816443443298,
      "learning_rate": 5e-05,
      "loss": 0.8765,
      "step": 50
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.1424524486064911,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.6284,
      "step": 75
    },
    {
      "epoch": 0.57,
      "grad_norm": 0.09082940965890884,
      "learning_rate": 0.0001,
      "loss": 0.4119,
      "step": 100
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.11173515766859055,
      "learning_rate": 9.92603550295858e-05,
      "loss": 0.3504,
      "step": 125
    },
    {
      "epoch": 0.86,
      "grad_norm": 0.09382566064596176,
      "learning_rate": 9.85207100591716e-05,
      "loss": 0.3142,
      "step": 150
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.11800573766231537,
      "learning_rate": 9.77810650887574e-05,
      "loss": 0.299,
      "step": 175
    },
    {
      "epoch": 1.15,
      "grad_norm": 0.1385473757982254,
      "learning_rate": 9.70414201183432e-05,
      "loss": 0.2783,
      "step": 200
    },
    {
      "epoch": 1.29,
      "grad_norm": 0.1731497198343277,
      "learning_rate": 9.6301775147929e-05,
      "loss": 0.2583,
      "step": 225
    },
    {
      "epoch": 1.43,
      "grad_norm": 0.16235513985157013,
      "learning_rate": 9.55621301775148e-05,
      "loss": 0.2408,
      "step": 250
    },
    {
      "epoch": 1.58,
      "grad_norm": 0.2389882653951645,
      "learning_rate": 9.48224852071006e-05,
      "loss": 0.2279,
      "step": 275
    },
    {
      "epoch": 1.72,
      "grad_norm": 0.2514766454696655,
      "learning_rate": 9.408284023668639e-05,
      "loss": 0.2109,
      "step": 300
    },
    {
      "epoch": 1.86,
      "grad_norm": 0.3435331881046295,
      "learning_rate": 9.33431952662722e-05,
      "loss": 0.2009,
      "step": 325
    },
    {
      "epoch": 2.01,
      "grad_norm": 0.2297317236661911,
      "learning_rate": 9.260355029585799e-05,
      "loss": 0.1904,
      "step": 350
    },
    {
      "epoch": 2.15,
      "grad_norm": 0.23601436614990234,
      "learning_rate": 9.186390532544379e-05,
      "loss": 0.1728,
      "step": 375
    },
    {
      "epoch": 2.29,
      "grad_norm": 0.26974865794181824,
      "learning_rate": 9.112426035502959e-05,
      "loss": 0.1658,
      "step": 400
    },
    {
      "epoch": 2.44,
      "grad_norm": 0.3188651502132416,
      "learning_rate": 9.038461538461538e-05,
      "loss": 0.1619,
      "step": 425
    },
    {
      "epoch": 2.58,
      "grad_norm": 0.40681496262550354,
      "learning_rate": 8.96449704142012e-05,
      "loss": 0.1496,
      "step": 450
    },
    {
      "epoch": 2.72,
      "grad_norm": 0.32123222947120667,
      "learning_rate": 8.8905325443787e-05,
      "loss": 0.1498,
      "step": 475
    },
    {
      "epoch": 2.87,
      "grad_norm": 0.2911372482776642,
      "learning_rate": 8.816568047337278e-05,
      "loss": 0.1378,
      "step": 500
    },
    {
      "epoch": 3.01,
      "grad_norm": 0.28380608558654785,
      "learning_rate": 8.742603550295858e-05,
      "loss": 0.1355,
      "step": 525
    },
    {
      "epoch": 3.15,
      "grad_norm": 0.3216865062713623,
      "learning_rate": 8.668639053254438e-05,
      "loss": 0.1302,
      "step": 550
    },
    {
      "epoch": 3.3,
      "grad_norm": 0.29950952529907227,
      "learning_rate": 8.594674556213019e-05,
      "loss": 0.1209,
      "step": 575
    },
    {
      "epoch": 3.44,
      "grad_norm": 0.2643551826477051,
      "learning_rate": 8.520710059171599e-05,
      "loss": 0.1222,
      "step": 600
    },
    {
      "epoch": 3.58,
      "grad_norm": 0.31945034861564636,
      "learning_rate": 8.446745562130178e-05,
      "loss": 0.1172,
      "step": 625
    },
    {
      "epoch": 3.72,
      "grad_norm": 0.28063860535621643,
      "learning_rate": 8.372781065088757e-05,
      "loss": 0.1162,
      "step": 650
    },
    {
      "epoch": 3.87,
      "grad_norm": 0.30673155188560486,
      "learning_rate": 8.298816568047337e-05,
      "loss": 0.1127,
      "step": 675
    },
    {
      "epoch": 4.01,
      "grad_norm": 0.26293325424194336,
      "learning_rate": 8.224852071005918e-05,
      "loss": 0.1078,
      "step": 700
    },
    {
      "epoch": 4.15,
      "grad_norm": 0.3006896674633026,
      "learning_rate": 8.150887573964498e-05,
      "loss": 0.1039,
      "step": 725
    },
    {
      "epoch": 4.3,
      "grad_norm": 0.22531060874462128,
      "learning_rate": 8.076923076923078e-05,
      "loss": 0.1026,
      "step": 750
    },
    {
      "epoch": 4.44,
      "grad_norm": 0.27850422263145447,
      "learning_rate": 8.002958579881658e-05,
      "loss": 0.1038,
      "step": 775
    },
    {
      "epoch": 4.58,
      "grad_norm": 0.24622277915477753,
      "learning_rate": 7.928994082840237e-05,
      "loss": 0.1004,
      "step": 800
    },
    {
      "epoch": 4.73,
      "grad_norm": 0.2631712555885315,
      "learning_rate": 7.855029585798817e-05,
      "loss": 0.0944,
      "step": 825
    },
    {
      "epoch": 4.87,
      "grad_norm": 0.2231552004814148,
      "learning_rate": 7.781065088757397e-05,
      "loss": 0.0937,
      "step": 850
    },
    {
      "epoch": 5.01,
      "grad_norm": 0.2672629952430725,
      "learning_rate": 7.707100591715977e-05,
      "loss": 0.0908,
      "step": 875
    },
    {
      "epoch": 5.16,
      "grad_norm": 0.32010477781295776,
      "learning_rate": 7.633136094674557e-05,
      "loss": 0.0949,
      "step": 900
    },
    {
      "epoch": 5.3,
      "grad_norm": 0.20605669915676117,
      "learning_rate": 7.559171597633137e-05,
      "loss": 0.0883,
      "step": 925
    },
    {
      "epoch": 5.44,
      "grad_norm": 0.2745099663734436,
      "learning_rate": 7.485207100591716e-05,
      "loss": 0.0881,
      "step": 950
    },
    {
      "epoch": 5.59,
      "grad_norm": 0.22129909694194794,
      "learning_rate": 7.411242603550296e-05,
      "loss": 0.0861,
      "step": 975
    },
    {
      "epoch": 5.73,
      "grad_norm": 0.19070565700531006,
      "learning_rate": 7.337278106508876e-05,
      "loss": 0.0874,
      "step": 1000
    },
    {
      "epoch": 5.87,
      "grad_norm": 0.27825799584388733,
      "learning_rate": 7.263313609467456e-05,
      "loss": 0.0846,
      "step": 1025
    },
    {
      "epoch": 6.02,
      "grad_norm": 0.28191158175468445,
      "learning_rate": 7.189349112426036e-05,
      "loss": 0.0859,
      "step": 1050
    },
    {
      "epoch": 6.16,
      "grad_norm": 0.2130414992570877,
      "learning_rate": 7.115384615384616e-05,
      "loss": 0.0843,
      "step": 1075
    },
    {
      "epoch": 6.3,
      "grad_norm": 0.2572125494480133,
      "learning_rate": 7.041420118343195e-05,
      "loss": 0.0829,
      "step": 1100
    },
    {
      "epoch": 6.45,
      "grad_norm": 0.2799431383609772,
      "learning_rate": 6.967455621301775e-05,
      "loss": 0.0794,
      "step": 1125
    },
    {
      "epoch": 6.59,
      "grad_norm": 0.22224412858486176,
      "learning_rate": 6.893491124260355e-05,
      "loss": 0.0807,
      "step": 1150
    },
    {
      "epoch": 6.73,
      "grad_norm": 0.19804546236991882,
      "learning_rate": 6.819526627218935e-05,
      "loss": 0.077,
      "step": 1175
    },
    {
      "epoch": 6.88,
      "grad_norm": 0.2524961233139038,
      "learning_rate": 6.745562130177515e-05,
      "loss": 0.0802,
      "step": 1200
    },
    {
      "epoch": 7.02,
      "grad_norm": 0.18262654542922974,
      "learning_rate": 6.671597633136095e-05,
      "loss": 0.0757,
      "step": 1225
    },
    {
      "epoch": 7.16,
      "grad_norm": 0.1993337720632553,
      "learning_rate": 6.597633136094676e-05,
      "loss": 0.0766,
      "step": 1250
    },
    {
      "epoch": 7.31,
      "grad_norm": 0.19785763323307037,
      "learning_rate": 6.523668639053254e-05,
      "loss": 0.0751,
      "step": 1275
    },
    {
      "epoch": 7.45,
      "grad_norm": 0.19745734333992004,
      "learning_rate": 6.449704142011834e-05,
      "loss": 0.0768,
      "step": 1300
    },
    {
      "epoch": 7.59,
      "grad_norm": 0.17973637580871582,
      "learning_rate": 6.375739644970414e-05,
      "loss": 0.0738,
      "step": 1325
    },
    {
      "epoch": 7.74,
      "grad_norm": 0.1808304488658905,
      "learning_rate": 6.301775147928994e-05,
      "loss": 0.0757,
      "step": 1350
    },
    {
      "epoch": 7.88,
      "grad_norm": 0.19356884062290192,
      "learning_rate": 6.227810650887575e-05,
      "loss": 0.0738,
      "step": 1375
    },
    {
      "epoch": 8.02,
      "grad_norm": 0.19427159428596497,
      "learning_rate": 6.153846153846155e-05,
      "loss": 0.0748,
      "step": 1400
    },
    {
      "epoch": 8.17,
      "grad_norm": 0.16624993085861206,
      "learning_rate": 6.079881656804735e-05,
      "loss": 0.0737,
      "step": 1425
    },
    {
      "epoch": 8.31,
      "grad_norm": 0.2472502738237381,
      "learning_rate": 6.005917159763313e-05,
      "loss": 0.0726,
      "step": 1450
    },
    {
      "epoch": 8.45,
      "grad_norm": 0.20945917069911957,
      "learning_rate": 5.931952662721894e-05,
      "loss": 0.0703,
      "step": 1475
    },
    {
      "epoch": 8.6,
      "grad_norm": 0.21044062077999115,
      "learning_rate": 5.8579881656804736e-05,
      "loss": 0.0705,
      "step": 1500
    },
    {
      "epoch": 8.74,
      "grad_norm": 0.1783844232559204,
      "learning_rate": 5.7840236686390534e-05,
      "loss": 0.0711,
      "step": 1525
    },
    {
      "epoch": 8.88,
      "grad_norm": 0.1689521223306656,
      "learning_rate": 5.710059171597634e-05,
      "loss": 0.0738,
      "step": 1550
    },
    {
      "epoch": 9.03,
      "grad_norm": 0.1530502885580063,
      "learning_rate": 5.636094674556214e-05,
      "loss": 0.0684,
      "step": 1575
    },
    {
      "epoch": 9.17,
      "grad_norm": 0.1769784688949585,
      "learning_rate": 5.562130177514793e-05,
      "loss": 0.0699,
      "step": 1600
    },
    {
      "epoch": 9.31,
      "grad_norm": 0.19619758427143097,
      "learning_rate": 5.488165680473373e-05,
      "loss": 0.0681,
      "step": 1625
    },
    {
      "epoch": 9.46,
      "grad_norm": 0.19810470938682556,
      "learning_rate": 5.4142011834319526e-05,
      "loss": 0.071,
      "step": 1650
    },
    {
      "epoch": 9.6,
      "grad_norm": 0.183139830827713,
      "learning_rate": 5.340236686390533e-05,
      "loss": 0.0675,
      "step": 1675
    },
    {
      "epoch": 9.74,
      "grad_norm": 0.22580993175506592,
      "learning_rate": 5.266272189349113e-05,
      "loss": 0.068,
      "step": 1700
    },
    {
      "epoch": 9.89,
      "grad_norm": 0.1878005862236023,
      "learning_rate": 5.192307692307693e-05,
      "loss": 0.0682,
      "step": 1725
    },
    {
      "epoch": 10.03,
      "grad_norm": 0.17333678901195526,
      "learning_rate": 5.118343195266272e-05,
      "loss": 0.0683,
      "step": 1750
    },
    {
      "epoch": 10.17,
      "grad_norm": 0.16736529767513275,
      "learning_rate": 5.044378698224852e-05,
      "loss": 0.0639,
      "step": 1775
    },
    {
      "epoch": 10.32,
      "grad_norm": 0.15685293078422546,
      "learning_rate": 4.970414201183432e-05,
      "loss": 0.0682,
      "step": 1800
    },
    {
      "epoch": 10.46,
      "grad_norm": 0.16026218235492706,
      "learning_rate": 4.896449704142012e-05,
      "loss": 0.0657,
      "step": 1825
    },
    {
      "epoch": 10.6,
      "grad_norm": 0.17992612719535828,
      "learning_rate": 4.822485207100592e-05,
      "loss": 0.0648,
      "step": 1850
    },
    {
      "epoch": 10.74,
      "grad_norm": 0.17420756816864014,
      "learning_rate": 4.748520710059172e-05,
      "loss": 0.0664,
      "step": 1875
    },
    {
      "epoch": 10.89,
      "grad_norm": 0.14792782068252563,
      "learning_rate": 4.674556213017752e-05,
      "loss": 0.0668,
      "step": 1900
    }
  ],
  "logging_steps": 25,
  "max_steps": 3480,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 100,
  "total_flos": 1.371816806339666e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}