{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 837,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03590664272890485,
      "grad_norm": 2.185314827906911,
      "learning_rate": 1.0714285714285714e-06,
      "loss": 0.5255,
      "step": 10
    },
    {
      "epoch": 0.0718132854578097,
      "grad_norm": 0.9772670854647908,
      "learning_rate": 2.261904761904762e-06,
      "loss": 0.4864,
      "step": 20
    },
    {
      "epoch": 0.10771992818671454,
      "grad_norm": 0.5510959017484935,
      "learning_rate": 3.4523809523809528e-06,
      "loss": 0.4451,
      "step": 30
    },
    {
      "epoch": 0.1436265709156194,
      "grad_norm": 0.36493152532802847,
      "learning_rate": 4.642857142857144e-06,
      "loss": 0.4149,
      "step": 40
    },
    {
      "epoch": 0.17953321364452424,
      "grad_norm": 0.2869233938041407,
      "learning_rate": 5.833333333333334e-06,
      "loss": 0.3955,
      "step": 50
    },
    {
      "epoch": 0.21543985637342908,
      "grad_norm": 0.2374087352719959,
      "learning_rate": 7.023809523809524e-06,
      "loss": 0.3807,
      "step": 60
    },
    {
      "epoch": 0.2513464991023339,
      "grad_norm": 0.19085750578643032,
      "learning_rate": 8.214285714285714e-06,
      "loss": 0.3719,
      "step": 70
    },
    {
      "epoch": 0.2872531418312388,
      "grad_norm": 0.18588659208121952,
      "learning_rate": 9.404761904761905e-06,
      "loss": 0.3638,
      "step": 80
    },
    {
      "epoch": 0.3231597845601436,
      "grad_norm": 0.19078133253606472,
      "learning_rate": 9.998912137362367e-06,
      "loss": 0.3602,
      "step": 90
    },
    {
      "epoch": 0.3590664272890485,
      "grad_norm": 0.18585754435457444,
      "learning_rate": 9.990212076323587e-06,
      "loss": 0.357,
      "step": 100
    },
    {
      "epoch": 0.39497307001795334,
      "grad_norm": 0.1879818930694756,
      "learning_rate": 9.97282709575282e-06,
      "loss": 0.3511,
      "step": 110
    },
    {
      "epoch": 0.43087971274685816,
      "grad_norm": 0.17595485058415583,
      "learning_rate": 9.946787452311507e-06,
      "loss": 0.3474,
      "step": 120
    },
    {
      "epoch": 0.466786355475763,
      "grad_norm": 0.18168043823227237,
      "learning_rate": 9.912138465157325e-06,
      "loss": 0.3466,
      "step": 130
    },
    {
      "epoch": 0.5026929982046678,
      "grad_norm": 0.19246537631691138,
      "learning_rate": 9.86894043707114e-06,
      "loss": 0.3439,
      "step": 140
    },
    {
      "epoch": 0.5385996409335727,
      "grad_norm": 0.20913120656523562,
      "learning_rate": 9.81726854950659e-06,
      "loss": 0.3392,
      "step": 150
    },
    {
      "epoch": 0.5745062836624776,
      "grad_norm": 0.1943925752607722,
      "learning_rate": 9.757212731744973e-06,
      "loss": 0.3382,
      "step": 160
    },
    {
      "epoch": 0.6104129263913824,
      "grad_norm": 0.22384055375769804,
      "learning_rate": 9.688877504383158e-06,
      "loss": 0.3386,
      "step": 170
    },
    {
      "epoch": 0.6463195691202872,
      "grad_norm": 0.19509930129044936,
      "learning_rate": 9.612381797426874e-06,
      "loss": 0.3378,
      "step": 180
    },
    {
      "epoch": 0.6822262118491921,
      "grad_norm": 0.23177190223223015,
      "learning_rate": 9.52785874330602e-06,
      "loss": 0.333,
      "step": 190
    },
    {
      "epoch": 0.718132854578097,
      "grad_norm": 0.2006246018340954,
      "learning_rate": 9.435455445172214e-06,
      "loss": 0.3311,
      "step": 200
    },
    {
      "epoch": 0.7540394973070018,
      "grad_norm": 0.2042999512470892,
      "learning_rate": 9.33533272088179e-06,
      "loss": 0.3313,
      "step": 210
    },
    {
      "epoch": 0.7899461400359067,
      "grad_norm": 0.21930626870236752,
      "learning_rate": 9.227664823109884e-06,
      "loss": 0.3325,
      "step": 220
    },
    {
      "epoch": 0.8258527827648114,
      "grad_norm": 0.23685553683202806,
      "learning_rate": 9.11263913608266e-06,
      "loss": 0.3308,
      "step": 230
    },
    {
      "epoch": 0.8617594254937163,
      "grad_norm": 0.21203795699103023,
      "learning_rate": 8.990455849455522e-06,
      "loss": 0.3306,
      "step": 240
    },
    {
      "epoch": 0.8976660682226212,
      "grad_norm": 0.2617908535020091,
      "learning_rate": 8.861327609904859e-06,
      "loss": 0.328,
      "step": 250
    },
    {
      "epoch": 0.933572710951526,
      "grad_norm": 0.20428110286245318,
      "learning_rate": 8.725479151039714e-06,
      "loss": 0.3249,
      "step": 260
    },
    {
      "epoch": 0.9694793536804309,
      "grad_norm": 0.23276915935689596,
      "learning_rate": 8.583146902277464e-06,
      "loss": 0.3287,
      "step": 270
    },
    {
      "epoch": 1.0035906642728905,
      "grad_norm": 0.2445973206934874,
      "learning_rate": 8.434578577364218e-06,
      "loss": 0.3246,
      "step": 280
    },
    {
      "epoch": 1.0394973070017954,
      "grad_norm": 0.21984748841500337,
      "learning_rate": 8.28003274325608e-06,
      "loss": 0.3196,
      "step": 290
    },
    {
      "epoch": 1.0754039497307002,
      "grad_norm": 0.21007463727725653,
      "learning_rate": 8.119778370111566e-06,
      "loss": 0.3129,
      "step": 300
    },
    {
      "epoch": 1.111310592459605,
      "grad_norm": 0.22501623459913725,
      "learning_rate": 7.954094363178421e-06,
      "loss": 0.3145,
      "step": 310
    },
    {
      "epoch": 1.14721723518851,
      "grad_norm": 0.20904438470673392,
      "learning_rate": 7.783269077389447e-06,
      "loss": 0.3132,
      "step": 320
    },
    {
      "epoch": 1.1831238779174147,
      "grad_norm": 0.1929405792634159,
      "learning_rate": 7.607599815512226e-06,
      "loss": 0.3119,
      "step": 330
    },
    {
      "epoch": 1.2190305206463194,
      "grad_norm": 0.20576378490371444,
      "learning_rate": 7.427392310726088e-06,
      "loss": 0.3145,
      "step": 340
    },
    {
      "epoch": 1.2549371633752244,
      "grad_norm": 0.22314438193898609,
      "learning_rate": 7.242960194526893e-06,
      "loss": 0.314,
      "step": 350
    },
    {
      "epoch": 1.2908438061041292,
      "grad_norm": 0.18526094334512155,
      "learning_rate": 7.054624450885621e-06,
      "loss": 0.3147,
      "step": 360
    },
    {
      "epoch": 1.3267504488330342,
      "grad_norm": 0.20944522582021186,
      "learning_rate": 6.862712857610812e-06,
      "loss": 0.3125,
      "step": 370
    },
    {
      "epoch": 1.362657091561939,
      "grad_norm": 0.20970244825805384,
      "learning_rate": 6.667559415887055e-06,
      "loss": 0.3119,
      "step": 380
    },
    {
      "epoch": 1.398563734290844,
      "grad_norm": 0.18929491036558882,
      "learning_rate": 6.469503768982379e-06,
      "loss": 0.3144,
      "step": 390
    },
    {
      "epoch": 1.4344703770197487,
      "grad_norm": 0.18469452603163652,
      "learning_rate": 6.2688906111362115e-06,
      "loss": 0.3131,
      "step": 400
    },
    {
      "epoch": 1.4703770197486534,
      "grad_norm": 0.18021217056496155,
      "learning_rate": 6.066069087656665e-06,
      "loss": 0.3128,
      "step": 410
    },
    {
      "epoch": 1.5062836624775584,
      "grad_norm": 0.31157448102210894,
      "learning_rate": 5.8613921872712446e-06,
      "loss": 0.3115,
      "step": 420
    },
    {
      "epoch": 1.5421903052064632,
      "grad_norm": 0.19847526072216384,
      "learning_rate": 5.655216127788472e-06,
      "loss": 0.3088,
      "step": 430
    },
    {
      "epoch": 1.578096947935368,
      "grad_norm": 0.19215915923161053,
      "learning_rate": 5.447899736139676e-06,
      "loss": 0.3063,
      "step": 440
    },
    {
      "epoch": 1.614003590664273,
      "grad_norm": 0.18221395113654418,
      "learning_rate": 5.239803823879878e-06,
      "loss": 0.3082,
      "step": 450
    },
    {
      "epoch": 1.6499102333931779,
      "grad_norm": 0.1749484556213854,
      "learning_rate": 5.03129055923465e-06,
      "loss": 0.3089,
      "step": 460
    },
    {
      "epoch": 1.6858168761220824,
      "grad_norm": 0.17613434487474727,
      "learning_rate": 4.822722836785842e-06,
      "loss": 0.3068,
      "step": 470
    },
    {
      "epoch": 1.7217235188509874,
      "grad_norm": 0.1751185479216866,
      "learning_rate": 4.614463645893175e-06,
      "loss": 0.3068,
      "step": 480
    },
    {
      "epoch": 1.7576301615798924,
      "grad_norm": 0.1748572053302032,
      "learning_rate": 4.4068754389508616e-06,
      "loss": 0.3093,
      "step": 490
    },
    {
      "epoch": 1.7935368043087971,
      "grad_norm": 0.16507354789633685,
      "learning_rate": 4.2003195005787745e-06,
      "loss": 0.3099,
      "step": 500
    },
    {
      "epoch": 1.829443447037702,
      "grad_norm": 0.170392010993257,
      "learning_rate": 3.995155318845994e-06,
      "loss": 0.3074,
      "step": 510
    },
    {
      "epoch": 1.8653500897666069,
      "grad_norm": 0.18627037895031676,
      "learning_rate": 3.791739959621054e-06,
      "loss": 0.3091,
      "step": 520
    },
    {
      "epoch": 1.9012567324955116,
      "grad_norm": 0.17081506891008494,
      "learning_rate": 3.5904274451377634e-06,
      "loss": 0.3071,
      "step": 530
    },
    {
      "epoch": 1.9371633752244164,
      "grad_norm": 0.1697011202581245,
      "learning_rate": 3.3915681378581416e-06,
      "loss": 0.3093,
      "step": 540
    },
    {
      "epoch": 1.9730700179533214,
      "grad_norm": 0.17582268731336928,
      "learning_rate": 3.195508130704795e-06,
      "loss": 0.3072,
      "step": 550
    },
    {
      "epoch": 2.007181328545781,
      "grad_norm": 0.16800018749204126,
      "learning_rate": 3.0025886447239474e-06,
      "loss": 0.3027,
      "step": 560
    },
    {
      "epoch": 2.043087971274686,
      "grad_norm": 0.17959400944310325,
      "learning_rate": 2.8131454352274525e-06,
      "loss": 0.2975,
      "step": 570
    },
    {
      "epoch": 2.078994614003591,
      "grad_norm": 0.1785065928144257,
      "learning_rate": 2.627508207447308e-06,
      "loss": 0.2986,
      "step": 580
    },
    {
      "epoch": 2.1149012567324954,
      "grad_norm": 0.15727463247343326,
      "learning_rate": 2.4460000427196916e-06,
      "loss": 0.2949,
      "step": 590
    },
    {
      "epoch": 2.1508078994614004,
      "grad_norm": 0.15805698099204835,
      "learning_rate": 2.268936836197144e-06,
      "loss": 0.2952,
      "step": 600
    },
    {
      "epoch": 2.1867145421903054,
      "grad_norm": 0.16920663238784117,
      "learning_rate": 2.0966267470675273e-06,
      "loss": 0.2985,
      "step": 610
    },
    {
      "epoch": 2.22262118491921,
      "grad_norm": 0.16646936573182844,
      "learning_rate": 1.929369662236604e-06,
      "loss": 0.2979,
      "step": 620
    },
    {
      "epoch": 2.258527827648115,
      "grad_norm": 0.3421880613089778,
      "learning_rate": 1.7674566744075882e-06,
      "loss": 0.298,
      "step": 630
    },
    {
      "epoch": 2.29443447037702,
      "grad_norm": 0.15025140288330655,
      "learning_rate": 1.6111695754660667e-06,
      "loss": 0.2942,
      "step": 640
    },
    {
      "epoch": 2.3303411131059244,
      "grad_norm": 0.17473486663498805,
      "learning_rate": 1.4607803660519803e-06,
      "loss": 0.2952,
      "step": 650
    },
    {
      "epoch": 2.3662477558348294,
      "grad_norm": 0.1589150679388623,
      "learning_rate": 1.3165507821721906e-06,
      "loss": 0.2983,
      "step": 660
    },
    {
      "epoch": 2.4021543985637344,
      "grad_norm": 0.1703479004667421,
      "learning_rate": 1.1787318396775188e-06,
      "loss": 0.298,
      "step": 670
    },
    {
      "epoch": 2.438061041292639,
      "grad_norm": 0.1513614749600422,
      "learning_rate": 1.0475633973970573e-06,
      "loss": 0.2959,
      "step": 680
    },
    {
      "epoch": 2.473967684021544,
      "grad_norm": 0.1876551965771974,
      "learning_rate": 9.232737396900543e-07,
      "loss": 0.2996,
      "step": 690
    },
    {
      "epoch": 2.509874326750449,
      "grad_norm": 0.15388873373154505,
      "learning_rate": 8.060791791418887e-07,
      "loss": 0.2966,
      "step": 700
    },
    {
      "epoch": 2.545780969479354,
      "grad_norm": 0.15535759307395802,
      "learning_rate": 6.96183680095639e-07,
      "loss": 0.2991,
      "step": 710
    },
    {
      "epoch": 2.5816876122082584,
      "grad_norm": 0.14993765716059407,
      "learning_rate": 5.937785036743893e-07,
      "loss": 0.298,
      "step": 720
    },
    {
      "epoch": 2.6175942549371634,
      "grad_norm": 0.14735943617696928,
      "learning_rate": 4.990418749121179e-07,
      "loss": 0.2966,
      "step": 730
    },
    {
      "epoch": 2.6535008976660683,
      "grad_norm": 0.1548863032393796,
      "learning_rate": 4.121386725724835e-07,
      "loss": 0.2978,
      "step": 740
    },
    {
      "epoch": 2.6894075403949733,
      "grad_norm": 0.15810495711820732,
      "learning_rate": 3.3322014219532575e-07,
      "loss": 0.2976,
      "step": 750
    },
    {
      "epoch": 2.725314183123878,
      "grad_norm": 0.15072810300321354,
      "learning_rate": 2.6242363287030617e-07,
      "loss": 0.296,
      "step": 760
    },
    {
      "epoch": 2.761220825852783,
      "grad_norm": 0.2424841654880629,
      "learning_rate": 1.9987235819581118e-07,
      "loss": 0.2969,
      "step": 770
    },
    {
      "epoch": 2.797127468581688,
      "grad_norm": 0.22116921613107723,
      "learning_rate": 1.4567518183912887e-07,
      "loss": 0.2964,
      "step": 780
    },
    {
      "epoch": 2.8330341113105924,
      "grad_norm": 0.14618317771038028,
      "learning_rate": 9.992642807111486e-08,
      "loss": 0.2978,
      "step": 790
    },
    {
      "epoch": 2.8689407540394973,
      "grad_norm": 0.1636178141701447,
      "learning_rate": 6.270571760509547e-08,
      "loss": 0.2991,
      "step": 800
    },
    {
      "epoch": 2.9048473967684023,
      "grad_norm": 0.14508080741570623,
      "learning_rate": 3.4077829025703226e-08,
      "loss": 0.2942,
      "step": 810
    },
    {
      "epoch": 2.940754039497307,
      "grad_norm": 0.1398003635276662,
      "learning_rate": 1.4092586048820578e-08,
      "loss": 0.2962,
      "step": 820
    },
    {
      "epoch": 2.976660682226212,
      "grad_norm": 0.1587618807983056,
      "learning_rate": 2.784770808839654e-09,
      "loss": 0.2982,
      "step": 830
    }
  ],
  "logging_steps": 10,
  "max_steps": 837,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 10000000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8093209209077760.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}