{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 375,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04024144869215292,
      "grad_norm": 128.68338012695312,
      "learning_rate": 6.34920634920635e-07,
      "loss": 5.7142,
      "step": 5
    },
    {
      "epoch": 0.08048289738430583,
      "grad_norm": 124.65970611572266,
      "learning_rate": 1.4285714285714286e-06,
      "loss": 5.8126,
      "step": 10
    },
    {
      "epoch": 0.12072434607645875,
      "grad_norm": 63.15693664550781,
      "learning_rate": 2.222222222222222e-06,
      "loss": 4.9144,
      "step": 15
    },
    {
      "epoch": 0.16096579476861167,
      "grad_norm": 26.409881591796875,
      "learning_rate": 3.015873015873016e-06,
      "loss": 4.1026,
      "step": 20
    },
    {
      "epoch": 0.2012072434607646,
      "grad_norm": 18.272987365722656,
      "learning_rate": 3.80952380952381e-06,
      "loss": 3.8405,
      "step": 25
    },
    {
      "epoch": 0.2414486921529175,
      "grad_norm": 13.232375144958496,
      "learning_rate": 4.603174603174604e-06,
      "loss": 3.5374,
      "step": 30
    },
    {
      "epoch": 0.28169014084507044,
      "grad_norm": 50.732887268066406,
      "learning_rate": 5.396825396825397e-06,
      "loss": 3.3776,
      "step": 35
    },
    {
      "epoch": 0.32193158953722334,
      "grad_norm": 11.886248588562012,
      "learning_rate": 6.1904761904761914e-06,
      "loss": 3.2729,
      "step": 40
    },
    {
      "epoch": 0.36217303822937624,
      "grad_norm": 12.157098770141602,
      "learning_rate": 6.984126984126984e-06,
      "loss": 3.0134,
      "step": 45
    },
    {
      "epoch": 0.4024144869215292,
      "grad_norm": 11.598234176635742,
      "learning_rate": 7.77777777777778e-06,
      "loss": 2.9235,
      "step": 50
    },
    {
      "epoch": 0.4426559356136821,
      "grad_norm": 11.506558418273926,
      "learning_rate": 8.571428571428571e-06,
      "loss": 2.584,
      "step": 55
    },
    {
      "epoch": 0.482897384305835,
      "grad_norm": 14.368203163146973,
      "learning_rate": 9.365079365079366e-06,
      "loss": 2.4279,
      "step": 60
    },
    {
      "epoch": 0.5231388329979879,
      "grad_norm": 12.335148811340332,
      "learning_rate": 9.999921879324127e-06,
      "loss": 2.244,
      "step": 65
    },
    {
      "epoch": 0.5633802816901409,
      "grad_norm": 17.981578826904297,
      "learning_rate": 9.997187911979252e-06,
      "loss": 2.0648,
      "step": 70
    },
    {
      "epoch": 0.6036217303822937,
      "grad_norm": 12.924208641052246,
      "learning_rate": 9.990550351633784e-06,
      "loss": 2.0333,
      "step": 75
    },
    {
      "epoch": 0.6438631790744467,
      "grad_norm": 36.98970031738281,
      "learning_rate": 9.980014383270668e-06,
      "loss": 1.7114,
      "step": 80
    },
    {
      "epoch": 0.6841046277665996,
      "grad_norm": 11.499068260192871,
      "learning_rate": 9.965588237145219e-06,
      "loss": 1.725,
      "step": 85
    },
    {
      "epoch": 0.7243460764587525,
      "grad_norm": 13.697612762451172,
      "learning_rate": 9.947283182355982e-06,
      "loss": 1.7076,
      "step": 90
    },
    {
      "epoch": 0.7645875251509054,
      "grad_norm": 11.36514663696289,
      "learning_rate": 9.925113518041796e-06,
      "loss": 1.4663,
      "step": 95
    },
    {
      "epoch": 0.8048289738430584,
      "grad_norm": 11.20559024810791,
      "learning_rate": 9.899096562211902e-06,
      "loss": 1.1067,
      "step": 100
    },
    {
      "epoch": 0.8450704225352113,
      "grad_norm": 10.782366752624512,
      "learning_rate": 9.869252638217846e-06,
      "loss": 1.2299,
      "step": 105
    },
    {
      "epoch": 0.8853118712273642,
      "grad_norm": 12.094294548034668,
      "learning_rate": 9.83560505887773e-06,
      "loss": 0.9908,
      "step": 110
    },
    {
      "epoch": 0.9255533199195171,
      "grad_norm": 10.271987915039062,
      "learning_rate": 9.798180108265218e-06,
      "loss": 0.8628,
      "step": 115
    },
    {
      "epoch": 0.96579476861167,
      "grad_norm": 13.298333168029785,
      "learning_rate": 9.757007021177529e-06,
      "loss": 0.7913,
      "step": 120
    },
    {
      "epoch": 1.0,
      "grad_norm": 7.651159286499023,
      "learning_rate": 9.712117960298433e-06,
      "loss": 0.6121,
      "step": 125
    },
    {
      "epoch": 1.040241448692153,
      "grad_norm": 15.86595630645752,
      "learning_rate": 9.663547991074129e-06,
      "loss": 0.4909,
      "step": 130
    },
    {
      "epoch": 1.080482897384306,
      "grad_norm": 9.43641185760498,
      "learning_rate": 9.611335054321576e-06,
      "loss": 0.3678,
      "step": 135
    },
    {
      "epoch": 1.1207243460764587,
      "grad_norm": 8.751080513000488,
      "learning_rate": 9.555519936590739e-06,
      "loss": 0.3778,
      "step": 140
    },
    {
      "epoch": 1.1609657947686116,
      "grad_norm": 6.040717601776123,
      "learning_rate": 9.496146238303846e-06,
      "loss": 0.3935,
      "step": 145
    },
    {
      "epoch": 1.2012072434607646,
      "grad_norm": 8.183173179626465,
      "learning_rate": 9.433260339696564e-06,
      "loss": 0.3653,
      "step": 150
    },
    {
      "epoch": 1.2414486921529175,
      "grad_norm": 7.956486701965332,
      "learning_rate": 9.366911364587726e-06,
      "loss": 0.3123,
      "step": 155
    },
    {
      "epoch": 1.2816901408450705,
      "grad_norm": 11.906622886657715,
      "learning_rate": 9.297151142005852e-06,
      "loss": 0.335,
      "step": 160
    },
    {
      "epoch": 1.3219315895372232,
      "grad_norm": 6.626152515411377,
      "learning_rate": 9.224034165702506e-06,
      "loss": 0.3211,
      "step": 165
    },
    {
      "epoch": 1.3621730382293762,
      "grad_norm": 7.222238063812256,
      "learning_rate": 9.147617551584066e-06,
      "loss": 0.2317,
      "step": 170
    },
    {
      "epoch": 1.4024144869215291,
      "grad_norm": 6.305289268493652,
      "learning_rate": 9.067960993095176e-06,
      "loss": 0.2209,
      "step": 175
    },
    {
      "epoch": 1.442655935613682,
      "grad_norm": 7.212320804595947,
      "learning_rate": 8.985126714588739e-06,
      "loss": 0.2155,
      "step": 180
    },
    {
      "epoch": 1.482897384305835,
      "grad_norm": 6.313644886016846,
      "learning_rate": 8.899179422718877e-06,
      "loss": 0.2051,
      "step": 185
    },
    {
      "epoch": 1.5231388329979878,
      "grad_norm": 7.122276782989502,
      "learning_rate": 8.810186255894804e-06,
      "loss": 0.2192,
      "step": 190
    },
    {
      "epoch": 1.563380281690141,
      "grad_norm": 5.341350555419922,
      "learning_rate": 8.718216731835131e-06,
      "loss": 0.1937,
      "step": 195
    },
    {
      "epoch": 1.6036217303822937,
      "grad_norm": 3.3275582790374756,
      "learning_rate": 8.623342693263549e-06,
      "loss": 0.1281,
      "step": 200
    },
    {
      "epoch": 1.6438631790744467,
      "grad_norm": 6.510648727416992,
      "learning_rate": 8.525638251788312e-06,
      "loss": 0.1872,
      "step": 205
    },
    {
      "epoch": 1.6841046277665996,
      "grad_norm": 14.44485855102539,
      "learning_rate": 8.425179730009368e-06,
      "loss": 0.1722,
      "step": 210
    },
    {
      "epoch": 1.7243460764587524,
      "grad_norm": 5.061001777648926,
      "learning_rate": 8.322045601898354e-06,
      "loss": 0.1602,
      "step": 215
    },
    {
      "epoch": 1.7645875251509056,
      "grad_norm": 4.675739288330078,
      "learning_rate": 8.216316431498028e-06,
      "loss": 0.1034,
      "step": 220
    },
    {
      "epoch": 1.8048289738430583,
      "grad_norm": 4.156198501586914,
      "learning_rate": 8.108074809989032e-06,
      "loss": 0.1249,
      "step": 225
    },
    {
      "epoch": 1.8450704225352113,
      "grad_norm": 6.796616554260254,
      "learning_rate": 7.99740529117313e-06,
      "loss": 0.1205,
      "step": 230
    },
    {
      "epoch": 1.8853118712273642,
      "grad_norm": 6.765830039978027,
      "learning_rate": 7.88439432542334e-06,
      "loss": 0.1609,
      "step": 235
    },
    {
      "epoch": 1.925553319919517,
      "grad_norm": 3.9120049476623535,
      "learning_rate": 7.769130192152538e-06,
      "loss": 0.0915,
      "step": 240
    },
    {
      "epoch": 1.9657947686116701,
      "grad_norm": 3.127553701400757,
      "learning_rate": 7.651702930853287e-06,
      "loss": 0.0684,
      "step": 245
    },
    {
      "epoch": 2.0,
      "grad_norm": 3.80415415763855,
      "learning_rate": 7.532204270762786e-06,
      "loss": 0.1202,
      "step": 250
    },
    {
      "epoch": 2.0402414486921527,
      "grad_norm": 3.3256561756134033,
      "learning_rate": 7.4107275592078345e-06,
      "loss": 0.0578,
      "step": 255
    },
    {
      "epoch": 2.080482897384306,
      "grad_norm": 4.8566083908081055,
      "learning_rate": 7.287367688685835e-06,
      "loss": 0.0424,
      "step": 260
    },
    {
      "epoch": 2.1207243460764587,
      "grad_norm": 2.9243128299713135,
      "learning_rate": 7.162221022738768e-06,
      "loss": 0.0489,
      "step": 265
    },
    {
      "epoch": 2.160965794768612,
      "grad_norm": 2.4397518634796143,
      "learning_rate": 7.035385320678035e-06,
      "loss": 0.0917,
      "step": 270
    },
    {
      "epoch": 2.2012072434607646,
      "grad_norm": 3.2336502075195312,
      "learning_rate": 6.906959661219011e-06,
      "loss": 0.0401,
      "step": 275
    },
    {
      "epoch": 2.2414486921529173,
      "grad_norm": 4.65470027923584,
      "learning_rate": 6.777044365084907e-06,
      "loss": 0.0566,
      "step": 280
    },
    {
      "epoch": 2.2816901408450705,
      "grad_norm": 3.8202149868011475,
      "learning_rate": 6.645740916640449e-06,
      "loss": 0.0461,
      "step": 285
    },
    {
      "epoch": 2.3219315895372232,
      "grad_norm": 5.353789329528809,
      "learning_rate": 6.513151884616556e-06,
      "loss": 0.1036,
      "step": 290
    },
    {
      "epoch": 2.3621730382293764,
      "grad_norm": 5.688483238220215,
      "learning_rate": 6.379380841987965e-06,
      "loss": 0.0917,
      "step": 295
    },
    {
      "epoch": 2.402414486921529,
      "grad_norm": 4.736418724060059,
      "learning_rate": 6.244532285066382e-06,
      "loss": 0.0689,
      "step": 300
    },
    {
      "epoch": 2.442655935613682,
      "grad_norm": 3.6202309131622314,
      "learning_rate": 6.108711551872347e-06,
      "loss": 0.05,
      "step": 305
    },
    {
      "epoch": 2.482897384305835,
      "grad_norm": 2.6547493934631348,
      "learning_rate": 5.972024739849622e-06,
      "loss": 0.0463,
      "step": 310
    },
    {
      "epoch": 2.523138832997988,
      "grad_norm": 1.9001390933990479,
      "learning_rate": 5.83457862298631e-06,
      "loss": 0.0389,
      "step": 315
    },
    {
      "epoch": 2.563380281690141,
      "grad_norm": 5.619109153747559,
      "learning_rate": 5.696480568407523e-06,
      "loss": 0.0542,
      "step": 320
    },
    {
      "epoch": 2.6036217303822937,
      "grad_norm": 3.106781244277954,
      "learning_rate": 5.557838452504692e-06,
      "loss": 0.0403,
      "step": 325
    },
    {
      "epoch": 2.6438631790744465,
      "grad_norm": 3.7563529014587402,
      "learning_rate": 5.418760576667071e-06,
      "loss": 0.0551,
      "step": 330
    },
    {
      "epoch": 2.6841046277665996,
      "grad_norm": 5.184078216552734,
      "learning_rate": 5.2793555826812456e-06,
      "loss": 0.054,
      "step": 335
    },
    {
      "epoch": 2.7243460764587524,
      "grad_norm": 3.3755650520324707,
      "learning_rate": 5.139732367864736e-06,
      "loss": 0.0341,
      "step": 340
    },
    {
      "epoch": 2.7645875251509056,
      "grad_norm": 3.0244081020355225,
      "learning_rate": 5e-06,
      "loss": 0.0337,
      "step": 345
    },
    {
      "epoch": 2.8048289738430583,
      "grad_norm": 2.2510271072387695,
      "learning_rate": 4.8602676321352646e-06,
      "loss": 0.0371,
      "step": 350
    },
    {
      "epoch": 2.845070422535211,
      "grad_norm": 2.3468408584594727,
      "learning_rate": 4.720644417318755e-06,
      "loss": 0.017,
      "step": 355
    },
    {
      "epoch": 2.885311871227364,
      "grad_norm": 1.6513224840164185,
      "learning_rate": 4.58123942333293e-06,
      "loss": 0.0333,
      "step": 360
    },
    {
      "epoch": 2.925553319919517,
      "grad_norm": 3.460078239440918,
      "learning_rate": 4.442161547495309e-06,
      "loss": 0.0308,
      "step": 365
    },
    {
      "epoch": 2.96579476861167,
      "grad_norm": 1.5945957899093628,
      "learning_rate": 4.303519431592479e-06,
      "loss": 0.0385,
      "step": 370
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.9703298211097717,
      "learning_rate": 4.165421377013691e-06,
      "loss": 0.0245,
      "step": 375
    }
  ],
  "logging_steps": 5,
  "max_steps": 625,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.866166680327815e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}