{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 201,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07518796992481203,
      "grad_norm": 0.5843608975410461,
      "learning_rate": 7.058823529411765e-06,
      "loss": 1.8954,
      "step": 5
    },
    {
      "epoch": 0.15037593984962405,
      "grad_norm": 0.5829306244850159,
      "learning_rate": 1.5882352941176473e-05,
      "loss": 1.9066,
      "step": 10
    },
    {
      "epoch": 0.22556390977443608,
      "grad_norm": 0.4856240153312683,
      "learning_rate": 2.4705882352941174e-05,
      "loss": 1.8401,
      "step": 15
    },
    {
      "epoch": 0.3007518796992481,
      "grad_norm": 0.4811968505382538,
      "learning_rate": 2.9997072124327365e-05,
      "loss": 1.7568,
      "step": 20
    },
    {
      "epoch": 0.37593984962406013,
      "grad_norm": 0.4097922444343567,
      "learning_rate": 2.9964146648174195e-05,
      "loss": 1.7135,
      "step": 25
    },
    {
      "epoch": 0.45112781954887216,
      "grad_norm": 0.470956414937973,
      "learning_rate": 2.9894716440202756e-05,
      "loss": 1.663,
      "step": 30
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 0.46692782640457153,
      "learning_rate": 2.978895087399522e-05,
      "loss": 1.6288,
      "step": 35
    },
    {
      "epoch": 0.6015037593984962,
      "grad_norm": 0.4785483479499817,
      "learning_rate": 2.9647107962502205e-05,
      "loss": 1.5842,
      "step": 40
    },
    {
      "epoch": 0.6766917293233082,
      "grad_norm": 0.6028839349746704,
      "learning_rate": 2.946953372862538e-05,
      "loss": 1.5293,
      "step": 45
    },
    {
      "epoch": 0.7518796992481203,
      "grad_norm": 0.6228252649307251,
      "learning_rate": 2.9256661361101666e-05,
      "loss": 1.4678,
      "step": 50
    },
    {
      "epoch": 0.8270676691729323,
      "grad_norm": 0.6252663731575012,
      "learning_rate": 2.9009010157748082e-05,
      "loss": 1.4163,
      "step": 55
    },
    {
      "epoch": 0.9022556390977443,
      "grad_norm": 0.6308442950248718,
      "learning_rate": 2.8727184258645276e-05,
      "loss": 1.3788,
      "step": 60
    },
    {
      "epoch": 0.9774436090225563,
      "grad_norm": 0.7826851606369019,
      "learning_rate": 2.841187117235008e-05,
      "loss": 1.3428,
      "step": 65
    },
    {
      "epoch": 1.045112781954887,
      "grad_norm": 1.1327600479125977,
      "learning_rate": 2.8063840098732322e-05,
      "loss": 1.2124,
      "step": 70
    },
    {
      "epoch": 1.1203007518796992,
      "grad_norm": 1.054235577583313,
      "learning_rate": 2.768394005252739e-05,
      "loss": 1.1427,
      "step": 75
    },
    {
      "epoch": 1.1954887218045114,
      "grad_norm": 1.178091049194336,
      "learning_rate": 2.7273097792182038e-05,
      "loss": 1.1609,
      "step": 80
    },
    {
      "epoch": 1.2706766917293233,
      "grad_norm": 1.2119579315185547,
      "learning_rate": 2.6832315559045938e-05,
      "loss": 1.0633,
      "step": 85
    },
    {
      "epoch": 1.3458646616541352,
      "grad_norm": 1.0310696363449097,
      "learning_rate": 2.6362668632424302e-05,
      "loss": 1.0456,
      "step": 90
    },
    {
      "epoch": 1.4210526315789473,
      "grad_norm": 1.2468197345733643,
      "learning_rate": 2.586530270645584e-05,
      "loss": 1.0244,
      "step": 95
    },
    {
      "epoch": 1.4962406015037595,
      "grad_norm": 1.4294254779815674,
      "learning_rate": 2.534143109521518e-05,
      "loss": 0.989,
      "step": 100
    },
    {
      "epoch": 1.5714285714285714,
      "grad_norm": 1.7933841943740845,
      "learning_rate": 2.4792331772857826e-05,
      "loss": 0.9748,
      "step": 105
    },
    {
      "epoch": 1.6466165413533833,
      "grad_norm": 1.389110803604126,
      "learning_rate": 2.421934425602816e-05,
      "loss": 0.9058,
      "step": 110
    },
    {
      "epoch": 1.7218045112781954,
      "grad_norm": 1.226500153541565,
      "learning_rate": 2.3623866336135806e-05,
      "loss": 0.9148,
      "step": 115
    },
    {
      "epoch": 1.7969924812030076,
      "grad_norm": 1.363110899925232,
      "learning_rate": 2.3007350669471866e-05,
      "loss": 0.8737,
      "step": 120
    },
    {
      "epoch": 1.8721804511278195,
      "grad_norm": 1.4197795391082764,
      "learning_rate": 2.237130123348338e-05,
      "loss": 0.872,
      "step": 125
    },
    {
      "epoch": 1.9473684210526314,
      "grad_norm": 1.230918526649475,
      "learning_rate": 2.171726965785095e-05,
      "loss": 0.868,
      "step": 130
    },
    {
      "epoch": 2.0150375939849625,
      "grad_norm": 1.576594591140747,
      "learning_rate": 2.1046851439319587e-05,
      "loss": 0.7835,
      "step": 135
    },
    {
      "epoch": 2.090225563909774,
      "grad_norm": 1.4939583539962769,
      "learning_rate": 2.0361682049516837e-05,
      "loss": 0.6887,
      "step": 140
    },
    {
      "epoch": 2.1654135338345863,
      "grad_norm": 2.268726110458374,
      "learning_rate": 1.966343294525297e-05,
      "loss": 0.6642,
      "step": 145
    },
    {
      "epoch": 2.2406015037593985,
      "grad_norm": 1.6134439706802368,
      "learning_rate": 1.8953807491036015e-05,
      "loss": 0.6663,
      "step": 150
    },
    {
      "epoch": 2.3157894736842106,
      "grad_norm": 1.713577151298523,
      "learning_rate": 1.8234536803748657e-05,
      "loss": 0.6383,
      "step": 155
    },
    {
      "epoch": 2.3909774436090228,
      "grad_norm": 2.0117926597595215,
      "learning_rate": 1.7507375529623748e-05,
      "loss": 0.684,
      "step": 160
    },
    {
      "epoch": 2.4661654135338344,
      "grad_norm": 1.974694848060608,
      "learning_rate": 1.6774097563820486e-05,
      "loss": 0.6357,
      "step": 165
    },
    {
      "epoch": 2.5413533834586466,
      "grad_norm": 1.564803123474121,
      "learning_rate": 1.603649172304317e-05,
      "loss": 0.6048,
      "step": 170
    },
    {
      "epoch": 2.6165413533834587,
      "grad_norm": 2.4491689205169678,
      "learning_rate": 1.5296357381759197e-05,
      "loss": 0.586,
      "step": 175
    },
    {
      "epoch": 2.6917293233082704,
      "grad_norm": 1.540130615234375,
      "learning_rate": 1.4555500082661603e-05,
      "loss": 0.5548,
      "step": 180
    },
    {
      "epoch": 2.7669172932330826,
      "grad_norm": 1.8671647310256958,
      "learning_rate": 1.3815727132084322e-05,
      "loss": 0.581,
      "step": 185
    },
    {
      "epoch": 2.8421052631578947,
      "grad_norm": 2.341644525527954,
      "learning_rate": 1.3078843191115099e-05,
      "loss": 0.5204,
      "step": 190
    },
    {
      "epoch": 2.917293233082707,
      "grad_norm": 1.749429702758789,
      "learning_rate": 1.234664587316141e-05,
      "loss": 0.5241,
      "step": 195
    },
    {
      "epoch": 2.992481203007519,
      "grad_norm": 1.812374234199524,
      "learning_rate": 1.1620921358709076e-05,
      "loss": 0.5162,
      "step": 200
    }
  ],
  "logging_steps": 5,
  "max_steps": 335,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 2000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.3198236595532595e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}