{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 125,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04024144869215292,
      "grad_norm": 128.68338012695312,
      "learning_rate": 6.34920634920635e-07,
      "loss": 5.7142,
      "step": 5
    },
    {
      "epoch": 0.08048289738430583,
      "grad_norm": 124.65970611572266,
      "learning_rate": 1.4285714285714286e-06,
      "loss": 5.8126,
      "step": 10
    },
    {
      "epoch": 0.12072434607645875,
      "grad_norm": 63.15693664550781,
      "learning_rate": 2.222222222222222e-06,
      "loss": 4.9144,
      "step": 15
    },
    {
      "epoch": 0.16096579476861167,
      "grad_norm": 26.409881591796875,
      "learning_rate": 3.015873015873016e-06,
      "loss": 4.1026,
      "step": 20
    },
    {
      "epoch": 0.2012072434607646,
      "grad_norm": 18.272987365722656,
      "learning_rate": 3.80952380952381e-06,
      "loss": 3.8405,
      "step": 25
    },
    {
      "epoch": 0.2414486921529175,
      "grad_norm": 13.232375144958496,
      "learning_rate": 4.603174603174604e-06,
      "loss": 3.5374,
      "step": 30
    },
    {
      "epoch": 0.28169014084507044,
      "grad_norm": 50.732887268066406,
      "learning_rate": 5.396825396825397e-06,
      "loss": 3.3776,
      "step": 35
    },
    {
      "epoch": 0.32193158953722334,
      "grad_norm": 11.886248588562012,
      "learning_rate": 6.1904761904761914e-06,
      "loss": 3.2729,
      "step": 40
    },
    {
      "epoch": 0.36217303822937624,
      "grad_norm": 12.157098770141602,
      "learning_rate": 6.984126984126984e-06,
      "loss": 3.0134,
      "step": 45
    },
    {
      "epoch": 0.4024144869215292,
      "grad_norm": 11.598234176635742,
      "learning_rate": 7.77777777777778e-06,
      "loss": 2.9235,
      "step": 50
    },
    {
      "epoch": 0.4426559356136821,
      "grad_norm": 11.506558418273926,
      "learning_rate": 8.571428571428571e-06,
      "loss": 2.584,
      "step": 55
    },
    {
      "epoch": 0.482897384305835,
      "grad_norm": 14.368203163146973,
      "learning_rate": 9.365079365079366e-06,
      "loss": 2.4279,
      "step": 60
    },
    {
      "epoch": 0.5231388329979879,
      "grad_norm": 12.335148811340332,
      "learning_rate": 9.999921879324127e-06,
      "loss": 2.244,
      "step": 65
    },
    {
      "epoch": 0.5633802816901409,
      "grad_norm": 17.981578826904297,
      "learning_rate": 9.997187911979252e-06,
      "loss": 2.0648,
      "step": 70
    },
    {
      "epoch": 0.6036217303822937,
      "grad_norm": 12.924208641052246,
      "learning_rate": 9.990550351633784e-06,
      "loss": 2.0333,
      "step": 75
    },
    {
      "epoch": 0.6438631790744467,
      "grad_norm": 36.98970031738281,
      "learning_rate": 9.980014383270668e-06,
      "loss": 1.7114,
      "step": 80
    },
    {
      "epoch": 0.6841046277665996,
      "grad_norm": 11.499068260192871,
      "learning_rate": 9.965588237145219e-06,
      "loss": 1.725,
      "step": 85
    },
    {
      "epoch": 0.7243460764587525,
      "grad_norm": 13.697612762451172,
      "learning_rate": 9.947283182355982e-06,
      "loss": 1.7076,
      "step": 90
    },
    {
      "epoch": 0.7645875251509054,
      "grad_norm": 11.36514663696289,
      "learning_rate": 9.925113518041796e-06,
      "loss": 1.4663,
      "step": 95
    },
    {
      "epoch": 0.8048289738430584,
      "grad_norm": 11.20559024810791,
      "learning_rate": 9.899096562211902e-06,
      "loss": 1.1067,
      "step": 100
    },
    {
      "epoch": 0.8450704225352113,
      "grad_norm": 10.782366752624512,
      "learning_rate": 9.869252638217846e-06,
      "loss": 1.2299,
      "step": 105
    },
    {
      "epoch": 0.8853118712273642,
      "grad_norm": 12.094294548034668,
      "learning_rate": 9.83560505887773e-06,
      "loss": 0.9908,
      "step": 110
    },
    {
      "epoch": 0.9255533199195171,
      "grad_norm": 10.271987915039062,
      "learning_rate": 9.798180108265218e-06,
      "loss": 0.8628,
      "step": 115
    },
    {
      "epoch": 0.96579476861167,
      "grad_norm": 13.298333168029785,
      "learning_rate": 9.757007021177529e-06,
      "loss": 0.7913,
      "step": 120
    },
    {
      "epoch": 1.0,
      "grad_norm": 7.651159286499023,
      "learning_rate": 9.712117960298433e-06,
      "loss": 0.6121,
      "step": 125
    }
  ],
  "logging_steps": 5,
  "max_steps": 625,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9.553888784102195e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}