{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 171,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08771929824561403,
      "grad_norm": 0.6626073718070984,
      "learning_rate": 8e-06,
      "loss": 1.9742,
      "step": 5
    },
    {
      "epoch": 0.17543859649122806,
      "grad_norm": 0.5849995017051697,
      "learning_rate": 1.8e-05,
      "loss": 1.9156,
      "step": 10
    },
    {
      "epoch": 0.2631578947368421,
      "grad_norm": 0.46837282180786133,
      "learning_rate": 2.8e-05,
      "loss": 1.8546,
      "step": 15
    },
    {
      "epoch": 0.3508771929824561,
      "grad_norm": 0.5414826273918152,
      "learning_rate": 2.998375667007787e-05,
      "loss": 1.7517,
      "step": 20
    },
    {
      "epoch": 0.43859649122807015,
      "grad_norm": 0.469635546207428,
      "learning_rate": 2.99178284305241e-05,
      "loss": 1.733,
      "step": 25
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 0.4930901825428009,
      "learning_rate": 2.980142298168869e-05,
      "loss": 1.6696,
      "step": 30
    },
    {
      "epoch": 0.6140350877192983,
      "grad_norm": 0.5132279992103577,
      "learning_rate": 2.9634934202763214e-05,
      "loss": 1.6134,
      "step": 35
    },
    {
      "epoch": 0.7017543859649122,
      "grad_norm": 0.5949628949165344,
      "learning_rate": 2.9418925439074784e-05,
      "loss": 1.5153,
      "step": 40
    },
    {
      "epoch": 0.7894736842105263,
      "grad_norm": 0.6162750720977783,
      "learning_rate": 2.9154127595903755e-05,
      "loss": 1.4365,
      "step": 45
    },
    {
      "epoch": 0.8771929824561403,
      "grad_norm": 0.6752856969833374,
      "learning_rate": 2.8841436665331634e-05,
      "loss": 1.4368,
      "step": 50
    },
    {
      "epoch": 0.9649122807017544,
      "grad_norm": 0.7573911547660828,
      "learning_rate": 2.8481910694487507e-05,
      "loss": 1.402,
      "step": 55
    },
    {
      "epoch": 1.0526315789473684,
      "grad_norm": 1.0285247564315796,
      "learning_rate": 2.8076766205451435e-05,
      "loss": 1.3013,
      "step": 60
    },
    {
      "epoch": 1.1403508771929824,
      "grad_norm": 0.8978292942047119,
      "learning_rate": 2.7627374078928862e-05,
      "loss": 1.2467,
      "step": 65
    },
    {
      "epoch": 1.2280701754385965,
      "grad_norm": 0.9425483345985413,
      "learning_rate": 2.7135254915624213e-05,
      "loss": 1.2154,
      "step": 70
    },
    {
      "epoch": 1.3157894736842106,
      "grad_norm": 0.9950088858604431,
      "learning_rate": 2.6602073891009458e-05,
      "loss": 1.0844,
      "step": 75
    },
    {
      "epoch": 1.4035087719298245,
      "grad_norm": 1.19395112991333,
      "learning_rate": 2.6029635120897434e-05,
      "loss": 1.0669,
      "step": 80
    },
    {
      "epoch": 1.4912280701754386,
      "grad_norm": 1.1038938760757446,
      "learning_rate": 2.541987555688496e-05,
      "loss": 1.0143,
      "step": 85
    },
    {
      "epoch": 1.5789473684210527,
      "grad_norm": 1.1312118768692017,
      "learning_rate": 2.477485843232183e-05,
      "loss": 0.949,
      "step": 90
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 1.2909584045410156,
      "learning_rate": 2.4096766280982204e-05,
      "loss": 0.9563,
      "step": 95
    },
    {
      "epoch": 1.7543859649122808,
      "grad_norm": 1.283607840538025,
      "learning_rate": 2.3387893552061202e-05,
      "loss": 0.9226,
      "step": 100
    },
    {
      "epoch": 1.8421052631578947,
      "grad_norm": 1.3236758708953857,
      "learning_rate": 2.265063884648513e-05,
      "loss": 0.8785,
      "step": 105
    },
    {
      "epoch": 1.9298245614035088,
      "grad_norm": 1.3031620979309082,
      "learning_rate": 2.1887496800805175e-05,
      "loss": 0.8263,
      "step": 110
    },
    {
      "epoch": 2.017543859649123,
      "grad_norm": 1.6206766366958618,
      "learning_rate": 2.1101049646137008e-05,
      "loss": 0.7628,
      "step": 115
    },
    {
      "epoch": 2.1052631578947367,
      "grad_norm": 1.84587824344635,
      "learning_rate": 2.029395847070803e-05,
      "loss": 0.7357,
      "step": 120
    },
    {
      "epoch": 2.192982456140351,
      "grad_norm": 1.5857688188552856,
      "learning_rate": 1.9468954215577227e-05,
      "loss": 0.6546,
      "step": 125
    },
    {
      "epoch": 2.280701754385965,
      "grad_norm": 1.3934041261672974,
      "learning_rate": 1.8628828433995013e-05,
      "loss": 0.6807,
      "step": 130
    },
    {
      "epoch": 2.3684210526315788,
      "grad_norm": 1.8430525064468384,
      "learning_rate": 1.777642384567072e-05,
      "loss": 0.6609,
      "step": 135
    },
    {
      "epoch": 2.456140350877193,
      "grad_norm": 1.450028657913208,
      "learning_rate": 1.6914624717908922e-05,
      "loss": 0.6439,
      "step": 140
    },
    {
      "epoch": 2.543859649122807,
      "grad_norm": 1.610876441001892,
      "learning_rate": 1.604634710616188e-05,
      "loss": 0.5813,
      "step": 145
    },
    {
      "epoch": 2.6315789473684212,
      "grad_norm": 2.032411575317383,
      "learning_rate": 1.5174528987020958e-05,
      "loss": 0.5937,
      "step": 150
    },
    {
      "epoch": 2.719298245614035,
      "grad_norm": 1.7907747030258179,
      "learning_rate": 1.43021203170338e-05,
      "loss": 0.5612,
      "step": 155
    },
    {
      "epoch": 2.807017543859649,
      "grad_norm": 1.6159515380859375,
      "learning_rate": 1.3432073050985201e-05,
      "loss": 0.5519,
      "step": 160
    },
    {
      "epoch": 2.8947368421052633,
      "grad_norm": 1.756583333015442,
      "learning_rate": 1.256733115341649e-05,
      "loss": 0.5585,
      "step": 165
    },
    {
      "epoch": 2.982456140350877,
      "grad_norm": 1.916366696357727,
      "learning_rate": 1.1710820637181449e-05,
      "loss": 0.5336,
      "step": 170
    }
  ],
  "logging_steps": 5,
  "max_steps": 285,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 2000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.7426598474101555e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}