{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.154068090017311,
  "eval_steps": 500,
  "global_step": 6000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09617234083477592,
      "grad_norm": 98.88580322265625,
      "learning_rate": 9.520100019234469e-06,
      "loss": -95.731,
      "step": 500
    },
    {
      "epoch": 0.19234468166955185,
      "grad_norm": 185.990234375,
      "learning_rate": 9.039238315060588e-06,
      "loss": -149.7462,
      "step": 1000
    },
    {
      "epoch": 0.28851702250432776,
      "grad_norm": 158.8727264404297,
      "learning_rate": 8.55837661088671e-06,
      "loss": -154.5378,
      "step": 1500
    },
    {
      "epoch": 0.3846893633391037,
      "grad_norm": 181.66783142089844,
      "learning_rate": 8.07751490671283e-06,
      "loss": -157.9401,
      "step": 2000
    },
    {
      "epoch": 0.4808617041738796,
      "grad_norm": 118.4883804321289,
      "learning_rate": 7.596653202538951e-06,
      "loss": -159.6488,
      "step": 2500
    },
    {
      "epoch": 0.5770340450086555,
      "grad_norm": 158.15525817871094,
      "learning_rate": 7.115791498365071e-06,
      "loss": -162.1141,
      "step": 3000
    },
    {
      "epoch": 0.6732063858434314,
      "grad_norm": 224.30747985839844,
      "learning_rate": 6.6349297941911915e-06,
      "loss": -164.0697,
      "step": 3500
    },
    {
      "epoch": 0.7693787266782074,
      "grad_norm": 186.41622924804688,
      "learning_rate": 6.154068090017311e-06,
      "loss": -166.0416,
      "step": 4000
    },
    {
      "epoch": 0.8655510675129833,
      "grad_norm": 211.46307373046875,
      "learning_rate": 5.673206385843431e-06,
      "loss": -167.003,
      "step": 4500
    },
    {
      "epoch": 0.9617234083477592,
      "grad_norm": 262.0639953613281,
      "learning_rate": 5.1923446816695525e-06,
      "loss": -168.8809,
      "step": 5000
    },
    {
      "epoch": 1.0578957491825352,
      "grad_norm": 274.6922302246094,
      "learning_rate": 4.711482977495673e-06,
      "loss": -170.0935,
      "step": 5500
    },
    {
      "epoch": 1.154068090017311,
      "grad_norm": 202.4212646484375,
      "learning_rate": 4.230621273321793e-06,
      "loss": -171.5855,
      "step": 6000
    }
  ],
  "logging_steps": 500,
  "max_steps": 10398,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}