{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 500.0,
  "eval_steps": 500,
  "global_step": 18000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 13.88888888888889,
      "grad_norm": 2.0625548362731934,
      "learning_rate": 4.8611111111111115e-05,
      "loss": 0.2419,
      "step": 500
    },
    {
      "epoch": 27.77777777777778,
      "grad_norm": 1.07987380027771,
      "learning_rate": 4.722222222222222e-05,
      "loss": 0.122,
      "step": 1000
    },
    {
      "epoch": 41.666666666666664,
      "grad_norm": 0.8302552700042725,
      "learning_rate": 4.5833333333333334e-05,
      "loss": 0.0991,
      "step": 1500
    },
    {
      "epoch": 55.55555555555556,
      "grad_norm": 3.696808099746704,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.0848,
      "step": 2000
    },
    {
      "epoch": 69.44444444444444,
      "grad_norm": 0.5040786266326904,
      "learning_rate": 4.305555555555556e-05,
      "loss": 0.0727,
      "step": 2500
    },
    {
      "epoch": 83.33333333333333,
      "grad_norm": 3.4218876361846924,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.0722,
      "step": 3000
    },
    {
      "epoch": 97.22222222222223,
      "grad_norm": 0.38914600014686584,
      "learning_rate": 4.027777777777778e-05,
      "loss": 0.0657,
      "step": 3500
    },
    {
      "epoch": 111.11111111111111,
      "grad_norm": 0.18231849372386932,
      "learning_rate": 3.888888888888889e-05,
      "loss": 0.0625,
      "step": 4000
    },
    {
      "epoch": 125.0,
      "grad_norm": 0.00844669621437788,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.06,
      "step": 4500
    },
    {
      "epoch": 138.88888888888889,
      "grad_norm": 0.024724161252379417,
      "learning_rate": 3.611111111111111e-05,
      "loss": 0.0554,
      "step": 5000
    },
    {
      "epoch": 152.77777777777777,
      "grad_norm": 0.017953308299183846,
      "learning_rate": 3.472222222222222e-05,
      "loss": 0.0506,
      "step": 5500
    },
    {
      "epoch": 166.66666666666666,
      "grad_norm": 0.007029552944004536,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.0416,
      "step": 6000
    },
    {
      "epoch": 180.55555555555554,
      "grad_norm": 0.09840937703847885,
      "learning_rate": 3.194444444444444e-05,
      "loss": 0.0439,
      "step": 6500
    },
    {
      "epoch": 194.44444444444446,
      "grad_norm": 0.010464319959282875,
      "learning_rate": 3.055555555555556e-05,
      "loss": 0.0378,
      "step": 7000
    },
    {
      "epoch": 208.33333333333334,
      "grad_norm": 0.6669019460678101,
      "learning_rate": 2.916666666666667e-05,
      "loss": 0.0372,
      "step": 7500
    },
    {
      "epoch": 222.22222222222223,
      "grad_norm": 5.722232818603516,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.0431,
      "step": 8000
    },
    {
      "epoch": 236.11111111111111,
      "grad_norm": 0.12137515097856522,
      "learning_rate": 2.6388888888888892e-05,
      "loss": 0.0362,
      "step": 8500
    },
    {
      "epoch": 250.0,
      "grad_norm": 0.2072753608226776,
      "learning_rate": 2.5e-05,
      "loss": 0.0318,
      "step": 9000
    },
    {
      "epoch": 263.8888888888889,
      "grad_norm": 0.7483288645744324,
      "learning_rate": 2.361111111111111e-05,
      "loss": 0.0314,
      "step": 9500
    },
    {
      "epoch": 277.77777777777777,
      "grad_norm": 0.004340521525591612,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.035,
      "step": 10000
    },
    {
      "epoch": 291.6666666666667,
      "grad_norm": 0.33232325315475464,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 0.0295,
      "step": 10500
    },
    {
      "epoch": 305.55555555555554,
      "grad_norm": 0.001907658763229847,
      "learning_rate": 1.9444444444444445e-05,
      "loss": 0.0265,
      "step": 11000
    },
    {
      "epoch": 319.44444444444446,
      "grad_norm": 0.03909081220626831,
      "learning_rate": 1.8055555555555555e-05,
      "loss": 0.0243,
      "step": 11500
    },
    {
      "epoch": 333.3333333333333,
      "grad_norm": 0.12255003303289413,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.0243,
      "step": 12000
    },
    {
      "epoch": 347.22222222222223,
      "grad_norm": 0.0019066839013248682,
      "learning_rate": 1.527777777777778e-05,
      "loss": 0.0218,
      "step": 12500
    },
    {
      "epoch": 361.1111111111111,
      "grad_norm": 0.01006140187382698,
      "learning_rate": 1.388888888888889e-05,
      "loss": 0.0252,
      "step": 13000
    },
    {
      "epoch": 375.0,
      "grad_norm": 0.0507582426071167,
      "learning_rate": 1.25e-05,
      "loss": 0.0235,
      "step": 13500
    },
    {
      "epoch": 388.8888888888889,
      "grad_norm": 0.001732513541355729,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.0227,
      "step": 14000
    },
    {
      "epoch": 402.77777777777777,
      "grad_norm": 0.039914872497320175,
      "learning_rate": 9.722222222222223e-06,
      "loss": 0.019,
      "step": 14500
    },
    {
      "epoch": 416.6666666666667,
      "grad_norm": 9.351898193359375,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.0216,
      "step": 15000
    },
    {
      "epoch": 430.55555555555554,
      "grad_norm": 0.0020057051442563534,
      "learning_rate": 6.944444444444445e-06,
      "loss": 0.0156,
      "step": 15500
    },
    {
      "epoch": 444.44444444444446,
      "grad_norm": 0.04586656391620636,
      "learning_rate": 5.555555555555556e-06,
      "loss": 0.0195,
      "step": 16000
    },
    {
      "epoch": 458.3333333333333,
      "grad_norm": 0.248296856880188,
      "learning_rate": 4.166666666666667e-06,
      "loss": 0.017,
      "step": 16500
    },
    {
      "epoch": 472.22222222222223,
      "grad_norm": 0.0010602438123896718,
      "learning_rate": 2.777777777777778e-06,
      "loss": 0.0144,
      "step": 17000
    },
    {
      "epoch": 486.1111111111111,
      "grad_norm": 0.0068945749662816525,
      "learning_rate": 1.388888888888889e-06,
      "loss": 0.0142,
      "step": 17500
    },
    {
      "epoch": 500.0,
      "grad_norm": 0.001057297340594232,
      "learning_rate": 0.0,
      "loss": 0.0155,
      "step": 18000
    },
    {
      "epoch": 500.0,
      "step": 18000,
      "total_flos": 2.208521704015872e+19,
      "train_loss": 0.0,
      "train_runtime": 1.8559,
      "train_samples_per_second": 153560.694,
      "train_steps_per_second": 9698.57
    }
  ],
  "logging_steps": 500,
  "max_steps": 18000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 500,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.208521704015872e+19,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}