{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 31.25,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.56,
      "grad_norm": 1.203125,
      "learning_rate": 3e-05,
      "loss": 1.3475,
      "step": 25
    },
    {
      "epoch": 3.12,
      "grad_norm": 1.6328125,
      "learning_rate": 3e-05,
      "loss": 0.944,
      "step": 50
    },
    {
      "epoch": 4.69,
      "grad_norm": 2.390625,
      "learning_rate": 3e-05,
      "loss": 0.6433,
      "step": 75
    },
    {
      "epoch": 6.25,
      "grad_norm": 2.953125,
      "learning_rate": 3e-05,
      "loss": 0.3333,
      "step": 100
    },
    {
      "epoch": 7.81,
      "grad_norm": 2.453125,
      "learning_rate": 3e-05,
      "loss": 0.1399,
      "step": 125
    },
    {
      "epoch": 9.38,
      "grad_norm": 2.375,
      "learning_rate": 3e-05,
      "loss": 0.0672,
      "step": 150
    },
    {
      "epoch": 10.94,
      "grad_norm": 2.453125,
      "learning_rate": 3e-05,
      "loss": 0.0431,
      "step": 175
    },
    {
      "epoch": 12.5,
      "grad_norm": 2.546875,
      "learning_rate": 3e-05,
      "loss": 0.0302,
      "step": 200
    },
    {
      "epoch": 14.06,
      "grad_norm": 1.578125,
      "learning_rate": 3e-05,
      "loss": 0.0276,
      "step": 225
    },
    {
      "epoch": 15.62,
      "grad_norm": 1.75,
      "learning_rate": 3e-05,
      "loss": 0.0236,
      "step": 250
    },
    {
      "epoch": 17.19,
      "grad_norm": 1.359375,
      "learning_rate": 3e-05,
      "loss": 0.0223,
      "step": 275
    },
    {
      "epoch": 18.75,
      "grad_norm": 1.328125,
      "learning_rate": 3e-05,
      "loss": 0.0203,
      "step": 300
    },
    {
      "epoch": 20.31,
      "grad_norm": 0.87109375,
      "learning_rate": 3e-05,
      "loss": 0.0191,
      "step": 325
    },
    {
      "epoch": 21.88,
      "grad_norm": 1.3828125,
      "learning_rate": 3e-05,
      "loss": 0.0186,
      "step": 350
    },
    {
      "epoch": 23.44,
      "grad_norm": 1.5078125,
      "learning_rate": 3e-05,
      "loss": 0.0164,
      "step": 375
    },
    {
      "epoch": 25.0,
      "grad_norm": 1.9765625,
      "learning_rate": 3e-05,
      "loss": 0.0172,
      "step": 400
    },
    {
      "epoch": 26.56,
      "grad_norm": 1.234375,
      "learning_rate": 3e-05,
      "loss": 0.0148,
      "step": 425
    },
    {
      "epoch": 28.12,
      "grad_norm": 0.482421875,
      "learning_rate": 3e-05,
      "loss": 0.0154,
      "step": 450
    },
    {
      "epoch": 29.69,
      "grad_norm": 1.0625,
      "learning_rate": 3e-05,
      "loss": 0.0143,
      "step": 475
    },
    {
      "epoch": 31.25,
      "grad_norm": 0.8125,
      "learning_rate": 3e-05,
      "loss": 0.0144,
      "step": 500
    }
  ],
  "logging_steps": 25,
  "max_steps": 500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 32,
  "save_steps": 50,
  "total_flos": 1.5640413609804595e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}