{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.3016430675325597,
  "eval_steps": 500,
  "global_step": 8500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 0.00029822562901451436,
      "loss": 2.0946,
      "step": 500
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0002964512580290287,
      "loss": 1.8641,
      "step": 1000
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00029467688704354307,
      "loss": 1.7932,
      "step": 1500
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0002929025160580574,
      "loss": 1.7361,
      "step": 2000
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0002911281450725718,
      "loss": 1.7503,
      "step": 2500
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0002893537740870861,
      "loss": 1.6991,
      "step": 3000
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0002875794031016005,
      "loss": 1.6943,
      "step": 3500
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0002858050321161148,
      "loss": 1.6897,
      "step": 4000
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0002840306611306292,
      "loss": 1.6749,
      "step": 4500
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0002822562901451435,
      "loss": 1.6142,
      "step": 5000
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0002804819191596579,
      "loss": 1.6275,
      "step": 5500
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00027870754817417223,
      "loss": 1.6408,
      "step": 6000
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.0002769331771886866,
      "loss": 1.5969,
      "step": 6500
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00027515880620320094,
      "loss": 1.608,
      "step": 7000
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0002733844352177153,
      "loss": 1.5868,
      "step": 7500
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00027161006423222965,
      "loss": 1.5849,
      "step": 8000
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00026983569324674403,
      "loss": 1.6047,
      "step": 8500
    }
  ],
  "logging_steps": 500,
  "max_steps": 84537,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 1.0108955101519872e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}