{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 16.46840148698885,
  "eval_steps": 500,
  "global_step": 8860,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.93,
      "grad_norm": 1.3550214767456055,
      "learning_rate": 2.8605947955390337e-05,
      "loss": 0.6834,
      "step": 500
    },
    {
      "epoch": 0.93,
      "eval_loss": 0.5703962445259094,
      "eval_runtime": 6.1216,
      "eval_samples_per_second": 248.138,
      "eval_steps_per_second": 15.519,
      "step": 500
    },
    {
      "epoch": 1.86,
      "grad_norm": 0.7966482043266296,
      "learning_rate": 2.721189591078067e-05,
      "loss": 0.5019,
      "step": 1000
    },
    {
      "epoch": 1.86,
      "eval_loss": 0.5145730972290039,
      "eval_runtime": 6.1234,
      "eval_samples_per_second": 248.066,
      "eval_steps_per_second": 15.514,
      "step": 1000
    },
    {
      "epoch": 2.79,
      "grad_norm": 0.9648380875587463,
      "learning_rate": 2.5817843866171006e-05,
      "loss": 0.4522,
      "step": 1500
    },
    {
      "epoch": 2.79,
      "eval_loss": 0.48453015089035034,
      "eval_runtime": 6.0895,
      "eval_samples_per_second": 249.446,
      "eval_steps_per_second": 15.601,
      "step": 1500
    },
    {
      "epoch": 3.72,
      "grad_norm": 0.7465741038322449,
      "learning_rate": 2.442379182156134e-05,
      "loss": 0.4105,
      "step": 2000
    },
    {
      "epoch": 3.72,
      "eval_loss": 0.4673249125480652,
      "eval_runtime": 6.11,
      "eval_samples_per_second": 248.609,
      "eval_steps_per_second": 15.548,
      "step": 2000
    },
    {
      "epoch": 4.65,
      "grad_norm": 0.8333841562271118,
      "learning_rate": 2.302973977695167e-05,
      "loss": 0.3705,
      "step": 2500
    },
    {
      "epoch": 4.65,
      "eval_loss": 0.4610813856124878,
      "eval_runtime": 6.0863,
      "eval_samples_per_second": 249.578,
      "eval_steps_per_second": 15.609,
      "step": 2500
    },
    {
      "epoch": 5.58,
      "grad_norm": 0.8087735176086426,
      "learning_rate": 2.1635687732342008e-05,
      "loss": 0.3484,
      "step": 3000
    },
    {
      "epoch": 5.58,
      "eval_loss": 0.44604435563087463,
      "eval_runtime": 6.1053,
      "eval_samples_per_second": 248.8,
      "eval_steps_per_second": 15.56,
      "step": 3000
    },
    {
      "epoch": 6.51,
      "grad_norm": 0.8467231392860413,
      "learning_rate": 2.0241635687732344e-05,
      "loss": 0.33,
      "step": 3500
    },
    {
      "epoch": 6.51,
      "eval_loss": 0.4419492185115814,
      "eval_runtime": 6.1029,
      "eval_samples_per_second": 248.896,
      "eval_steps_per_second": 15.566,
      "step": 3500
    },
    {
      "epoch": 7.43,
      "grad_norm": 0.9491092562675476,
      "learning_rate": 1.8847583643122676e-05,
      "loss": 0.3068,
      "step": 4000
    },
    {
      "epoch": 7.43,
      "eval_loss": 0.4374473989009857,
      "eval_runtime": 6.0863,
      "eval_samples_per_second": 249.578,
      "eval_steps_per_second": 15.609,
      "step": 4000
    },
    {
      "epoch": 8.36,
      "grad_norm": 0.5728708505630493,
      "learning_rate": 1.7453531598513013e-05,
      "loss": 0.2877,
      "step": 4500
    },
    {
      "epoch": 8.36,
      "eval_loss": 0.4360838234424591,
      "eval_runtime": 6.109,
      "eval_samples_per_second": 248.648,
      "eval_steps_per_second": 15.551,
      "step": 4500
    },
    {
      "epoch": 9.29,
      "grad_norm": 0.9121451377868652,
      "learning_rate": 1.6059479553903345e-05,
      "loss": 0.2774,
      "step": 5000
    },
    {
      "epoch": 9.29,
      "eval_loss": 0.4340021312236786,
      "eval_runtime": 6.1145,
      "eval_samples_per_second": 248.424,
      "eval_steps_per_second": 15.537,
      "step": 5000
    },
    {
      "epoch": 10.22,
      "grad_norm": 0.6917915344238281,
      "learning_rate": 1.4665427509293682e-05,
      "loss": 0.2643,
      "step": 5500
    },
    {
      "epoch": 10.22,
      "eval_loss": 0.4294339716434479,
      "eval_runtime": 6.1237,
      "eval_samples_per_second": 248.054,
      "eval_steps_per_second": 15.514,
      "step": 5500
    },
    {
      "epoch": 11.15,
      "grad_norm": 0.5932989120483398,
      "learning_rate": 1.3271375464684014e-05,
      "loss": 0.2521,
      "step": 6000
    },
    {
      "epoch": 11.15,
      "eval_loss": 0.429922491312027,
      "eval_runtime": 6.1392,
      "eval_samples_per_second": 247.428,
      "eval_steps_per_second": 15.474,
      "step": 6000
    },
    {
      "epoch": 12.08,
      "grad_norm": 0.6674472689628601,
      "learning_rate": 1.187732342007435e-05,
      "loss": 0.2445,
      "step": 6500
    },
    {
      "epoch": 12.08,
      "eval_loss": 0.43110528588294983,
      "eval_runtime": 6.2182,
      "eval_samples_per_second": 244.285,
      "eval_steps_per_second": 15.278,
      "step": 6500
    },
    {
      "epoch": 13.01,
      "grad_norm": 0.8294160962104797,
      "learning_rate": 1.0483271375464685e-05,
      "loss": 0.2343,
      "step": 7000
    },
    {
      "epoch": 13.01,
      "eval_loss": 0.42911913990974426,
      "eval_runtime": 6.1036,
      "eval_samples_per_second": 248.87,
      "eval_steps_per_second": 15.565,
      "step": 7000
    },
    {
      "epoch": 13.94,
      "grad_norm": 0.9937490224838257,
      "learning_rate": 9.089219330855018e-06,
      "loss": 0.227,
      "step": 7500
    },
    {
      "epoch": 13.94,
      "eval_loss": 0.4279896914958954,
      "eval_runtime": 6.1085,
      "eval_samples_per_second": 248.671,
      "eval_steps_per_second": 15.552,
      "step": 7500
    },
    {
      "epoch": 14.87,
      "grad_norm": 0.7265322804450989,
      "learning_rate": 7.695167286245354e-06,
      "loss": 0.2231,
      "step": 8000
    },
    {
      "epoch": 14.87,
      "eval_loss": 0.4289594292640686,
      "eval_runtime": 6.1309,
      "eval_samples_per_second": 247.763,
      "eval_steps_per_second": 15.495,
      "step": 8000
    },
    {
      "epoch": 15.8,
      "grad_norm": 2.6667771339416504,
      "learning_rate": 6.301115241635688e-06,
      "loss": 0.2154,
      "step": 8500
    },
    {
      "epoch": 15.8,
      "eval_loss": 0.4316449761390686,
      "eval_runtime": 6.1029,
      "eval_samples_per_second": 248.899,
      "eval_steps_per_second": 15.566,
      "step": 8500
    }
  ],
  "logging_steps": 500,
  "max_steps": 10760,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 4430,
  "total_flos": 1.0656359168606208e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}