{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 15.151515151515152,
  "eval_steps": 1000,
  "global_step": 7000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.65,
      "learning_rate": 6.493506493506493e-07,
      "loss": 2.3458,
      "step": 300
    },
    {
      "epoch": 1.3,
      "learning_rate": 1.2987012987012986e-06,
      "loss": 2.3309,
      "step": 600
    },
    {
      "epoch": 1.95,
      "learning_rate": 1.9480519480519483e-06,
      "loss": 2.2872,
      "step": 900
    },
    {
      "epoch": 2.16,
      "eval_loss": 2.2653801441192627,
      "eval_runtime": 40.5821,
      "eval_samples_per_second": 64.166,
      "eval_steps_per_second": 4.017,
      "step": 1000
    },
    {
      "epoch": 2.6,
      "learning_rate": 2.597402597402597e-06,
      "loss": 2.2593,
      "step": 1200
    },
    {
      "epoch": 3.25,
      "learning_rate": 3.246753246753247e-06,
      "loss": 2.2332,
      "step": 1500
    },
    {
      "epoch": 3.9,
      "learning_rate": 3.896103896103897e-06,
      "loss": 2.1927,
      "step": 1800
    },
    {
      "epoch": 4.33,
      "eval_loss": 2.181955575942993,
      "eval_runtime": 40.5637,
      "eval_samples_per_second": 64.195,
      "eval_steps_per_second": 4.018,
      "step": 2000
    },
    {
      "epoch": 4.55,
      "learning_rate": 4.5454545454545455e-06,
      "loss": 2.1735,
      "step": 2100
    },
    {
      "epoch": 5.19,
      "learning_rate": 5.194805194805194e-06,
      "loss": 2.1703,
      "step": 2400
    },
    {
      "epoch": 5.84,
      "learning_rate": 5.844155844155844e-06,
      "loss": 2.1335,
      "step": 2700
    },
    {
      "epoch": 6.49,
      "learning_rate": 6.493506493506494e-06,
      "loss": 2.1158,
      "step": 3000
    },
    {
      "epoch": 6.49,
      "eval_loss": 2.119281530380249,
      "eval_runtime": 40.6723,
      "eval_samples_per_second": 64.024,
      "eval_steps_per_second": 4.008,
      "step": 3000
    },
    {
      "epoch": 7.14,
      "learning_rate": 7.1428571428571436e-06,
      "loss": 2.1031,
      "step": 3300
    },
    {
      "epoch": 7.79,
      "learning_rate": 7.792207792207793e-06,
      "loss": 2.0958,
      "step": 3600
    },
    {
      "epoch": 8.44,
      "learning_rate": 8.441558441558442e-06,
      "loss": 2.0816,
      "step": 3900
    },
    {
      "epoch": 8.66,
      "eval_loss": 2.083141803741455,
      "eval_runtime": 40.6586,
      "eval_samples_per_second": 64.045,
      "eval_steps_per_second": 4.009,
      "step": 4000
    },
    {
      "epoch": 9.09,
      "learning_rate": 9.090909090909091e-06,
      "loss": 2.07,
      "step": 4200
    },
    {
      "epoch": 9.74,
      "learning_rate": 9.740259740259742e-06,
      "loss": 2.0637,
      "step": 4500
    },
    {
      "epoch": 10.39,
      "learning_rate": 9.96259251245514e-06,
      "loss": 2.0616,
      "step": 4800
    },
    {
      "epoch": 10.82,
      "eval_loss": 2.0597293376922607,
      "eval_runtime": 40.7285,
      "eval_samples_per_second": 63.936,
      "eval_steps_per_second": 4.002,
      "step": 5000
    },
    {
      "epoch": 11.04,
      "learning_rate": 9.736015051459551e-06,
      "loss": 2.0349,
      "step": 5100
    },
    {
      "epoch": 11.69,
      "learning_rate": 9.313027078004903e-06,
      "loss": 2.0407,
      "step": 5400
    },
    {
      "epoch": 12.34,
      "learning_rate": 8.71117061502135e-06,
      "loss": 2.0326,
      "step": 5700
    },
    {
      "epoch": 12.99,
      "learning_rate": 7.955405662643384e-06,
      "loss": 2.0234,
      "step": 6000
    },
    {
      "epoch": 12.99,
      "eval_loss": 2.0452144145965576,
      "eval_runtime": 40.7967,
      "eval_samples_per_second": 63.829,
      "eval_steps_per_second": 3.995,
      "step": 6000
    },
    {
      "epoch": 13.64,
      "learning_rate": 7.0770750650094335e-06,
      "loss": 2.019,
      "step": 6300
    },
    {
      "epoch": 14.29,
      "learning_rate": 6.112604669781572e-06,
      "loss": 2.0186,
      "step": 6600
    },
    {
      "epoch": 14.94,
      "learning_rate": 5.101992686957028e-06,
      "loss": 2.0114,
      "step": 6900
    },
    {
      "epoch": 15.15,
      "eval_loss": 2.0369863510131836,
      "eval_runtime": 40.6659,
      "eval_samples_per_second": 64.034,
      "eval_steps_per_second": 4.008,
      "step": 7000
    }
  ],
  "logging_steps": 300,
  "max_steps": 9240,
  "num_train_epochs": 20,
  "save_steps": 1000,
  "total_flos": 4.871894179579822e+17,
  "trial_name": null,
  "trial_params": null
}