{
  "best_metric": 3.3079917430877686,
  "best_model_checkpoint": "output/21-savage/checkpoint-128",
  "epoch": 1.0,
  "global_step": 128,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "learning_rate": 0.0001366840960734715,
      "loss": 3.9729,
      "step": 5
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00013514414396914573,
      "loss": 3.8142,
      "step": 10
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001326033060000631,
      "loss": 3.5999,
      "step": 15
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00012909979873429716,
      "loss": 3.5616,
      "step": 20
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00012468631818219865,
      "loss": 3.5237,
      "step": 25
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00011942924719935021,
      "loss": 3.3237,
      "step": 30
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00011340765702662907,
      "loss": 3.5837,
      "step": 35
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00010671211798514472,
      "loss": 3.5472,
      "step": 40
    },
    {
      "epoch": 0.35,
      "learning_rate": 9.944333721430602e-05,
      "loss": 3.3629,
      "step": 45
    },
    {
      "epoch": 0.39,
      "learning_rate": 9.171064394270629e-05,
      "loss": 3.5192,
      "step": 50
    },
    {
      "epoch": 0.43,
      "learning_rate": 8.363034507476126e-05,
      "loss": 3.423,
      "step": 55
    },
    {
      "epoch": 0.47,
      "learning_rate": 7.532397582660788e-05,
      "loss": 3.602,
      "step": 60
    },
    {
      "epoch": 0.51,
      "learning_rate": 6.691647172332823e-05,
      "loss": 3.3109,
      "step": 65
    },
    {
      "epoch": 0.55,
      "learning_rate": 5.853428945236219e-05,
      "loss": 3.4656,
      "step": 70
    },
    {
      "epoch": 0.59,
      "learning_rate": 5.0303504837221976e-05,
      "loss": 3.261,
      "step": 75
    },
    {
      "epoch": 0.62,
      "learning_rate": 4.2347916539754844e-05,
      "loss": 3.3219,
      "step": 80
    },
    {
      "epoch": 0.66,
      "learning_rate": 3.478718401303682e-05,
      "loss": 3.2217,
      "step": 85
    },
    {
      "epoch": 0.7,
      "learning_rate": 2.773502771181907e-05,
      "loss": 3.3141,
      "step": 90
    },
    {
      "epoch": 0.74,
      "learning_rate": 2.1297518631037208e-05,
      "loss": 3.2947,
      "step": 95
    },
    {
      "epoch": 0.78,
      "learning_rate": 1.557148289931624e-05,
      "loss": 3.3889,
      "step": 100
    },
    {
      "epoch": 0.82,
      "learning_rate": 1.0643045423870092e-05,
      "loss": 3.3058,
      "step": 105
    },
    {
      "epoch": 0.86,
      "learning_rate": 6.586334491731787e-06,
      "loss": 3.3431,
      "step": 110
    },
    {
      "epoch": 0.9,
      "learning_rate": 3.462366811317684e-06,
      "loss": 3.3222,
      "step": 115
    },
    {
      "epoch": 0.94,
      "learning_rate": 1.3181297643383925e-06,
      "loss": 3.2589,
      "step": 120
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.858746718418518e-07,
      "loss": 3.3423,
      "step": 125
    },
    {
      "epoch": 1.0,
      "eval_loss": 3.3079917430877686,
      "eval_runtime": 7.1911,
      "eval_samples_per_second": 22.111,
      "eval_steps_per_second": 2.781,
      "step": 128
    }
  ],
  "max_steps": 128,
  "num_train_epochs": 1,
  "total_flos": 132997644288000.0,
  "trial_name": null,
  "trial_params": null
}