{
  "best_metric": 3.516153573989868,
  "best_model_checkpoint": "output/bones/checkpoint-131",
  "epoch": 1.0,
  "global_step": 131,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "learning_rate": 0.00013670742670262692,
      "loss": 4.0743,
      "step": 5
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00013523678052634687,
      "loss": 4.0532,
      "step": 10
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00013280918103490095,
      "loss": 3.8244,
      "step": 15
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00012945949034742042,
      "loss": 3.8818,
      "step": 20
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00012523581249268407,
      "loss": 3.7574,
      "step": 25
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00012019880259978666,
      "loss": 3.8753,
      "step": 30
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00011442079584574986,
      "loss": 3.6832,
      "step": 35
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00010798476866903087,
      "loss": 3.764,
      "step": 40
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00010098314716666811,
      "loss": 3.7562,
      "step": 45
    },
    {
      "epoch": 0.38,
      "learning_rate": 9.351647978736063e-05,
      "loss": 3.8048,
      "step": 50
    },
    {
      "epoch": 0.42,
      "learning_rate": 8.5691993381587e-05,
      "loss": 3.8516,
      "step": 55
    },
    {
      "epoch": 0.46,
      "learning_rate": 7.762205334494898e-05,
      "loss": 3.7629,
      "step": 60
    },
    {
      "epoch": 0.5,
      "learning_rate": 6.942254996821776e-05,
      "loss": 3.7416,
      "step": 65
    },
    {
      "epoch": 0.53,
      "learning_rate": 6.121123416728538e-05,
      "loss": 3.5754,
      "step": 70
    },
    {
      "epoch": 0.57,
      "learning_rate": 5.310602649316754e-05,
      "loss": 3.625,
      "step": 75
    },
    {
      "epoch": 0.61,
      "learning_rate": 4.5223323705920566e-05,
      "loss": 3.5876,
      "step": 80
    },
    {
      "epoch": 0.65,
      "learning_rate": 3.7676327231320786e-05,
      "loss": 3.5927,
      "step": 85
    },
    {
      "epoch": 0.69,
      "learning_rate": 3.0573417504900444e-05,
      "loss": 3.4549,
      "step": 90
    },
    {
      "epoch": 0.73,
      "learning_rate": 2.401659754895943e-05,
      "loss": 3.8261,
      "step": 95
    },
    {
      "epoch": 0.76,
      "learning_rate": 1.8100028133934438e-05,
      "loss": 3.6442,
      "step": 100
    },
    {
      "epoch": 0.8,
      "learning_rate": 1.2908675560288951e-05,
      "loss": 3.7345,
      "step": 105
    },
    {
      "epoch": 0.84,
      "learning_rate": 8.517091479772992e-06,
      "loss": 3.4828,
      "step": 110
    },
    {
      "epoch": 0.88,
      "learning_rate": 4.988342278719811e-06,
      "loss": 3.5471,
      "step": 115
    },
    {
      "epoch": 0.92,
      "learning_rate": 2.3731033982246404e-06,
      "loss": 3.5867,
      "step": 120
    },
    {
      "epoch": 0.95,
      "learning_rate": 7.089315974356758e-07,
      "loss": 3.4037,
      "step": 125
    },
    {
      "epoch": 0.99,
      "learning_rate": 1.9725610793441152e-08,
      "loss": 3.6132,
      "step": 130
    },
    {
      "epoch": 1.0,
      "eval_loss": 3.516153573989868,
      "eval_runtime": 9.0194,
      "eval_samples_per_second": 22.396,
      "eval_steps_per_second": 2.883,
      "step": 131
    }
  ],
  "max_steps": 131,
  "num_train_epochs": 1,
  "total_flos": 136133148672000.0,
  "trial_name": null,
  "trial_params": null
}