{
  "best_metric": 0.13279759883880615,
  "best_model_checkpoint": "./beans_outputs/checkpoint-325",
  "epoch": 5.0,
  "global_step": 325,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.15,
      "learning_rate": 1.9384615384615386e-05,
      "loss": 1.0095,
      "step": 10
    },
    {
      "epoch": 0.31,
      "learning_rate": 1.876923076923077e-05,
      "loss": 0.8982,
      "step": 20
    },
    {
      "epoch": 0.46,
      "learning_rate": 1.8153846153846155e-05,
      "loss": 0.7749,
      "step": 30
    },
    {
      "epoch": 0.62,
      "learning_rate": 1.753846153846154e-05,
      "loss": 0.6896,
      "step": 40
    },
    {
      "epoch": 0.77,
      "learning_rate": 1.6923076923076924e-05,
      "loss": 0.5892,
      "step": 50
    },
    {
      "epoch": 0.92,
      "learning_rate": 1.630769230769231e-05,
      "loss": 0.49,
      "step": 60
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9624060150375939,
      "eval_loss": 0.405004620552063,
      "eval_runtime": 1.5758,
      "eval_samples_per_second": 84.401,
      "eval_steps_per_second": 5.711,
      "step": 65
    },
    {
      "epoch": 1.08,
      "learning_rate": 1.5692307692307693e-05,
      "loss": 0.4349,
      "step": 70
    },
    {
      "epoch": 1.23,
      "learning_rate": 1.5076923076923078e-05,
      "loss": 0.3709,
      "step": 80
    },
    {
      "epoch": 1.38,
      "learning_rate": 1.4461538461538462e-05,
      "loss": 0.3631,
      "step": 90
    },
    {
      "epoch": 1.54,
      "learning_rate": 1.3846153846153847e-05,
      "loss": 0.2787,
      "step": 100
    },
    {
      "epoch": 1.69,
      "learning_rate": 1.3230769230769231e-05,
      "loss": 0.2455,
      "step": 110
    },
    {
      "epoch": 1.85,
      "learning_rate": 1.2615384615384616e-05,
      "loss": 0.2542,
      "step": 120
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.2e-05,
      "loss": 0.2769,
      "step": 130
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9849624060150376,
      "eval_loss": 0.1862308382987976,
      "eval_runtime": 1.554,
      "eval_samples_per_second": 85.586,
      "eval_steps_per_second": 5.792,
      "step": 130
    },
    {
      "epoch": 2.15,
      "learning_rate": 1.1384615384615385e-05,
      "loss": 0.2338,
      "step": 140
    },
    {
      "epoch": 2.31,
      "learning_rate": 1.076923076923077e-05,
      "loss": 0.2129,
      "step": 150
    },
    {
      "epoch": 2.46,
      "learning_rate": 1.0153846153846154e-05,
      "loss": 0.1908,
      "step": 160
    },
    {
      "epoch": 2.62,
      "learning_rate": 9.53846153846154e-06,
      "loss": 0.1731,
      "step": 170
    },
    {
      "epoch": 2.77,
      "learning_rate": 8.923076923076925e-06,
      "loss": 0.1826,
      "step": 180
    },
    {
      "epoch": 2.92,
      "learning_rate": 8.307692307692309e-06,
      "loss": 0.1441,
      "step": 190
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9774436090225563,
      "eval_loss": 0.15537278354167938,
      "eval_runtime": 1.5401,
      "eval_samples_per_second": 86.36,
      "eval_steps_per_second": 5.844,
      "step": 195
    },
    {
      "epoch": 3.08,
      "learning_rate": 7.692307692307694e-06,
      "loss": 0.1676,
      "step": 200
    },
    {
      "epoch": 3.23,
      "learning_rate": 7.076923076923078e-06,
      "loss": 0.1733,
      "step": 210
    },
    {
      "epoch": 3.38,
      "learning_rate": 6.461538461538463e-06,
      "loss": 0.1785,
      "step": 220
    },
    {
      "epoch": 3.54,
      "learning_rate": 5.846153846153847e-06,
      "loss": 0.1697,
      "step": 230
    },
    {
      "epoch": 3.69,
      "learning_rate": 5.230769230769232e-06,
      "loss": 0.1536,
      "step": 240
    },
    {
      "epoch": 3.85,
      "learning_rate": 4.615384615384616e-06,
      "loss": 0.1862,
      "step": 250
    },
    {
      "epoch": 4.0,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.1661,
      "step": 260
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9774436090225563,
      "eval_loss": 0.13332900404930115,
      "eval_runtime": 1.5476,
      "eval_samples_per_second": 85.942,
      "eval_steps_per_second": 5.816,
      "step": 260
    },
    {
      "epoch": 4.15,
      "learning_rate": 3.384615384615385e-06,
      "loss": 0.1189,
      "step": 270
    },
    {
      "epoch": 4.31,
      "learning_rate": 2.7692307692307697e-06,
      "loss": 0.1368,
      "step": 280
    },
    {
      "epoch": 4.46,
      "learning_rate": 2.153846153846154e-06,
      "loss": 0.1742,
      "step": 290
    },
    {
      "epoch": 4.62,
      "learning_rate": 1.5384615384615387e-06,
      "loss": 0.1238,
      "step": 300
    },
    {
      "epoch": 4.77,
      "learning_rate": 9.230769230769232e-07,
      "loss": 0.1637,
      "step": 310
    },
    {
      "epoch": 4.92,
      "learning_rate": 3.0769230769230774e-07,
      "loss": 0.1754,
      "step": 320
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9699248120300752,
      "eval_loss": 0.13279759883880615,
      "eval_runtime": 1.5399,
      "eval_samples_per_second": 86.369,
      "eval_steps_per_second": 5.845,
      "step": 325
    },
    {
      "epoch": 5.0,
      "step": 325,
      "total_flos": 4.006371770595533e+17,
      "train_loss": 0.0,
      "train_runtime": 0.3337,
      "train_samples_per_second": 15491.466,
      "train_steps_per_second": 973.835
    }
  ],
  "max_steps": 325,
  "num_train_epochs": 5,
  "total_flos": 4.006371770595533e+17,
  "trial_name": null,
  "trial_params": null
}