{
  "best_metric": 0.8530173897743225,
  "best_model_checkpoint": "./beans_outputs/checkpoint-715",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 715,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 1.9720279720279722e-05,
      "loss": 1.7594,
      "step": 10
    },
    {
      "epoch": 0.14,
      "learning_rate": 1.944055944055944e-05,
      "loss": 1.7247,
      "step": 20
    },
    {
      "epoch": 0.21,
      "learning_rate": 1.916083916083916e-05,
      "loss": 1.6869,
      "step": 30
    },
    {
      "epoch": 0.28,
      "learning_rate": 1.888111888111888e-05,
      "loss": 1.6615,
      "step": 40
    },
    {
      "epoch": 0.35,
      "learning_rate": 1.8601398601398602e-05,
      "loss": 1.6413,
      "step": 50
    },
    {
      "epoch": 0.42,
      "learning_rate": 1.8321678321678323e-05,
      "loss": 1.6369,
      "step": 60
    },
    {
      "epoch": 0.49,
      "learning_rate": 1.8041958041958044e-05,
      "loss": 1.5629,
      "step": 70
    },
    {
      "epoch": 0.56,
      "learning_rate": 1.7762237762237765e-05,
      "loss": 1.4909,
      "step": 80
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.7482517482517486e-05,
      "loss": 1.585,
      "step": 90
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.7202797202797203e-05,
      "loss": 1.4428,
      "step": 100
    },
    {
      "epoch": 0.77,
      "learning_rate": 1.6923076923076924e-05,
      "loss": 1.4252,
      "step": 110
    },
    {
      "epoch": 0.84,
      "learning_rate": 1.6643356643356645e-05,
      "loss": 1.3894,
      "step": 120
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.6363636363636366e-05,
      "loss": 1.4099,
      "step": 130
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.6083916083916083e-05,
      "loss": 1.4331,
      "step": 140
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.48031496062992124,
      "eval_loss": 1.380394458770752,
      "eval_runtime": 13.5035,
      "eval_samples_per_second": 9.405,
      "eval_steps_per_second": 1.185,
      "step": 143
    },
    {
      "epoch": 1.05,
      "learning_rate": 1.5804195804195804e-05,
      "loss": 1.4025,
      "step": 150
    },
    {
      "epoch": 1.12,
      "learning_rate": 1.5524475524475525e-05,
      "loss": 1.341,
      "step": 160
    },
    {
      "epoch": 1.19,
      "learning_rate": 1.5244755244755244e-05,
      "loss": 1.2961,
      "step": 170
    },
    {
      "epoch": 1.26,
      "learning_rate": 1.4965034965034965e-05,
      "loss": 1.3105,
      "step": 180
    },
    {
      "epoch": 1.33,
      "learning_rate": 1.4685314685314686e-05,
      "loss": 1.2829,
      "step": 190
    },
    {
      "epoch": 1.4,
      "learning_rate": 1.4405594405594407e-05,
      "loss": 1.291,
      "step": 200
    },
    {
      "epoch": 1.47,
      "learning_rate": 1.4125874125874126e-05,
      "loss": 1.2477,
      "step": 210
    },
    {
      "epoch": 1.54,
      "learning_rate": 1.3846153846153847e-05,
      "loss": 1.1525,
      "step": 220
    },
    {
      "epoch": 1.61,
      "learning_rate": 1.3566433566433568e-05,
      "loss": 1.2458,
      "step": 230
    },
    {
      "epoch": 1.68,
      "learning_rate": 1.3286713286713288e-05,
      "loss": 1.1054,
      "step": 240
    },
    {
      "epoch": 1.75,
      "learning_rate": 1.3006993006993008e-05,
      "loss": 1.2257,
      "step": 250
    },
    {
      "epoch": 1.82,
      "learning_rate": 1.2727272727272728e-05,
      "loss": 1.1471,
      "step": 260
    },
    {
      "epoch": 1.89,
      "learning_rate": 1.244755244755245e-05,
      "loss": 1.1859,
      "step": 270
    },
    {
      "epoch": 1.96,
      "learning_rate": 1.216783216783217e-05,
      "loss": 1.1653,
      "step": 280
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.6850393700787402,
      "eval_loss": 1.0843431949615479,
      "eval_runtime": 10.8218,
      "eval_samples_per_second": 11.736,
      "eval_steps_per_second": 1.479,
      "step": 286
    },
    {
      "epoch": 2.03,
      "learning_rate": 1.1888111888111888e-05,
      "loss": 1.2361,
      "step": 290
    },
    {
      "epoch": 2.1,
      "learning_rate": 1.1608391608391608e-05,
      "loss": 1.1012,
      "step": 300
    },
    {
      "epoch": 2.17,
      "learning_rate": 1.132867132867133e-05,
      "loss": 1.0819,
      "step": 310
    },
    {
      "epoch": 2.24,
      "learning_rate": 1.1048951048951048e-05,
      "loss": 1.0283,
      "step": 320
    },
    {
      "epoch": 2.31,
      "learning_rate": 1.076923076923077e-05,
      "loss": 1.0479,
      "step": 330
    },
    {
      "epoch": 2.38,
      "learning_rate": 1.048951048951049e-05,
      "loss": 1.0286,
      "step": 340
    },
    {
      "epoch": 2.45,
      "learning_rate": 1.0209790209790211e-05,
      "loss": 1.0439,
      "step": 350
    },
    {
      "epoch": 2.52,
      "learning_rate": 9.93006993006993e-06,
      "loss": 1.0562,
      "step": 360
    },
    {
      "epoch": 2.59,
      "learning_rate": 9.650349650349651e-06,
      "loss": 1.0392,
      "step": 370
    },
    {
      "epoch": 2.66,
      "learning_rate": 9.370629370629372e-06,
      "loss": 1.111,
      "step": 380
    },
    {
      "epoch": 2.73,
      "learning_rate": 9.090909090909091e-06,
      "loss": 1.1265,
      "step": 390
    },
    {
      "epoch": 2.8,
      "learning_rate": 8.811188811188812e-06,
      "loss": 1.0098,
      "step": 400
    },
    {
      "epoch": 2.87,
      "learning_rate": 8.531468531468533e-06,
      "loss": 1.1216,
      "step": 410
    },
    {
      "epoch": 2.94,
      "learning_rate": 8.251748251748254e-06,
      "loss": 1.0919,
      "step": 420
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.7165354330708661,
      "eval_loss": 0.9539378881454468,
      "eval_runtime": 9.6357,
      "eval_samples_per_second": 13.18,
      "eval_steps_per_second": 1.66,
      "step": 429
    },
    {
      "epoch": 3.01,
      "learning_rate": 7.972027972027973e-06,
      "loss": 1.0267,
      "step": 430
    },
    {
      "epoch": 3.08,
      "learning_rate": 7.692307692307694e-06,
      "loss": 1.042,
      "step": 440
    },
    {
      "epoch": 3.15,
      "learning_rate": 7.412587412587413e-06,
      "loss": 0.8939,
      "step": 450
    },
    {
      "epoch": 3.22,
      "learning_rate": 7.132867132867134e-06,
      "loss": 1.0723,
      "step": 460
    },
    {
      "epoch": 3.29,
      "learning_rate": 6.853146853146854e-06,
      "loss": 1.0155,
      "step": 470
    },
    {
      "epoch": 3.36,
      "learning_rate": 6.573426573426574e-06,
      "loss": 1.0215,
      "step": 480
    },
    {
      "epoch": 3.43,
      "learning_rate": 6.2937062937062944e-06,
      "loss": 0.9736,
      "step": 490
    },
    {
      "epoch": 3.5,
      "learning_rate": 6.013986013986014e-06,
      "loss": 0.968,
      "step": 500
    },
    {
      "epoch": 3.57,
      "learning_rate": 5.7342657342657345e-06,
      "loss": 0.9836,
      "step": 510
    },
    {
      "epoch": 3.64,
      "learning_rate": 5.4545454545454545e-06,
      "loss": 0.9847,
      "step": 520
    },
    {
      "epoch": 3.71,
      "learning_rate": 5.174825174825175e-06,
      "loss": 0.9006,
      "step": 530
    },
    {
      "epoch": 3.78,
      "learning_rate": 4.895104895104895e-06,
      "loss": 0.9513,
      "step": 540
    },
    {
      "epoch": 3.85,
      "learning_rate": 4.615384615384616e-06,
      "loss": 0.7749,
      "step": 550
    },
    {
      "epoch": 3.92,
      "learning_rate": 4.335664335664336e-06,
      "loss": 0.8459,
      "step": 560
    },
    {
      "epoch": 3.99,
      "learning_rate": 4.055944055944056e-06,
      "loss": 0.9689,
      "step": 570
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7322834645669292,
      "eval_loss": 0.8723652958869934,
      "eval_runtime": 9.8772,
      "eval_samples_per_second": 12.858,
      "eval_steps_per_second": 1.62,
      "step": 572
    },
    {
      "epoch": 4.06,
      "learning_rate": 3.776223776223776e-06,
      "loss": 0.9308,
      "step": 580
    },
    {
      "epoch": 4.13,
      "learning_rate": 3.4965034965034966e-06,
      "loss": 0.9629,
      "step": 590
    },
    {
      "epoch": 4.2,
      "learning_rate": 3.216783216783217e-06,
      "loss": 0.9307,
      "step": 600
    },
    {
      "epoch": 4.27,
      "learning_rate": 2.937062937062937e-06,
      "loss": 0.8897,
      "step": 610
    },
    {
      "epoch": 4.34,
      "learning_rate": 2.6573426573426574e-06,
      "loss": 0.921,
      "step": 620
    },
    {
      "epoch": 4.41,
      "learning_rate": 2.377622377622378e-06,
      "loss": 0.9482,
      "step": 630
    },
    {
      "epoch": 4.48,
      "learning_rate": 2.0979020979020983e-06,
      "loss": 0.9577,
      "step": 640
    },
    {
      "epoch": 4.55,
      "learning_rate": 1.8181818181818183e-06,
      "loss": 0.8544,
      "step": 650
    },
    {
      "epoch": 4.62,
      "learning_rate": 1.5384615384615387e-06,
      "loss": 0.9408,
      "step": 660
    },
    {
      "epoch": 4.69,
      "learning_rate": 1.258741258741259e-06,
      "loss": 0.8936,
      "step": 670
    },
    {
      "epoch": 4.76,
      "learning_rate": 9.790209790209791e-07,
      "loss": 0.9222,
      "step": 680
    },
    {
      "epoch": 4.83,
      "learning_rate": 6.993006993006994e-07,
      "loss": 0.8527,
      "step": 690
    },
    {
      "epoch": 4.9,
      "learning_rate": 4.195804195804196e-07,
      "loss": 0.8538,
      "step": 700
    },
    {
      "epoch": 4.97,
      "learning_rate": 1.3986013986013987e-07,
      "loss": 0.9175,
      "step": 710
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.7322834645669292,
      "eval_loss": 0.8530173897743225,
      "eval_runtime": 12.0509,
      "eval_samples_per_second": 10.539,
      "eval_steps_per_second": 1.328,
      "step": 715
    },
    {
      "epoch": 5.0,
      "step": 715,
      "total_flos": 4.424951239059456e+17,
      "train_loss": 0.18311181501908735,
      "train_runtime": 299.5589,
      "train_samples_per_second": 19.061,
      "train_steps_per_second": 2.387
    }
  ],
  "logging_steps": 10,
  "max_steps": 715,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 4.424951239059456e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}