{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 9.846153846153847,
  "global_step": 896,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.11,
      "learning_rate": 3e-06,
      "loss": 4.0528,
      "step": 10
    },
    {
      "epoch": 0.22,
      "learning_rate": 6e-06,
      "loss": 4.0182,
      "step": 20
    },
    {
      "epoch": 0.33,
      "learning_rate": 9e-06,
      "loss": 3.9239,
      "step": 30
    },
    {
      "epoch": 0.44,
      "learning_rate": 1.2e-05,
      "loss": 3.5011,
      "step": 40
    },
    {
      "epoch": 0.55,
      "learning_rate": 1.5e-05,
      "loss": 3.2438,
      "step": 50
    },
    {
      "epoch": 0.66,
      "learning_rate": 1.8e-05,
      "loss": 3.1774,
      "step": 60
    },
    {
      "epoch": 0.77,
      "learning_rate": 2.1e-05,
      "loss": 3.1056,
      "step": 70
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.4e-05,
      "loss": 3.0254,
      "step": 80
    },
    {
      "epoch": 0.99,
      "learning_rate": 2.7000000000000002e-05,
      "loss": 2.941,
      "step": 90
    },
    {
      "epoch": 1.1,
      "learning_rate": 3e-05,
      "loss": 2.8396,
      "step": 100
    },
    {
      "epoch": 1.1,
      "eval_loss": 2.859715223312378,
      "eval_runtime": 56.6611,
      "eval_samples_per_second": 4.73,
      "eval_steps_per_second": 0.088,
      "step": 100
    },
    {
      "epoch": 1.21,
      "learning_rate": 2.962962962962963e-05,
      "loss": 2.7745,
      "step": 110
    },
    {
      "epoch": 1.32,
      "learning_rate": 2.925925925925926e-05,
      "loss": 2.6745,
      "step": 120
    },
    {
      "epoch": 1.43,
      "learning_rate": 2.8888888888888888e-05,
      "loss": 2.6359,
      "step": 130
    },
    {
      "epoch": 1.54,
      "learning_rate": 2.851851851851852e-05,
      "loss": 2.5991,
      "step": 140
    },
    {
      "epoch": 1.65,
      "learning_rate": 2.8148148148148147e-05,
      "loss": 2.6046,
      "step": 150
    },
    {
      "epoch": 1.76,
      "learning_rate": 2.777777777777778e-05,
      "loss": 2.4419,
      "step": 160
    },
    {
      "epoch": 1.87,
      "learning_rate": 2.7407407407407408e-05,
      "loss": 2.4514,
      "step": 170
    },
    {
      "epoch": 1.98,
      "learning_rate": 2.7037037037037037e-05,
      "loss": 2.4557,
      "step": 180
    },
    {
      "epoch": 2.09,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 2.1795,
      "step": 190
    },
    {
      "epoch": 2.2,
      "learning_rate": 2.6296296296296296e-05,
      "loss": 2.1014,
      "step": 200
    },
    {
      "epoch": 2.2,
      "eval_loss": 2.2379915714263916,
      "eval_runtime": 62.1156,
      "eval_samples_per_second": 4.315,
      "eval_steps_per_second": 0.08,
      "step": 200
    },
    {
      "epoch": 2.31,
      "learning_rate": 2.5925925925925925e-05,
      "loss": 2.0129,
      "step": 210
    },
    {
      "epoch": 2.42,
      "learning_rate": 2.5555555555555557e-05,
      "loss": 2.0986,
      "step": 220
    },
    {
      "epoch": 2.53,
      "learning_rate": 2.5185185185185183e-05,
      "loss": 2.0814,
      "step": 230
    },
    {
      "epoch": 2.64,
      "learning_rate": 2.4814814814814816e-05,
      "loss": 1.9973,
      "step": 240
    },
    {
      "epoch": 2.75,
      "learning_rate": 2.4444444444444445e-05,
      "loss": 1.959,
      "step": 250
    },
    {
      "epoch": 2.86,
      "learning_rate": 2.4074074074074074e-05,
      "loss": 1.9774,
      "step": 260
    },
    {
      "epoch": 2.97,
      "learning_rate": 2.3703703703703703e-05,
      "loss": 1.9267,
      "step": 270
    },
    {
      "epoch": 3.08,
      "learning_rate": 2.3333333333333336e-05,
      "loss": 1.8192,
      "step": 280
    },
    {
      "epoch": 3.19,
      "learning_rate": 2.296296296296296e-05,
      "loss": 1.6512,
      "step": 290
    },
    {
      "epoch": 3.3,
      "learning_rate": 2.2592592592592594e-05,
      "loss": 1.5577,
      "step": 300
    },
    {
      "epoch": 3.3,
      "eval_loss": 2.048072576522827,
      "eval_runtime": 55.3343,
      "eval_samples_per_second": 4.843,
      "eval_steps_per_second": 0.09,
      "step": 300
    },
    {
      "epoch": 3.41,
      "learning_rate": 2.222222222222222e-05,
      "loss": 1.5442,
      "step": 310
    },
    {
      "epoch": 3.52,
      "learning_rate": 2.1851851851851852e-05,
      "loss": 1.5807,
      "step": 320
    },
    {
      "epoch": 3.63,
      "learning_rate": 2.148148148148148e-05,
      "loss": 1.6009,
      "step": 330
    },
    {
      "epoch": 3.74,
      "learning_rate": 2.111111111111111e-05,
      "loss": 1.5485,
      "step": 340
    },
    {
      "epoch": 3.85,
      "learning_rate": 2.074074074074074e-05,
      "loss": 1.6185,
      "step": 350
    },
    {
      "epoch": 3.96,
      "learning_rate": 2.0370370370370372e-05,
      "loss": 1.5032,
      "step": 360
    },
    {
      "epoch": 4.07,
      "learning_rate": 1.9999999999999998e-05,
      "loss": 1.2841,
      "step": 370
    },
    {
      "epoch": 4.18,
      "learning_rate": 1.962962962962963e-05,
      "loss": 1.2237,
      "step": 380
    },
    {
      "epoch": 4.29,
      "learning_rate": 1.925925925925926e-05,
      "loss": 1.2177,
      "step": 390
    },
    {
      "epoch": 4.4,
      "learning_rate": 1.888888888888889e-05,
      "loss": 1.2009,
      "step": 400
    },
    {
      "epoch": 4.4,
      "eval_loss": 1.8517041206359863,
      "eval_runtime": 56.0224,
      "eval_samples_per_second": 4.784,
      "eval_steps_per_second": 0.089,
      "step": 400
    },
    {
      "epoch": 4.51,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 1.1844,
      "step": 410
    },
    {
      "epoch": 4.62,
      "learning_rate": 1.814814814814815e-05,
      "loss": 1.2252,
      "step": 420
    },
    {
      "epoch": 4.73,
      "learning_rate": 1.7777777777777777e-05,
      "loss": 1.1829,
      "step": 430
    },
    {
      "epoch": 4.84,
      "learning_rate": 1.740740740740741e-05,
      "loss": 1.177,
      "step": 440
    },
    {
      "epoch": 4.95,
      "learning_rate": 1.7037037037037035e-05,
      "loss": 1.194,
      "step": 450
    },
    {
      "epoch": 5.05,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 1.0947,
      "step": 460
    },
    {
      "epoch": 5.16,
      "learning_rate": 1.6296296296296297e-05,
      "loss": 1.0177,
      "step": 470
    },
    {
      "epoch": 5.27,
      "learning_rate": 1.5925925925925926e-05,
      "loss": 0.9692,
      "step": 480
    },
    {
      "epoch": 5.38,
      "learning_rate": 1.5555555555555555e-05,
      "loss": 0.9933,
      "step": 490
    },
    {
      "epoch": 5.49,
      "learning_rate": 1.5185185185185186e-05,
      "loss": 0.9451,
      "step": 500
    },
    {
      "epoch": 5.49,
      "eval_loss": 1.683161973953247,
      "eval_runtime": 51.3302,
      "eval_samples_per_second": 5.221,
      "eval_steps_per_second": 0.097,
      "step": 500
    },
    {
      "epoch": 5.6,
      "learning_rate": 1.4814814814814815e-05,
      "loss": 0.9674,
      "step": 510
    },
    {
      "epoch": 5.71,
      "learning_rate": 1.4444444444444444e-05,
      "loss": 0.9519,
      "step": 520
    },
    {
      "epoch": 5.82,
      "learning_rate": 1.4074074074074073e-05,
      "loss": 0.9108,
      "step": 530
    },
    {
      "epoch": 5.93,
      "learning_rate": 1.3703703703703704e-05,
      "loss": 0.8963,
      "step": 540
    },
    {
      "epoch": 6.04,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.8255,
      "step": 550
    },
    {
      "epoch": 6.15,
      "learning_rate": 1.2962962962962962e-05,
      "loss": 0.773,
      "step": 560
    },
    {
      "epoch": 6.26,
      "learning_rate": 1.2592592592592592e-05,
      "loss": 0.7945,
      "step": 570
    },
    {
      "epoch": 6.37,
      "learning_rate": 1.2222222222222222e-05,
      "loss": 0.7398,
      "step": 580
    },
    {
      "epoch": 6.48,
      "learning_rate": 1.1851851851851852e-05,
      "loss": 0.7495,
      "step": 590
    },
    {
      "epoch": 6.59,
      "learning_rate": 1.148148148148148e-05,
      "loss": 0.7491,
      "step": 600
    },
    {
      "epoch": 6.59,
      "eval_loss": 1.6291619539260864,
      "eval_runtime": 56.8018,
      "eval_samples_per_second": 4.718,
      "eval_steps_per_second": 0.088,
      "step": 600
    },
    {
      "epoch": 6.7,
      "learning_rate": 1.111111111111111e-05,
      "loss": 0.7644,
      "step": 610
    },
    {
      "epoch": 6.81,
      "learning_rate": 1.074074074074074e-05,
      "loss": 0.7328,
      "step": 620
    },
    {
      "epoch": 6.92,
      "learning_rate": 1.037037037037037e-05,
      "loss": 0.7294,
      "step": 630
    },
    {
      "epoch": 7.03,
      "learning_rate": 9.999999999999999e-06,
      "loss": 0.6665,
      "step": 640
    },
    {
      "epoch": 7.14,
      "learning_rate": 9.62962962962963e-06,
      "loss": 0.6109,
      "step": 650
    },
    {
      "epoch": 7.25,
      "learning_rate": 9.259259259259259e-06,
      "loss": 0.6219,
      "step": 660
    },
    {
      "epoch": 7.36,
      "learning_rate": 8.888888888888888e-06,
      "loss": 0.592,
      "step": 670
    },
    {
      "epoch": 7.47,
      "learning_rate": 8.518518518518517e-06,
      "loss": 0.5722,
      "step": 680
    },
    {
      "epoch": 7.58,
      "learning_rate": 8.148148148148148e-06,
      "loss": 0.5538,
      "step": 690
    },
    {
      "epoch": 7.69,
      "learning_rate": 7.777777777777777e-06,
      "loss": 0.5546,
      "step": 700
    },
    {
      "epoch": 7.69,
      "eval_loss": 1.5901862382888794,
      "eval_runtime": 52.5596,
      "eval_samples_per_second": 5.099,
      "eval_steps_per_second": 0.095,
      "step": 700
    },
    {
      "epoch": 7.8,
      "learning_rate": 7.4074074074074075e-06,
      "loss": 0.546,
      "step": 710
    },
    {
      "epoch": 7.91,
      "learning_rate": 7.037037037037037e-06,
      "loss": 0.5322,
      "step": 720
    },
    {
      "epoch": 8.02,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.588,
      "step": 730
    },
    {
      "epoch": 8.13,
      "learning_rate": 6.296296296296296e-06,
      "loss": 0.4936,
      "step": 740
    },
    {
      "epoch": 8.24,
      "learning_rate": 5.925925925925926e-06,
      "loss": 0.4625,
      "step": 750
    },
    {
      "epoch": 8.35,
      "learning_rate": 5.555555555555555e-06,
      "loss": 0.4577,
      "step": 760
    },
    {
      "epoch": 8.46,
      "learning_rate": 5.185185185185185e-06,
      "loss": 0.4619,
      "step": 770
    },
    {
      "epoch": 8.57,
      "learning_rate": 4.814814814814815e-06,
      "loss": 0.5216,
      "step": 780
    },
    {
      "epoch": 8.68,
      "learning_rate": 4.444444444444444e-06,
      "loss": 0.4771,
      "step": 790
    },
    {
      "epoch": 8.79,
      "learning_rate": 4.074074074074074e-06,
      "loss": 0.452,
      "step": 800
    },
    {
      "epoch": 8.79,
      "eval_loss": 1.4902839660644531,
      "eval_runtime": 54.4822,
      "eval_samples_per_second": 4.919,
      "eval_steps_per_second": 0.092,
      "step": 800
    },
    {
      "epoch": 8.9,
      "learning_rate": 3.7037037037037037e-06,
      "loss": 0.4645,
      "step": 810
    },
    {
      "epoch": 9.01,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.4459,
      "step": 820
    },
    {
      "epoch": 9.12,
      "learning_rate": 2.962962962962963e-06,
      "loss": 0.4157,
      "step": 830
    },
    {
      "epoch": 9.23,
      "learning_rate": 2.5925925925925925e-06,
      "loss": 0.3948,
      "step": 840
    },
    {
      "epoch": 9.34,
      "learning_rate": 2.222222222222222e-06,
      "loss": 0.4186,
      "step": 850
    },
    {
      "epoch": 9.45,
      "learning_rate": 1.8518518518518519e-06,
      "loss": 0.3841,
      "step": 860
    },
    {
      "epoch": 9.56,
      "learning_rate": 1.4814814814814815e-06,
      "loss": 0.4086,
      "step": 870
    },
    {
      "epoch": 9.67,
      "learning_rate": 1.111111111111111e-06,
      "loss": 0.4018,
      "step": 880
    },
    {
      "epoch": 9.78,
      "learning_rate": 7.407407407407407e-07,
      "loss": 0.405,
      "step": 890
    }
  ],
  "max_steps": 910,
  "num_train_epochs": 10,
  "total_flos": 4163675271142080.0,
  "trial_name": null,
  "trial_params": null
}