{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.097560975609756,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.12,
      "learning_rate": 1e-05,
      "loss": 0.4133,
      "step": 10
    },
    {
      "epoch": 0.24,
      "learning_rate": 2e-05,
      "loss": 0.2546,
      "step": 20
    },
    {
      "epoch": 0.37,
      "learning_rate": 3e-05,
      "loss": 0.1767,
      "step": 30
    },
    {
      "epoch": 0.49,
      "learning_rate": 4e-05,
      "loss": 0.1409,
      "step": 40
    },
    {
      "epoch": 0.61,
      "learning_rate": 5e-05,
      "loss": 0.1687,
      "step": 50
    },
    {
      "epoch": 0.73,
      "learning_rate": 6e-05,
      "loss": 0.1161,
      "step": 60
    },
    {
      "epoch": 0.85,
      "learning_rate": 7e-05,
      "loss": 0.1192,
      "step": 70
    },
    {
      "epoch": 0.98,
      "learning_rate": 8e-05,
      "loss": 0.1233,
      "step": 80
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.13027246296405792,
      "eval_runtime": 3.8565,
      "eval_samples_per_second": 37.599,
      "eval_steps_per_second": 2.593,
      "step": 82
    },
    {
      "epoch": 1.1,
      "learning_rate": 9e-05,
      "loss": 0.1006,
      "step": 90
    },
    {
      "epoch": 1.22,
      "learning_rate": 0.0001,
      "loss": 0.0578,
      "step": 100
    },
    {
      "epoch": 1.34,
      "learning_rate": 9.861111111111112e-05,
      "loss": 0.0835,
      "step": 110
    },
    {
      "epoch": 1.46,
      "learning_rate": 9.722222222222223e-05,
      "loss": 0.0605,
      "step": 120
    },
    {
      "epoch": 1.59,
      "learning_rate": 9.583333333333334e-05,
      "loss": 0.0665,
      "step": 130
    },
    {
      "epoch": 1.71,
      "learning_rate": 9.444444444444444e-05,
      "loss": 0.0513,
      "step": 140
    },
    {
      "epoch": 1.83,
      "learning_rate": 9.305555555555556e-05,
      "loss": 0.0638,
      "step": 150
    },
    {
      "epoch": 1.95,
      "learning_rate": 9.166666666666667e-05,
      "loss": 0.0585,
      "step": 160
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.08042938262224197,
      "eval_runtime": 3.8773,
      "eval_samples_per_second": 37.397,
      "eval_steps_per_second": 2.579,
      "step": 164
    },
    {
      "epoch": 2.07,
      "learning_rate": 9.027777777777779e-05,
      "loss": 0.022,
      "step": 170
    },
    {
      "epoch": 2.2,
      "learning_rate": 8.888888888888889e-05,
      "loss": 0.0175,
      "step": 180
    },
    {
      "epoch": 2.32,
      "learning_rate": 8.75e-05,
      "loss": 0.015,
      "step": 190
    },
    {
      "epoch": 2.44,
      "learning_rate": 8.611111111111112e-05,
      "loss": 0.0186,
      "step": 200
    },
    {
      "epoch": 2.56,
      "learning_rate": 8.472222222222222e-05,
      "loss": 0.025,
      "step": 210
    },
    {
      "epoch": 2.68,
      "learning_rate": 8.333333333333334e-05,
      "loss": 0.0237,
      "step": 220
    },
    {
      "epoch": 2.8,
      "learning_rate": 8.194444444444445e-05,
      "loss": 0.0199,
      "step": 230
    },
    {
      "epoch": 2.93,
      "learning_rate": 8.055555555555556e-05,
      "loss": 0.0305,
      "step": 240
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.11400174349546432,
      "eval_runtime": 3.8699,
      "eval_samples_per_second": 37.469,
      "eval_steps_per_second": 2.584,
      "step": 246
    },
    {
      "epoch": 3.05,
      "learning_rate": 7.916666666666666e-05,
      "loss": 0.0067,
      "step": 250
    },
    {
      "epoch": 3.17,
      "learning_rate": 7.777777777777778e-05,
      "loss": 0.0084,
      "step": 260
    },
    {
      "epoch": 3.29,
      "learning_rate": 7.638888888888889e-05,
      "loss": 0.0078,
      "step": 270
    },
    {
      "epoch": 3.41,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.0036,
      "step": 280
    },
    {
      "epoch": 3.54,
      "learning_rate": 7.361111111111111e-05,
      "loss": 0.0075,
      "step": 290
    },
    {
      "epoch": 3.66,
      "learning_rate": 7.222222222222222e-05,
      "loss": 0.0051,
      "step": 300
    },
    {
      "epoch": 3.78,
      "learning_rate": 7.083333333333334e-05,
      "loss": 0.0067,
      "step": 310
    },
    {
      "epoch": 3.9,
      "learning_rate": 6.944444444444444e-05,
      "loss": 0.0147,
      "step": 320
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.17557735741138458,
      "eval_runtime": 3.8616,
      "eval_samples_per_second": 37.549,
      "eval_steps_per_second": 2.59,
      "step": 328
    },
    {
      "epoch": 4.02,
      "learning_rate": 6.805555555555556e-05,
      "loss": 0.0212,
      "step": 330
    },
    {
      "epoch": 4.15,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.0073,
      "step": 340
    },
    {
      "epoch": 4.27,
      "learning_rate": 6.527777777777778e-05,
      "loss": 0.004,
      "step": 350
    },
    {
      "epoch": 4.39,
      "learning_rate": 6.388888888888888e-05,
      "loss": 0.0049,
      "step": 360
    },
    {
      "epoch": 4.51,
      "learning_rate": 6.25e-05,
      "loss": 0.0086,
      "step": 370
    },
    {
      "epoch": 4.63,
      "learning_rate": 6.111111111111112e-05,
      "loss": 0.0051,
      "step": 380
    },
    {
      "epoch": 4.76,
      "learning_rate": 5.972222222222223e-05,
      "loss": 0.0009,
      "step": 390
    },
    {
      "epoch": 4.88,
      "learning_rate": 5.833333333333334e-05,
      "loss": 0.005,
      "step": 400
    },
    {
      "epoch": 5.0,
      "learning_rate": 5.6944444444444445e-05,
      "loss": 0.0025,
      "step": 410
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.19509804248809814,
      "eval_runtime": 3.8565,
      "eval_samples_per_second": 37.599,
      "eval_steps_per_second": 2.593,
      "step": 410
    },
    {
      "epoch": 5.12,
      "learning_rate": 5.555555555555556e-05,
      "loss": 0.0039,
      "step": 420
    },
    {
      "epoch": 5.24,
      "learning_rate": 5.4166666666666664e-05,
      "loss": 0.0037,
      "step": 430
    },
    {
      "epoch": 5.37,
      "learning_rate": 5.2777777777777784e-05,
      "loss": 0.0032,
      "step": 440
    },
    {
      "epoch": 5.49,
      "learning_rate": 5.138888888888889e-05,
      "loss": 0.0002,
      "step": 450
    },
    {
      "epoch": 5.61,
      "learning_rate": 5e-05,
      "loss": 0.006,
      "step": 460
    },
    {
      "epoch": 5.73,
      "learning_rate": 4.8611111111111115e-05,
      "loss": 0.0067,
      "step": 470
    },
    {
      "epoch": 5.85,
      "learning_rate": 4.722222222222222e-05,
      "loss": 0.0046,
      "step": 480
    },
    {
      "epoch": 5.98,
      "learning_rate": 4.5833333333333334e-05,
      "loss": 0.0009,
      "step": 490
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.21209779381752014,
      "eval_runtime": 3.8607,
      "eval_samples_per_second": 37.558,
      "eval_steps_per_second": 2.59,
      "step": 492
    },
    {
      "epoch": 6.1,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.0017,
      "step": 500
    }
  ],
  "max_steps": 820,
  "num_train_epochs": 10,
  "total_flos": 0.0,
  "trial_name": null,
  "trial_params": null
}