{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 0.0,
      "loss": 0.7227,
      "step": 10
    },
    {
      "epoch": 0.07,
      "learning_rate": 1.6666666666666667e-06,
      "loss": 0.7667,
      "step": 20
    },
    {
      "epoch": 0.1,
      "learning_rate": 1e-05,
      "loss": 0.2428,
      "step": 30
    },
    {
      "epoch": 0.13,
      "learning_rate": 1.8333333333333333e-05,
      "loss": 0.0194,
      "step": 40
    },
    {
      "epoch": 0.17,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 0.0071,
      "step": 50
    },
    {
      "epoch": 0.2,
      "learning_rate": 3.5e-05,
      "loss": 0.0131,
      "step": 60
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.3333333333333334e-05,
      "loss": 0.0079,
      "step": 70
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.981481481481482e-05,
      "loss": 0.0094,
      "step": 80
    },
    {
      "epoch": 0.3,
      "learning_rate": 4.888888888888889e-05,
      "loss": 0.0177,
      "step": 90
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.796296296296296e-05,
      "loss": 0.0083,
      "step": 100
    },
    {
      "epoch": 0.37,
      "learning_rate": 4.703703703703704e-05,
      "loss": 0.0226,
      "step": 110
    },
    {
      "epoch": 0.4,
      "learning_rate": 4.6111111111111115e-05,
      "loss": 0.0102,
      "step": 120
    },
    {
      "epoch": 0.43,
      "learning_rate": 4.518518518518519e-05,
      "loss": 0.0197,
      "step": 130
    },
    {
      "epoch": 0.47,
      "learning_rate": 4.425925925925926e-05,
      "loss": 0.0312,
      "step": 140
    },
    {
      "epoch": 0.5,
      "learning_rate": 4.3333333333333334e-05,
      "loss": 0.0282,
      "step": 150
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.240740740740741e-05,
      "loss": 0.0231,
      "step": 160
    },
    {
      "epoch": 0.57,
      "learning_rate": 4.148148148148148e-05,
      "loss": 0.0376,
      "step": 170
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.055555555555556e-05,
      "loss": 0.0209,
      "step": 180
    },
    {
      "epoch": 0.63,
      "learning_rate": 3.981481481481482e-05,
      "loss": 0.058,
      "step": 190
    },
    {
      "epoch": 0.67,
      "learning_rate": 3.888888888888889e-05,
      "loss": 0.0302,
      "step": 200
    },
    {
      "epoch": 0.7,
      "learning_rate": 3.7962962962962964e-05,
      "loss": 0.0434,
      "step": 210
    },
    {
      "epoch": 0.73,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 0.0432,
      "step": 220
    },
    {
      "epoch": 0.77,
      "learning_rate": 3.611111111111111e-05,
      "loss": 0.0493,
      "step": 230
    },
    {
      "epoch": 0.8,
      "learning_rate": 3.527777777777778e-05,
      "loss": 0.0588,
      "step": 240
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.435185185185185e-05,
      "loss": 0.0725,
      "step": 250
    },
    {
      "epoch": 0.87,
      "learning_rate": 3.3425925925925924e-05,
      "loss": 0.0859,
      "step": 260
    },
    {
      "epoch": 0.9,
      "learning_rate": 3.2500000000000004e-05,
      "loss": 0.0535,
      "step": 270
    },
    {
      "epoch": 0.93,
      "learning_rate": 3.157407407407408e-05,
      "loss": 0.0475,
      "step": 280
    },
    {
      "epoch": 0.97,
      "learning_rate": 3.064814814814815e-05,
      "loss": 0.0291,
      "step": 290
    },
    {
      "epoch": 1.0,
      "learning_rate": 2.9722222222222223e-05,
      "loss": 0.0327,
      "step": 300
    },
    {
      "epoch": 1.03,
      "learning_rate": 2.87962962962963e-05,
      "loss": 0.0254,
      "step": 310
    },
    {
      "epoch": 1.07,
      "learning_rate": 2.7870370370370375e-05,
      "loss": 0.0486,
      "step": 320
    },
    {
      "epoch": 1.1,
      "learning_rate": 2.6944444444444445e-05,
      "loss": 0.0413,
      "step": 330
    },
    {
      "epoch": 1.13,
      "learning_rate": 2.601851851851852e-05,
      "loss": 0.0229,
      "step": 340
    },
    {
      "epoch": 1.17,
      "learning_rate": 2.5092592592592594e-05,
      "loss": 0.0191,
      "step": 350
    },
    {
      "epoch": 1.2,
      "learning_rate": 2.4166666666666667e-05,
      "loss": 0.0378,
      "step": 360
    },
    {
      "epoch": 1.23,
      "learning_rate": 2.324074074074074e-05,
      "loss": 0.0244,
      "step": 370
    },
    {
      "epoch": 1.27,
      "learning_rate": 2.2314814814814816e-05,
      "loss": 0.014,
      "step": 380
    },
    {
      "epoch": 1.3,
      "learning_rate": 2.138888888888889e-05,
      "loss": 0.0264,
      "step": 390
    },
    {
      "epoch": 1.33,
      "learning_rate": 2.0462962962962965e-05,
      "loss": 0.043,
      "step": 400
    },
    {
      "epoch": 1.37,
      "learning_rate": 1.9537037037037038e-05,
      "loss": 0.0356,
      "step": 410
    },
    {
      "epoch": 1.4,
      "learning_rate": 1.861111111111111e-05,
      "loss": 0.023,
      "step": 420
    },
    {
      "epoch": 1.43,
      "learning_rate": 1.7685185185185184e-05,
      "loss": 0.0285,
      "step": 430
    },
    {
      "epoch": 1.47,
      "learning_rate": 1.675925925925926e-05,
      "loss": 0.0189,
      "step": 440
    },
    {
      "epoch": 1.5,
      "learning_rate": 1.5833333333333333e-05,
      "loss": 0.0296,
      "step": 450
    },
    {
      "epoch": 1.53,
      "learning_rate": 1.490740740740741e-05,
      "loss": 0.0189,
      "step": 460
    },
    {
      "epoch": 1.57,
      "learning_rate": 1.3981481481481482e-05,
      "loss": 0.0357,
      "step": 470
    },
    {
      "epoch": 1.6,
      "learning_rate": 1.3055555555555557e-05,
      "loss": 0.0235,
      "step": 480
    },
    {
      "epoch": 1.63,
      "learning_rate": 1.212962962962963e-05,
      "loss": 0.0396,
      "step": 490
    },
    {
      "epoch": 1.67,
      "learning_rate": 1.1203703703703704e-05,
      "loss": 0.0236,
      "step": 500
    },
    {
      "epoch": 1.7,
      "learning_rate": 1.0277777777777777e-05,
      "loss": 0.0147,
      "step": 510
    },
    {
      "epoch": 1.73,
      "learning_rate": 9.351851851851852e-06,
      "loss": 0.0135,
      "step": 520
    },
    {
      "epoch": 1.77,
      "learning_rate": 8.425925925925926e-06,
      "loss": 0.0138,
      "step": 530
    },
    {
      "epoch": 1.8,
      "learning_rate": 7.5e-06,
      "loss": 0.024,
      "step": 540
    },
    {
      "epoch": 1.83,
      "learning_rate": 6.574074074074074e-06,
      "loss": 0.0181,
      "step": 550
    },
    {
      "epoch": 1.87,
      "learning_rate": 5.6481481481481485e-06,
      "loss": 0.0201,
      "step": 560
    },
    {
      "epoch": 1.9,
      "learning_rate": 4.722222222222222e-06,
      "loss": 0.0276,
      "step": 570
    },
    {
      "epoch": 1.93,
      "learning_rate": 3.7962962962962964e-06,
      "loss": 0.0287,
      "step": 580
    },
    {
      "epoch": 1.97,
      "learning_rate": 2.8703703703703706e-06,
      "loss": 0.0256,
      "step": 590
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.9444444444444444e-06,
      "loss": 0.018,
      "step": 600
    },
    {
      "epoch": 2.0,
      "step": 600,
      "total_flos": 1.5734632955772928e+17,
      "train_loss": 0.05660845875740051,
      "train_runtime": 609.6094,
      "train_samples_per_second": 19.685,
      "train_steps_per_second": 0.984
    }
  ],
  "logging_steps": 10,
  "max_steps": 600,
  "num_train_epochs": 2,
  "save_steps": 500,
  "total_flos": 1.5734632955772928e+17,
  "trial_name": null,
  "trial_params": null
}