{
  "best_metric": 0.4640955328941345,
  "best_model_checkpoint": "./output/checkpoint-400",
  "epoch": 4.299630500503863,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.11,
      "learning_rate": 5e-06,
      "loss": 1.5355,
      "step": 10
    },
    {
      "epoch": 0.21,
      "learning_rate": 1e-05,
      "loss": 1.5218,
      "step": 20
    },
    {
      "epoch": 0.32,
      "learning_rate": 1.5e-05,
      "loss": 1.4822,
      "step": 30
    },
    {
      "epoch": 0.43,
      "learning_rate": 2e-05,
      "loss": 1.4292,
      "step": 40
    },
    {
      "epoch": 0.54,
      "learning_rate": 2.5e-05,
      "loss": 1.3539,
      "step": 50
    },
    {
      "epoch": 0.64,
      "learning_rate": 3e-05,
      "loss": 1.2282,
      "step": 60
    },
    {
      "epoch": 0.75,
      "learning_rate": 3.5e-05,
      "loss": 1.0398,
      "step": 70
    },
    {
      "epoch": 0.86,
      "learning_rate": 4e-05,
      "loss": 0.8365,
      "step": 80
    },
    {
      "epoch": 0.97,
      "learning_rate": 4.5e-05,
      "loss": 0.6591,
      "step": 90
    },
    {
      "epoch": 1.07,
      "learning_rate": 5e-05,
      "loss": 0.5647,
      "step": 100
    },
    {
      "epoch": 1.18,
      "learning_rate": 4.863013698630137e-05,
      "loss": 0.5343,
      "step": 110
    },
    {
      "epoch": 1.29,
      "learning_rate": 4.726027397260274e-05,
      "loss": 0.5249,
      "step": 120
    },
    {
      "epoch": 1.4,
      "learning_rate": 4.589041095890411e-05,
      "loss": 0.5126,
      "step": 130
    },
    {
      "epoch": 1.5,
      "learning_rate": 4.452054794520548e-05,
      "loss": 0.5099,
      "step": 140
    },
    {
      "epoch": 1.61,
      "learning_rate": 4.3150684931506855e-05,
      "loss": 0.5067,
      "step": 150
    },
    {
      "epoch": 1.72,
      "learning_rate": 4.1780821917808224e-05,
      "loss": 0.5047,
      "step": 160
    },
    {
      "epoch": 1.83,
      "learning_rate": 4.041095890410959e-05,
      "loss": 0.4949,
      "step": 170
    },
    {
      "epoch": 1.93,
      "learning_rate": 3.904109589041096e-05,
      "loss": 0.4961,
      "step": 180
    },
    {
      "epoch": 2.04,
      "learning_rate": 3.767123287671233e-05,
      "loss": 0.4933,
      "step": 190
    },
    {
      "epoch": 2.15,
      "learning_rate": 3.63013698630137e-05,
      "loss": 0.488,
      "step": 200
    },
    {
      "epoch": 2.15,
      "eval_loss": 0.4859355092048645,
      "eval_runtime": 182.5747,
      "eval_samples_per_second": 10.954,
      "eval_steps_per_second": 1.369,
      "step": 200
    },
    {
      "epoch": 2.26,
      "learning_rate": 3.493150684931507e-05,
      "loss": 0.4836,
      "step": 210
    },
    {
      "epoch": 2.36,
      "learning_rate": 3.356164383561644e-05,
      "loss": 0.4798,
      "step": 220
    },
    {
      "epoch": 2.47,
      "learning_rate": 3.219178082191781e-05,
      "loss": 0.4815,
      "step": 230
    },
    {
      "epoch": 2.58,
      "learning_rate": 3.082191780821918e-05,
      "loss": 0.4738,
      "step": 240
    },
    {
      "epoch": 2.69,
      "learning_rate": 2.945205479452055e-05,
      "loss": 0.4751,
      "step": 250
    },
    {
      "epoch": 2.79,
      "learning_rate": 2.808219178082192e-05,
      "loss": 0.4771,
      "step": 260
    },
    {
      "epoch": 2.9,
      "learning_rate": 2.671232876712329e-05,
      "loss": 0.4767,
      "step": 270
    },
    {
      "epoch": 3.01,
      "learning_rate": 2.534246575342466e-05,
      "loss": 0.4756,
      "step": 280
    },
    {
      "epoch": 3.12,
      "learning_rate": 2.3972602739726026e-05,
      "loss": 0.4681,
      "step": 290
    },
    {
      "epoch": 3.22,
      "learning_rate": 2.2602739726027396e-05,
      "loss": 0.4707,
      "step": 300
    },
    {
      "epoch": 3.33,
      "learning_rate": 2.1232876712328768e-05,
      "loss": 0.4704,
      "step": 310
    },
    {
      "epoch": 3.44,
      "learning_rate": 1.9863013698630137e-05,
      "loss": 0.4677,
      "step": 320
    },
    {
      "epoch": 3.55,
      "learning_rate": 1.8493150684931506e-05,
      "loss": 0.4676,
      "step": 330
    },
    {
      "epoch": 3.65,
      "learning_rate": 1.7123287671232875e-05,
      "loss": 0.4659,
      "step": 340
    },
    {
      "epoch": 3.76,
      "learning_rate": 1.5753424657534248e-05,
      "loss": 0.4717,
      "step": 350
    },
    {
      "epoch": 3.87,
      "learning_rate": 1.4383561643835617e-05,
      "loss": 0.4679,
      "step": 360
    },
    {
      "epoch": 3.98,
      "learning_rate": 1.3013698630136986e-05,
      "loss": 0.466,
      "step": 370
    },
    {
      "epoch": 4.08,
      "learning_rate": 1.1643835616438355e-05,
      "loss": 0.4644,
      "step": 380
    },
    {
      "epoch": 4.19,
      "learning_rate": 1.0273972602739726e-05,
      "loss": 0.4671,
      "step": 390
    },
    {
      "epoch": 4.3,
      "learning_rate": 8.904109589041095e-06,
      "loss": 0.4649,
      "step": 400
    },
    {
      "epoch": 4.3,
      "eval_loss": 0.4640955328941345,
      "eval_runtime": 182.5257,
      "eval_samples_per_second": 10.957,
      "eval_steps_per_second": 1.37,
      "step": 400
    }
  ],
  "max_steps": 465,
  "num_train_epochs": 5,
  "total_flos": 2.0793225403328102e+18,
  "trial_name": null,
  "trial_params": null
}