{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.935625917383099,
  "global_step": 14000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1,
      "learning_rate": 1.930104144824212e-05,
      "loss": 2.0398,
      "step": 500
    },
    {
      "epoch": 0.21,
      "learning_rate": 1.860208289648424e-05,
      "loss": 1.7138,
      "step": 1000
    },
    {
      "epoch": 0.31,
      "learning_rate": 1.790312434472636e-05,
      "loss": 1.6022,
      "step": 1500
    },
    {
      "epoch": 0.42,
      "learning_rate": 1.720416579296848e-05,
      "loss": 1.5263,
      "step": 2000
    },
    {
      "epoch": 0.52,
      "learning_rate": 1.6505207241210597e-05,
      "loss": 1.4804,
      "step": 2500
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.5806248689452716e-05,
      "loss": 1.434,
      "step": 3000
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.5107290137694836e-05,
      "loss": 1.4131,
      "step": 3500
    },
    {
      "epoch": 0.84,
      "learning_rate": 1.4408331585936954e-05,
      "loss": 1.3862,
      "step": 4000
    },
    {
      "epoch": 0.94,
      "learning_rate": 1.3709373034179074e-05,
      "loss": 1.3675,
      "step": 4500
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.4010822772979736,
      "eval_runtime": 3146.6255,
      "eval_samples_per_second": 11.181,
      "eval_steps_per_second": 1.398,
      "step": 4769
    },
    {
      "epoch": 1.05,
      "learning_rate": 1.3010414482421195e-05,
      "loss": 1.3461,
      "step": 5000
    },
    {
      "epoch": 1.15,
      "learning_rate": 1.2311455930663311e-05,
      "loss": 1.3293,
      "step": 5500
    },
    {
      "epoch": 1.26,
      "learning_rate": 1.1612497378905431e-05,
      "loss": 1.325,
      "step": 6000
    },
    {
      "epoch": 1.36,
      "learning_rate": 1.0913538827147552e-05,
      "loss": 1.3074,
      "step": 6500
    },
    {
      "epoch": 1.47,
      "learning_rate": 1.021458027538967e-05,
      "loss": 1.3023,
      "step": 7000
    },
    {
      "epoch": 1.57,
      "learning_rate": 9.51562172363179e-06,
      "loss": 1.2898,
      "step": 7500
    },
    {
      "epoch": 1.68,
      "learning_rate": 8.816663171873909e-06,
      "loss": 1.2822,
      "step": 8000
    },
    {
      "epoch": 1.78,
      "learning_rate": 8.117704620116029e-06,
      "loss": 1.2721,
      "step": 8500
    },
    {
      "epoch": 1.89,
      "learning_rate": 7.418746068358147e-06,
      "loss": 1.2596,
      "step": 9000
    },
    {
      "epoch": 1.99,
      "learning_rate": 6.719787516600266e-06,
      "loss": 1.2635,
      "step": 9500
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.3475877046585083,
      "eval_runtime": 2879.1327,
      "eval_samples_per_second": 12.22,
      "eval_steps_per_second": 1.528,
      "step": 9538
    },
    {
      "epoch": 2.1,
      "learning_rate": 6.020828964842386e-06,
      "loss": 1.2592,
      "step": 10000
    },
    {
      "epoch": 2.2,
      "learning_rate": 5.321870413084504e-06,
      "loss": 1.2459,
      "step": 10500
    },
    {
      "epoch": 2.31,
      "learning_rate": 4.6229118613266235e-06,
      "loss": 1.2477,
      "step": 11000
    },
    {
      "epoch": 2.41,
      "learning_rate": 3.923953309568743e-06,
      "loss": 1.249,
      "step": 11500
    },
    {
      "epoch": 2.52,
      "learning_rate": 3.224994757810862e-06,
      "loss": 1.2376,
      "step": 12000
    },
    {
      "epoch": 2.62,
      "learning_rate": 2.5260362060529814e-06,
      "loss": 1.238,
      "step": 12500
    },
    {
      "epoch": 2.73,
      "learning_rate": 1.8270776542951004e-06,
      "loss": 1.2409,
      "step": 13000
    },
    {
      "epoch": 2.83,
      "learning_rate": 1.1281191025372195e-06,
      "loss": 1.2365,
      "step": 13500
    },
    {
      "epoch": 2.94,
      "learning_rate": 4.291605507793388e-07,
      "loss": 1.237,
      "step": 14000
    }
  ],
  "max_steps": 14307,
  "num_train_epochs": 3,
  "total_flos": 3657827887349760.0,
  "trial_name": null,
  "trial_params": null
}