{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 100.0,
  "global_step": 24100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 2.07,
      "learning_rate": 2.937759336099585e-05,
      "loss": 0.3885,
      "step": 500
    },
    {
      "epoch": 4.15,
      "learning_rate": 2.8755186721991704e-05,
      "loss": 0.2791,
      "step": 1000
    },
    {
      "epoch": 6.22,
      "learning_rate": 2.8132780082987553e-05,
      "loss": 0.2363,
      "step": 1500
    },
    {
      "epoch": 8.3,
      "learning_rate": 2.75103734439834e-05,
      "loss": 0.2137,
      "step": 2000
    },
    {
      "epoch": 10.37,
      "learning_rate": 2.6887966804979253e-05,
      "loss": 0.1969,
      "step": 2500
    },
    {
      "epoch": 12.45,
      "learning_rate": 2.6265560165975105e-05,
      "loss": 0.1854,
      "step": 3000
    },
    {
      "epoch": 14.52,
      "learning_rate": 2.5643153526970954e-05,
      "loss": 0.1694,
      "step": 3500
    },
    {
      "epoch": 16.6,
      "learning_rate": 2.5020746887966805e-05,
      "loss": 0.1722,
      "step": 4000
    },
    {
      "epoch": 18.67,
      "learning_rate": 2.4398340248962657e-05,
      "loss": 0.1514,
      "step": 4500
    },
    {
      "epoch": 20.75,
      "learning_rate": 2.3775933609958506e-05,
      "loss": 0.1499,
      "step": 5000
    },
    {
      "epoch": 22.82,
      "learning_rate": 2.3153526970954358e-05,
      "loss": 0.1464,
      "step": 5500
    },
    {
      "epoch": 24.9,
      "learning_rate": 2.253112033195021e-05,
      "loss": 0.1343,
      "step": 6000
    },
    {
      "epoch": 26.97,
      "learning_rate": 2.1908713692946058e-05,
      "loss": 0.1304,
      "step": 6500
    },
    {
      "epoch": 29.05,
      "learning_rate": 2.128630705394191e-05,
      "loss": 0.1243,
      "step": 7000
    },
    {
      "epoch": 31.12,
      "learning_rate": 2.0663900414937758e-05,
      "loss": 0.1165,
      "step": 7500
    },
    {
      "epoch": 33.2,
      "learning_rate": 2.004149377593361e-05,
      "loss": 0.119,
      "step": 8000
    },
    {
      "epoch": 35.27,
      "learning_rate": 1.9419087136929462e-05,
      "loss": 0.112,
      "step": 8500
    },
    {
      "epoch": 37.34,
      "learning_rate": 1.879668049792531e-05,
      "loss": 0.1061,
      "step": 9000
    },
    {
      "epoch": 39.42,
      "learning_rate": 1.8174273858921162e-05,
      "loss": 0.1048,
      "step": 9500
    },
    {
      "epoch": 41.49,
      "learning_rate": 1.7551867219917014e-05,
      "loss": 0.1008,
      "step": 10000
    },
    {
      "epoch": 43.57,
      "learning_rate": 1.6929460580912863e-05,
      "loss": 0.0924,
      "step": 10500
    },
    {
      "epoch": 45.64,
      "learning_rate": 1.630705394190871e-05,
      "loss": 0.0912,
      "step": 11000
    },
    {
      "epoch": 47.72,
      "learning_rate": 1.5684647302904566e-05,
      "loss": 0.0942,
      "step": 11500
    },
    {
      "epoch": 49.79,
      "learning_rate": 1.5062240663900415e-05,
      "loss": 0.0855,
      "step": 12000
    },
    {
      "epoch": 51.87,
      "learning_rate": 1.4439834024896267e-05,
      "loss": 0.0855,
      "step": 12500
    },
    {
      "epoch": 53.94,
      "learning_rate": 1.3817427385892115e-05,
      "loss": 0.0801,
      "step": 13000
    },
    {
      "epoch": 56.02,
      "learning_rate": 1.3195020746887967e-05,
      "loss": 0.0787,
      "step": 13500
    },
    {
      "epoch": 58.09,
      "learning_rate": 1.2572614107883817e-05,
      "loss": 0.0777,
      "step": 14000
    },
    {
      "epoch": 60.17,
      "learning_rate": 1.1950207468879667e-05,
      "loss": 0.0733,
      "step": 14500
    },
    {
      "epoch": 62.24,
      "learning_rate": 1.132780082987552e-05,
      "loss": 0.0722,
      "step": 15000
    },
    {
      "epoch": 64.32,
      "learning_rate": 1.070539419087137e-05,
      "loss": 0.0713,
      "step": 15500
    },
    {
      "epoch": 66.39,
      "learning_rate": 1.0082987551867221e-05,
      "loss": 0.0679,
      "step": 16000
    },
    {
      "epoch": 68.46,
      "learning_rate": 9.46058091286307e-06,
      "loss": 0.0685,
      "step": 16500
    },
    {
      "epoch": 70.54,
      "learning_rate": 8.838174273858922e-06,
      "loss": 0.0656,
      "step": 17000
    },
    {
      "epoch": 72.61,
      "learning_rate": 8.215767634854773e-06,
      "loss": 0.0657,
      "step": 17500
    },
    {
      "epoch": 74.69,
      "learning_rate": 7.593360995850622e-06,
      "loss": 0.0611,
      "step": 18000
    },
    {
      "epoch": 76.76,
      "learning_rate": 6.970954356846474e-06,
      "loss": 0.061,
      "step": 18500
    },
    {
      "epoch": 78.84,
      "learning_rate": 6.348547717842324e-06,
      "loss": 0.0605,
      "step": 19000
    },
    {
      "epoch": 80.91,
      "learning_rate": 5.726141078838174e-06,
      "loss": 0.0602,
      "step": 19500
    },
    {
      "epoch": 82.99,
      "learning_rate": 5.103734439834025e-06,
      "loss": 0.0585,
      "step": 20000
    },
    {
      "epoch": 85.06,
      "learning_rate": 4.481327800829875e-06,
      "loss": 0.0583,
      "step": 20500
    },
    {
      "epoch": 87.14,
      "learning_rate": 3.858921161825726e-06,
      "loss": 0.056,
      "step": 21000
    },
    {
      "epoch": 89.21,
      "learning_rate": 3.236514522821577e-06,
      "loss": 0.055,
      "step": 21500
    },
    {
      "epoch": 91.29,
      "learning_rate": 2.6141078838174274e-06,
      "loss": 0.0582,
      "step": 22000
    },
    {
      "epoch": 93.36,
      "learning_rate": 1.991701244813278e-06,
      "loss": 0.0556,
      "step": 22500
    },
    {
      "epoch": 95.44,
      "learning_rate": 1.3692946058091286e-06,
      "loss": 0.0545,
      "step": 23000
    },
    {
      "epoch": 97.51,
      "learning_rate": 7.468879668049792e-07,
      "loss": 0.0554,
      "step": 23500
    },
    {
      "epoch": 99.59,
      "learning_rate": 1.2448132780082988e-07,
      "loss": 0.0548,
      "step": 24000
    },
    {
      "epoch": 100.0,
      "step": 24100,
      "total_flos": 3.90325755445248e+16,
      "train_loss": 0.1092665815749109,
      "train_runtime": 5451.3155,
      "train_samples_per_second": 52.905,
      "train_steps_per_second": 4.421
    }
  ],
  "max_steps": 24100,
  "num_train_epochs": 100,
  "total_flos": 3.90325755445248e+16,
  "trial_name": null,
  "trial_params": null
}