{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "global_step": 27665,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09,
      "learning_rate": 2.9457798662570034e-05,
      "loss": 1.4635,
      "step": 500
    },
    {
      "epoch": 0.18,
      "learning_rate": 2.891559732514007e-05,
      "loss": 1.036,
      "step": 1000
    },
    {
      "epoch": 0.27,
      "learning_rate": 2.83733959877101e-05,
      "loss": 0.8904,
      "step": 1500
    },
    {
      "epoch": 0.36,
      "learning_rate": 2.783119465028014e-05,
      "loss": 0.8604,
      "step": 2000
    },
    {
      "epoch": 0.45,
      "learning_rate": 2.7288993312850172e-05,
      "loss": 0.8931,
      "step": 2500
    },
    {
      "epoch": 0.54,
      "learning_rate": 2.6746791975420206e-05,
      "loss": 0.8083,
      "step": 3000
    },
    {
      "epoch": 0.63,
      "learning_rate": 2.6204590637990243e-05,
      "loss": 0.8035,
      "step": 3500
    },
    {
      "epoch": 0.72,
      "learning_rate": 2.5662389300560273e-05,
      "loss": 0.8022,
      "step": 4000
    },
    {
      "epoch": 0.81,
      "learning_rate": 2.512018796313031e-05,
      "loss": 0.8007,
      "step": 4500
    },
    {
      "epoch": 0.9,
      "learning_rate": 2.4577986625700344e-05,
      "loss": 0.7696,
      "step": 5000
    },
    {
      "epoch": 0.99,
      "learning_rate": 2.4035785288270377e-05,
      "loss": 0.8287,
      "step": 5500
    },
    {
      "epoch": 1.08,
      "learning_rate": 2.3493583950840414e-05,
      "loss": 0.5808,
      "step": 6000
    },
    {
      "epoch": 1.17,
      "learning_rate": 2.2951382613410445e-05,
      "loss": 0.5755,
      "step": 6500
    },
    {
      "epoch": 1.27,
      "learning_rate": 2.2409181275980482e-05,
      "loss": 0.5957,
      "step": 7000
    },
    {
      "epoch": 1.36,
      "learning_rate": 2.1866979938550515e-05,
      "loss": 0.5736,
      "step": 7500
    },
    {
      "epoch": 1.45,
      "learning_rate": 2.132477860112055e-05,
      "loss": 0.5806,
      "step": 8000
    },
    {
      "epoch": 1.54,
      "learning_rate": 2.0782577263690586e-05,
      "loss": 0.6211,
      "step": 8500
    },
    {
      "epoch": 1.63,
      "learning_rate": 2.024037592626062e-05,
      "loss": 0.5684,
      "step": 9000
    },
    {
      "epoch": 1.72,
      "learning_rate": 1.9698174588830653e-05,
      "loss": 0.5927,
      "step": 9500
    },
    {
      "epoch": 1.81,
      "learning_rate": 1.9155973251400687e-05,
      "loss": 0.5779,
      "step": 10000
    },
    {
      "epoch": 1.9,
      "learning_rate": 1.861377191397072e-05,
      "loss": 0.5363,
      "step": 10500
    },
    {
      "epoch": 1.99,
      "learning_rate": 1.8071570576540758e-05,
      "loss": 0.5872,
      "step": 11000
    },
    {
      "epoch": 2.08,
      "learning_rate": 1.752936923911079e-05,
      "loss": 0.3776,
      "step": 11500
    },
    {
      "epoch": 2.17,
      "learning_rate": 1.6987167901680825e-05,
      "loss": 0.3865,
      "step": 12000
    },
    {
      "epoch": 2.26,
      "learning_rate": 1.644496656425086e-05,
      "loss": 0.3685,
      "step": 12500
    },
    {
      "epoch": 2.35,
      "learning_rate": 1.5902765226820892e-05,
      "loss": 0.3677,
      "step": 13000
    },
    {
      "epoch": 2.44,
      "learning_rate": 1.536056388939093e-05,
      "loss": 0.3914,
      "step": 13500
    },
    {
      "epoch": 2.53,
      "learning_rate": 1.4818362551960961e-05,
      "loss": 0.3858,
      "step": 14000
    },
    {
      "epoch": 2.62,
      "learning_rate": 1.4276161214530996e-05,
      "loss": 0.3726,
      "step": 14500
    },
    {
      "epoch": 2.71,
      "learning_rate": 1.373395987710103e-05,
      "loss": 0.3758,
      "step": 15000
    },
    {
      "epoch": 2.8,
      "learning_rate": 1.3191758539671065e-05,
      "loss": 0.389,
      "step": 15500
    },
    {
      "epoch": 2.89,
      "learning_rate": 1.26495572022411e-05,
      "loss": 0.3689,
      "step": 16000
    },
    {
      "epoch": 2.98,
      "learning_rate": 1.2107355864811133e-05,
      "loss": 0.3964,
      "step": 16500
    },
    {
      "epoch": 3.07,
      "learning_rate": 1.1565154527381168e-05,
      "loss": 0.2548,
      "step": 17000
    },
    {
      "epoch": 3.16,
      "learning_rate": 1.1022953189951202e-05,
      "loss": 0.2504,
      "step": 17500
    },
    {
      "epoch": 3.25,
      "learning_rate": 1.0480751852521237e-05,
      "loss": 0.2285,
      "step": 18000
    },
    {
      "epoch": 3.34,
      "learning_rate": 9.938550515091272e-06,
      "loss": 0.2209,
      "step": 18500
    },
    {
      "epoch": 3.43,
      "learning_rate": 9.396349177661304e-06,
      "loss": 0.2206,
      "step": 19000
    },
    {
      "epoch": 3.52,
      "learning_rate": 8.85414784023134e-06,
      "loss": 0.2366,
      "step": 19500
    },
    {
      "epoch": 3.61,
      "learning_rate": 8.311946502801373e-06,
      "loss": 0.2187,
      "step": 20000
    },
    {
      "epoch": 3.71,
      "learning_rate": 7.769745165371408e-06,
      "loss": 0.2346,
      "step": 20500
    },
    {
      "epoch": 3.8,
      "learning_rate": 7.227543827941442e-06,
      "loss": 0.2387,
      "step": 21000
    },
    {
      "epoch": 3.89,
      "learning_rate": 6.685342490511477e-06,
      "loss": 0.2274,
      "step": 21500
    },
    {
      "epoch": 3.98,
      "learning_rate": 6.143141153081511e-06,
      "loss": 0.2316,
      "step": 22000
    },
    {
      "epoch": 4.07,
      "learning_rate": 5.6009398156515455e-06,
      "loss": 0.1797,
      "step": 22500
    },
    {
      "epoch": 4.16,
      "learning_rate": 5.058738478221579e-06,
      "loss": 0.1121,
      "step": 23000
    },
    {
      "epoch": 4.25,
      "learning_rate": 4.516537140791614e-06,
      "loss": 0.1107,
      "step": 23500
    },
    {
      "epoch": 4.34,
      "learning_rate": 3.974335803361649e-06,
      "loss": 0.1226,
      "step": 24000
    },
    {
      "epoch": 4.43,
      "learning_rate": 3.4321344659316826e-06,
      "loss": 0.1159,
      "step": 24500
    },
    {
      "epoch": 4.52,
      "learning_rate": 2.889933128501717e-06,
      "loss": 0.1211,
      "step": 25000
    },
    {
      "epoch": 4.61,
      "learning_rate": 2.3477317910717515e-06,
      "loss": 0.1413,
      "step": 25500
    },
    {
      "epoch": 4.7,
      "learning_rate": 1.8055304536417856e-06,
      "loss": 0.1346,
      "step": 26000
    },
    {
      "epoch": 4.79,
      "learning_rate": 1.26332911621182e-06,
      "loss": 0.1065,
      "step": 26500
    },
    {
      "epoch": 4.88,
      "learning_rate": 7.211277787818543e-07,
      "loss": 0.1064,
      "step": 27000
    },
    {
      "epoch": 4.97,
      "learning_rate": 1.7892644135188868e-07,
      "loss": 0.0909,
      "step": 27500
    },
    {
      "epoch": 5.0,
      "step": 27665,
      "total_flos": 3.407082354156503e+17,
      "train_runtime": 31051.1493,
      "train_samples_per_second": 14.255,
      "train_steps_per_second": 0.891
    }
  ],
  "max_steps": 27665,
  "num_train_epochs": 5,
  "total_flos": 3.407082354156503e+17,
  "trial_name": null,
  "trial_params": null
}