{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.9997518764344644,
  "global_step": 16120,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.12,
      "learning_rate": 4.8449131513647645e-05,
      "loss": 7.5778,
      "step": 500
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.689826302729529e-05,
      "loss": 5.6514,
      "step": 1000
    },
    {
      "epoch": 0.37,
      "learning_rate": 4.534739454094293e-05,
      "loss": 4.753,
      "step": 1500
    },
    {
      "epoch": 0.5,
      "learning_rate": 4.379652605459057e-05,
      "loss": 4.1203,
      "step": 2000
    },
    {
      "epoch": 0.62,
      "learning_rate": 4.2245657568238216e-05,
      "loss": 3.6995,
      "step": 2500
    },
    {
      "epoch": 0.74,
      "learning_rate": 4.069478908188586e-05,
      "loss": 3.4027,
      "step": 3000
    },
    {
      "epoch": 0.87,
      "learning_rate": 3.91439205955335e-05,
      "loss": 3.1636,
      "step": 3500
    },
    {
      "epoch": 0.99,
      "learning_rate": 3.7593052109181144e-05,
      "loss": 2.9862,
      "step": 4000
    },
    {
      "epoch": 1.12,
      "learning_rate": 3.6042183622828787e-05,
      "loss": 2.8469,
      "step": 4500
    },
    {
      "epoch": 1.24,
      "learning_rate": 3.449131513647643e-05,
      "loss": 2.7431,
      "step": 5000
    },
    {
      "epoch": 1.36,
      "learning_rate": 3.294044665012407e-05,
      "loss": 2.6552,
      "step": 5500
    },
    {
      "epoch": 1.49,
      "learning_rate": 3.1389578163771715e-05,
      "loss": 2.5856,
      "step": 6000
    },
    {
      "epoch": 1.61,
      "learning_rate": 2.9838709677419357e-05,
      "loss": 2.5195,
      "step": 6500
    },
    {
      "epoch": 1.74,
      "learning_rate": 2.8287841191067e-05,
      "loss": 2.4654,
      "step": 7000
    },
    {
      "epoch": 1.86,
      "learning_rate": 2.6736972704714643e-05,
      "loss": 2.4238,
      "step": 7500
    },
    {
      "epoch": 1.98,
      "learning_rate": 2.5186104218362282e-05,
      "loss": 2.3821,
      "step": 8000
    },
    {
      "epoch": 2.11,
      "learning_rate": 2.3635235732009928e-05,
      "loss": 2.3322,
      "step": 8500
    },
    {
      "epoch": 2.23,
      "learning_rate": 2.208436724565757e-05,
      "loss": 2.3004,
      "step": 9000
    },
    {
      "epoch": 2.36,
      "learning_rate": 2.053349875930521e-05,
      "loss": 2.2745,
      "step": 9500
    },
    {
      "epoch": 2.48,
      "learning_rate": 1.8982630272952853e-05,
      "loss": 2.2494,
      "step": 10000
    },
    {
      "epoch": 2.61,
      "learning_rate": 1.74317617866005e-05,
      "loss": 2.2309,
      "step": 10500
    },
    {
      "epoch": 2.73,
      "learning_rate": 1.588089330024814e-05,
      "loss": 2.2102,
      "step": 11000
    },
    {
      "epoch": 2.85,
      "learning_rate": 1.4330024813895782e-05,
      "loss": 2.1912,
      "step": 11500
    },
    {
      "epoch": 2.98,
      "learning_rate": 1.2779156327543427e-05,
      "loss": 2.1743,
      "step": 12000
    },
    {
      "epoch": 3.1,
      "learning_rate": 1.1228287841191068e-05,
      "loss": 2.1509,
      "step": 12500
    },
    {
      "epoch": 3.23,
      "learning_rate": 9.67741935483871e-06,
      "loss": 2.1385,
      "step": 13000
    },
    {
      "epoch": 3.35,
      "learning_rate": 8.126550868486353e-06,
      "loss": 2.1343,
      "step": 13500
    },
    {
      "epoch": 3.47,
      "learning_rate": 6.575682382133995e-06,
      "loss": 2.1247,
      "step": 14000
    },
    {
      "epoch": 3.6,
      "learning_rate": 5.0248138957816385e-06,
      "loss": 2.1188,
      "step": 14500
    },
    {
      "epoch": 3.72,
      "learning_rate": 3.4739454094292807e-06,
      "loss": 2.1198,
      "step": 15000
    },
    {
      "epoch": 3.85,
      "learning_rate": 1.9230769230769234e-06,
      "loss": 2.1102,
      "step": 15500
    },
    {
      "epoch": 3.97,
      "learning_rate": 3.7220843672456576e-07,
      "loss": 2.106,
      "step": 16000
    },
    {
      "epoch": 4.0,
      "step": 16120,
      "total_flos": 4.540075318875832e+17,
      "train_loss": 2.836493848511954,
      "train_runtime": 50326.1237,
      "train_samples_per_second": 307.517,
      "train_steps_per_second": 0.32
    }
  ],
  "max_steps": 16120,
  "num_train_epochs": 4,
  "total_flos": 4.540075318875832e+17,
  "trial_name": null,
  "trial_params": null
}