{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.3083005260197837,
  "global_step": 25000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 4.9172842773954474e-05,
      "loss": 7.5606,
      "step": 500
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.8345685547908946e-05,
      "loss": 5.6106,
      "step": 1000
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.751852832186342e-05,
      "loss": 4.6836,
      "step": 1500
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.669137109581789e-05,
      "loss": 4.0373,
      "step": 2000
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.586421386977237e-05,
      "loss": 3.6003,
      "step": 2500
    },
    {
      "epoch": 0.4,
      "learning_rate": 4.503705664372684e-05,
      "loss": 3.2726,
      "step": 3000
    },
    {
      "epoch": 0.46,
      "learning_rate": 4.4209899417681313e-05,
      "loss": 3.0363,
      "step": 3500
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.338274219163579e-05,
      "loss": 2.8696,
      "step": 4000
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.2555584965590264e-05,
      "loss": 2.7368,
      "step": 4500
    },
    {
      "epoch": 0.66,
      "learning_rate": 4.1728427739544736e-05,
      "loss": 2.6329,
      "step": 5000
    },
    {
      "epoch": 0.73,
      "learning_rate": 4.090127051349921e-05,
      "loss": 2.5426,
      "step": 5500
    },
    {
      "epoch": 0.79,
      "learning_rate": 4.007411328745368e-05,
      "loss": 2.4677,
      "step": 6000
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.924695606140815e-05,
      "loss": 2.4017,
      "step": 6500
    },
    {
      "epoch": 0.93,
      "learning_rate": 3.8419798835362624e-05,
      "loss": 2.3466,
      "step": 7000
    },
    {
      "epoch": 0.99,
      "learning_rate": 3.75926416093171e-05,
      "loss": 2.2957,
      "step": 7500
    },
    {
      "epoch": 1.06,
      "learning_rate": 3.6765484383271575e-05,
      "loss": 2.2443,
      "step": 8000
    },
    {
      "epoch": 1.12,
      "learning_rate": 3.593832715722605e-05,
      "loss": 2.2067,
      "step": 8500
    },
    {
      "epoch": 1.19,
      "learning_rate": 3.511116993118052e-05,
      "loss": 2.17,
      "step": 9000
    },
    {
      "epoch": 1.26,
      "learning_rate": 3.428401270513499e-05,
      "loss": 2.1339,
      "step": 9500
    },
    {
      "epoch": 1.32,
      "learning_rate": 3.345685547908946e-05,
      "loss": 2.1097,
      "step": 10000
    },
    {
      "epoch": 1.39,
      "learning_rate": 3.2629698253043935e-05,
      "loss": 2.0808,
      "step": 10500
    },
    {
      "epoch": 1.46,
      "learning_rate": 3.1802541026998414e-05,
      "loss": 2.0546,
      "step": 11000
    },
    {
      "epoch": 1.52,
      "learning_rate": 3.0975383800952886e-05,
      "loss": 2.0371,
      "step": 11500
    },
    {
      "epoch": 1.59,
      "learning_rate": 3.014822657490736e-05,
      "loss": 2.0158,
      "step": 12000
    },
    {
      "epoch": 1.65,
      "learning_rate": 2.9321069348861834e-05,
      "loss": 1.9924,
      "step": 12500
    },
    {
      "epoch": 1.72,
      "learning_rate": 2.849391212281631e-05,
      "loss": 1.9762,
      "step": 13000
    },
    {
      "epoch": 1.79,
      "learning_rate": 2.766675489677078e-05,
      "loss": 1.9568,
      "step": 13500
    },
    {
      "epoch": 1.85,
      "learning_rate": 2.6839597670725253e-05,
      "loss": 1.9439,
      "step": 14000
    },
    {
      "epoch": 1.92,
      "learning_rate": 2.6012440444679725e-05,
      "loss": 1.9298,
      "step": 14500
    },
    {
      "epoch": 1.98,
      "learning_rate": 2.5185283218634197e-05,
      "loss": 1.9172,
      "step": 15000
    },
    {
      "epoch": 2.05,
      "learning_rate": 2.4358125992588673e-05,
      "loss": 1.893,
      "step": 15500
    },
    {
      "epoch": 2.12,
      "learning_rate": 2.3530968766543145e-05,
      "loss": 1.8762,
      "step": 16000
    },
    {
      "epoch": 2.18,
      "learning_rate": 2.270381154049762e-05,
      "loss": 1.8717,
      "step": 16500
    },
    {
      "epoch": 2.25,
      "learning_rate": 2.1876654314452092e-05,
      "loss": 1.8579,
      "step": 17000
    },
    {
      "epoch": 2.32,
      "learning_rate": 2.1049497088406564e-05,
      "loss": 1.8454,
      "step": 17500
    },
    {
      "epoch": 2.38,
      "learning_rate": 2.022233986236104e-05,
      "loss": 1.8387,
      "step": 18000
    },
    {
      "epoch": 2.45,
      "learning_rate": 1.939518263631551e-05,
      "loss": 1.8287,
      "step": 18500
    },
    {
      "epoch": 2.51,
      "learning_rate": 1.8568025410269984e-05,
      "loss": 1.8253,
      "step": 19000
    },
    {
      "epoch": 2.58,
      "learning_rate": 1.7740868184224456e-05,
      "loss": 1.8145,
      "step": 19500
    },
    {
      "epoch": 2.65,
      "learning_rate": 1.691371095817893e-05,
      "loss": 1.8095,
      "step": 20000
    },
    {
      "epoch": 2.71,
      "learning_rate": 1.6086553732133407e-05,
      "loss": 1.8027,
      "step": 20500
    },
    {
      "epoch": 2.78,
      "learning_rate": 1.525939650608788e-05,
      "loss": 1.7982,
      "step": 21000
    },
    {
      "epoch": 2.85,
      "learning_rate": 1.443223928004235e-05,
      "loss": 1.7908,
      "step": 21500
    },
    {
      "epoch": 2.91,
      "learning_rate": 1.3605082053996823e-05,
      "loss": 1.7863,
      "step": 22000
    },
    {
      "epoch": 2.98,
      "learning_rate": 1.2777924827951298e-05,
      "loss": 1.7812,
      "step": 22500
    },
    {
      "epoch": 3.04,
      "learning_rate": 1.1950767601905772e-05,
      "loss": 1.7693,
      "step": 23000
    },
    {
      "epoch": 3.11,
      "learning_rate": 1.1123610375860244e-05,
      "loss": 1.7593,
      "step": 23500
    },
    {
      "epoch": 3.18,
      "learning_rate": 1.0296453149814718e-05,
      "loss": 1.757,
      "step": 24000
    },
    {
      "epoch": 3.24,
      "learning_rate": 9.469295923769191e-06,
      "loss": 1.7535,
      "step": 24500
    },
    {
      "epoch": 3.31,
      "learning_rate": 8.642138697723663e-06,
      "loss": 1.7508,
      "step": 25000
    }
  ],
  "max_steps": 30224,
  "num_train_epochs": 4,
  "total_flos": 7.166643794309775e+17,
  "trial_name": null,
  "trial_params": null
}