{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "global_step": 16314,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09,
      "learning_rate": 4.846757386293981e-05,
      "loss": 0.9182,
      "step": 500
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.6935147725879615e-05,
      "loss": 0.9324,
      "step": 1000
    },
    {
      "epoch": 0.28,
      "learning_rate": 4.540272158881942e-05,
      "loss": 0.9328,
      "step": 1500
    },
    {
      "epoch": 0.37,
      "learning_rate": 4.387029545175923e-05,
      "loss": 0.9331,
      "step": 2000
    },
    {
      "epoch": 0.46,
      "learning_rate": 4.2337869314699034e-05,
      "loss": 0.9462,
      "step": 2500
    },
    {
      "epoch": 0.55,
      "learning_rate": 4.080544317763884e-05,
      "loss": 0.9241,
      "step": 3000
    },
    {
      "epoch": 0.64,
      "learning_rate": 3.9273017040578646e-05,
      "loss": 0.9323,
      "step": 3500
    },
    {
      "epoch": 0.74,
      "learning_rate": 3.774059090351845e-05,
      "loss": 0.9296,
      "step": 4000
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.620816476645826e-05,
      "loss": 0.9296,
      "step": 4500
    },
    {
      "epoch": 0.92,
      "learning_rate": 3.4675738629398065e-05,
      "loss": 0.9232,
      "step": 5000
    },
    {
      "epoch": 1.01,
      "learning_rate": 3.314331249233787e-05,
      "loss": 0.9,
      "step": 5500
    },
    {
      "epoch": 1.1,
      "learning_rate": 3.161088635527768e-05,
      "loss": 0.7056,
      "step": 6000
    },
    {
      "epoch": 1.2,
      "learning_rate": 3.0078460218217487e-05,
      "loss": 0.7021,
      "step": 6500
    },
    {
      "epoch": 1.29,
      "learning_rate": 2.854603408115729e-05,
      "loss": 0.6979,
      "step": 7000
    },
    {
      "epoch": 1.38,
      "learning_rate": 2.7013607944097096e-05,
      "loss": 0.7094,
      "step": 7500
    },
    {
      "epoch": 1.47,
      "learning_rate": 2.5481181807036903e-05,
      "loss": 0.7099,
      "step": 8000
    },
    {
      "epoch": 1.56,
      "learning_rate": 2.394875566997671e-05,
      "loss": 0.7178,
      "step": 8500
    },
    {
      "epoch": 1.66,
      "learning_rate": 2.2416329532916515e-05,
      "loss": 0.7136,
      "step": 9000
    },
    {
      "epoch": 1.75,
      "learning_rate": 2.088390339585632e-05,
      "loss": 0.71,
      "step": 9500
    },
    {
      "epoch": 1.84,
      "learning_rate": 1.9351477258796128e-05,
      "loss": 0.7183,
      "step": 10000
    },
    {
      "epoch": 1.93,
      "learning_rate": 1.7819051121735934e-05,
      "loss": 0.7188,
      "step": 10500
    },
    {
      "epoch": 2.02,
      "learning_rate": 1.628662498467574e-05,
      "loss": 0.6733,
      "step": 11000
    },
    {
      "epoch": 2.11,
      "learning_rate": 1.4754198847615546e-05,
      "loss": 0.5524,
      "step": 11500
    },
    {
      "epoch": 2.21,
      "learning_rate": 1.3221772710555353e-05,
      "loss": 0.5488,
      "step": 12000
    },
    {
      "epoch": 2.3,
      "learning_rate": 1.1689346573495157e-05,
      "loss": 0.5507,
      "step": 12500
    },
    {
      "epoch": 2.39,
      "learning_rate": 1.0156920436434965e-05,
      "loss": 0.553,
      "step": 13000
    },
    {
      "epoch": 2.48,
      "learning_rate": 8.62449429937477e-06,
      "loss": 0.5558,
      "step": 13500
    },
    {
      "epoch": 2.57,
      "learning_rate": 7.092068162314578e-06,
      "loss": 0.5554,
      "step": 14000
    },
    {
      "epoch": 2.67,
      "learning_rate": 5.559642025254383e-06,
      "loss": 0.5457,
      "step": 14500
    },
    {
      "epoch": 2.76,
      "learning_rate": 4.027215888194189e-06,
      "loss": 0.5548,
      "step": 15000
    },
    {
      "epoch": 2.85,
      "learning_rate": 2.4947897511339955e-06,
      "loss": 0.5538,
      "step": 15500
    },
    {
      "epoch": 2.94,
      "learning_rate": 9.623636140738016e-07,
      "loss": 0.5554,
      "step": 16000
    },
    {
      "epoch": 3.0,
      "step": 16314,
      "total_flos": 246245231169216000,
      "train_runtime": 35196.4998,
      "train_samples_per_second": 0.464
    }
  ],
  "max_steps": 16314,
  "num_train_epochs": 3,
  "total_flos": 246245231169216000,
  "trial_name": null,
  "trial_params": null
}