{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.897018970189702,
  "global_step": 14000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 2.932249322493225e-05,
      "loss": 1.2027,
      "step": 500
    },
    {
      "epoch": 0.14,
      "learning_rate": 2.86449864498645e-05,
      "loss": 0.9342,
      "step": 1000
    },
    {
      "epoch": 0.2,
      "learning_rate": 2.7967479674796748e-05,
      "loss": 0.8946,
      "step": 1500
    },
    {
      "epoch": 0.27,
      "learning_rate": 2.7289972899729e-05,
      "loss": 0.8385,
      "step": 2000
    },
    {
      "epoch": 0.34,
      "learning_rate": 2.6612466124661247e-05,
      "loss": 0.8886,
      "step": 2500
    },
    {
      "epoch": 0.41,
      "learning_rate": 2.59349593495935e-05,
      "loss": 0.8402,
      "step": 3000
    },
    {
      "epoch": 0.47,
      "learning_rate": 2.5257452574525747e-05,
      "loss": 0.8194,
      "step": 3500
    },
    {
      "epoch": 0.54,
      "learning_rate": 2.4579945799457998e-05,
      "loss": 0.8212,
      "step": 4000
    },
    {
      "epoch": 0.61,
      "learning_rate": 2.3902439024390246e-05,
      "loss": 0.8091,
      "step": 4500
    },
    {
      "epoch": 0.68,
      "learning_rate": 2.3224932249322494e-05,
      "loss": 0.8132,
      "step": 5000
    },
    {
      "epoch": 0.75,
      "learning_rate": 2.2547425474254745e-05,
      "loss": 0.8147,
      "step": 5500
    },
    {
      "epoch": 0.81,
      "learning_rate": 2.186991869918699e-05,
      "loss": 0.7896,
      "step": 6000
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.119241192411924e-05,
      "loss": 0.789,
      "step": 6500
    },
    {
      "epoch": 0.95,
      "learning_rate": 2.051490514905149e-05,
      "loss": 0.7633,
      "step": 7000
    },
    {
      "epoch": 1.02,
      "learning_rate": 1.983739837398374e-05,
      "loss": 0.7146,
      "step": 7500
    },
    {
      "epoch": 1.08,
      "learning_rate": 1.915989159891599e-05,
      "loss": 0.5437,
      "step": 8000
    },
    {
      "epoch": 1.15,
      "learning_rate": 1.8482384823848237e-05,
      "loss": 0.5602,
      "step": 8500
    },
    {
      "epoch": 1.22,
      "learning_rate": 1.7804878048780488e-05,
      "loss": 0.5229,
      "step": 9000
    },
    {
      "epoch": 1.29,
      "learning_rate": 1.7127371273712736e-05,
      "loss": 0.5459,
      "step": 9500
    },
    {
      "epoch": 1.36,
      "learning_rate": 1.6449864498644987e-05,
      "loss": 0.5575,
      "step": 10000
    },
    {
      "epoch": 1.42,
      "learning_rate": 1.5772357723577235e-05,
      "loss": 0.5722,
      "step": 10500
    },
    {
      "epoch": 1.49,
      "learning_rate": 1.5094850948509487e-05,
      "loss": 0.5476,
      "step": 11000
    },
    {
      "epoch": 1.56,
      "learning_rate": 1.4417344173441735e-05,
      "loss": 0.5541,
      "step": 11500
    },
    {
      "epoch": 1.63,
      "learning_rate": 1.3739837398373984e-05,
      "loss": 0.5298,
      "step": 12000
    },
    {
      "epoch": 1.69,
      "learning_rate": 1.3062330623306234e-05,
      "loss": 0.5409,
      "step": 12500
    },
    {
      "epoch": 1.76,
      "learning_rate": 1.2384823848238483e-05,
      "loss": 0.5411,
      "step": 13000
    },
    {
      "epoch": 1.83,
      "learning_rate": 1.1707317073170733e-05,
      "loss": 0.5214,
      "step": 13500
    },
    {
      "epoch": 1.9,
      "learning_rate": 1.1029810298102981e-05,
      "loss": 0.5251,
      "step": 14000
    }
  ],
  "max_steps": 22140,
  "num_train_epochs": 3,
  "total_flos": 1.3649092771917312e+17,
  "trial_name": null,
  "trial_params": null
}