roberta-base-bne-squad-2.0-es — trainer_state.json
Commit ca67487 by Javi: "reduced batch size results in slight improvement of metrics"
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.0,
"global_step": 22044,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09,
"learning_rate": 2.9319542732716386e-05,
"loss": 2.1551,
"step": 500
},
{
"epoch": 0.18,
"learning_rate": 2.863908546543277e-05,
"loss": 1.7229,
"step": 1000
},
{
"epoch": 0.27,
"learning_rate": 2.7958628198149155e-05,
"loss": 1.5902,
"step": 1500
},
{
"epoch": 0.36,
"learning_rate": 2.7278170930865543e-05,
"loss": 1.5139,
"step": 2000
},
{
"epoch": 0.45,
"learning_rate": 2.6597713663581925e-05,
"loss": 1.4848,
"step": 2500
},
{
"epoch": 0.54,
"learning_rate": 2.5917256396298313e-05,
"loss": 1.4533,
"step": 3000
},
{
"epoch": 0.64,
"learning_rate": 2.5236799129014698e-05,
"loss": 1.3801,
"step": 3500
},
{
"epoch": 0.73,
"learning_rate": 2.4556341861731083e-05,
"loss": 1.3509,
"step": 4000
},
{
"epoch": 0.82,
"learning_rate": 2.3875884594447467e-05,
"loss": 1.3392,
"step": 4500
},
{
"epoch": 0.91,
"learning_rate": 2.3195427327163856e-05,
"loss": 1.3046,
"step": 5000
},
{
"epoch": 1.0,
"learning_rate": 2.251497005988024e-05,
"loss": 1.275,
"step": 5500
},
{
"epoch": 1.09,
"learning_rate": 2.1834512792596625e-05,
"loss": 0.9101,
"step": 6000
},
{
"epoch": 1.18,
"learning_rate": 2.115405552531301e-05,
"loss": 0.937,
"step": 6500
},
{
"epoch": 1.27,
"learning_rate": 2.0473598258029398e-05,
"loss": 0.9173,
"step": 7000
},
{
"epoch": 1.36,
"learning_rate": 1.979314099074578e-05,
"loss": 0.9003,
"step": 7500
},
{
"epoch": 1.45,
"learning_rate": 1.9112683723462168e-05,
"loss": 0.9009,
"step": 8000
},
{
"epoch": 1.54,
"learning_rate": 1.8432226456178553e-05,
"loss": 0.9359,
"step": 8500
},
{
"epoch": 1.63,
"learning_rate": 1.7751769188894937e-05,
"loss": 0.9038,
"step": 9000
},
{
"epoch": 1.72,
"learning_rate": 1.7071311921611322e-05,
"loss": 0.9211,
"step": 9500
},
{
"epoch": 1.81,
"learning_rate": 1.639085465432771e-05,
"loss": 0.8918,
"step": 10000
},
{
"epoch": 1.91,
"learning_rate": 1.5710397387044092e-05,
"loss": 0.9076,
"step": 10500
},
{
"epoch": 2.0,
"learning_rate": 1.502994011976048e-05,
"loss": 0.8857,
"step": 11000
},
{
"epoch": 2.09,
"learning_rate": 1.4349482852476866e-05,
"loss": 0.5123,
"step": 11500
},
{
"epoch": 2.18,
"learning_rate": 1.366902558519325e-05,
"loss": 0.481,
"step": 12000
},
{
"epoch": 2.27,
"learning_rate": 1.2988568317909634e-05,
"loss": 0.4587,
"step": 12500
},
{
"epoch": 2.36,
"learning_rate": 1.2308111050626021e-05,
"loss": 0.4773,
"step": 13000
},
{
"epoch": 2.45,
"learning_rate": 1.1627653783342406e-05,
"loss": 0.4962,
"step": 13500
},
{
"epoch": 2.54,
"learning_rate": 1.094719651605879e-05,
"loss": 0.5124,
"step": 14000
},
{
"epoch": 2.63,
"learning_rate": 1.0266739248775177e-05,
"loss": 0.4949,
"step": 14500
},
{
"epoch": 2.72,
"learning_rate": 9.586281981491562e-06,
"loss": 0.4931,
"step": 15000
},
{
"epoch": 2.81,
"learning_rate": 8.905824714207947e-06,
"loss": 0.4849,
"step": 15500
},
{
"epoch": 2.9,
"learning_rate": 8.225367446924333e-06,
"loss": 0.4878,
"step": 16000
},
{
"epoch": 2.99,
"learning_rate": 7.544910179640718e-06,
"loss": 0.4949,
"step": 16500
},
{
"epoch": 3.08,
"learning_rate": 6.864452912357104e-06,
"loss": 0.2131,
"step": 17000
},
{
"epoch": 3.18,
"learning_rate": 6.183995645073489e-06,
"loss": 0.2022,
"step": 17500
},
{
"epoch": 3.27,
"learning_rate": 5.503538377789875e-06,
"loss": 0.2071,
"step": 18000
},
{
"epoch": 3.36,
"learning_rate": 4.82308111050626e-06,
"loss": 0.2067,
"step": 18500
},
{
"epoch": 3.45,
"learning_rate": 4.142623843222645e-06,
"loss": 0.2029,
"step": 19000
},
{
"epoch": 3.54,
"learning_rate": 3.4621665759390314e-06,
"loss": 0.1866,
"step": 19500
},
{
"epoch": 3.63,
"learning_rate": 2.7817093086554166e-06,
"loss": 0.2056,
"step": 20000
},
{
"epoch": 3.72,
"learning_rate": 2.101252041371802e-06,
"loss": 0.1925,
"step": 20500
},
{
"epoch": 3.81,
"learning_rate": 1.4207947740881873e-06,
"loss": 0.1866,
"step": 21000
},
{
"epoch": 3.9,
"learning_rate": 7.403375068045727e-07,
"loss": 0.1952,
"step": 21500
},
{
"epoch": 3.99,
"learning_rate": 5.988023952095808e-08,
"loss": 0.1866,
"step": 22000
},
{
"epoch": 4.0,
"step": 22044,
"total_flos": 1.0366869961703424e+17,
"train_loss": 0.7753000720575617,
"train_runtime": 7643.3678,
"train_samples_per_second": 69.209,
"train_steps_per_second": 2.884
}
],
"max_steps": 22044,
"num_train_epochs": 4,
"total_flos": 1.0366869961703424e+17,
"trial_name": null,
"trial_params": null
}