{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"global_step": 25000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07,
"learning_rate": 2.9550022498875057e-05,
"loss": 1.4723,
"step": 500
},
{
"epoch": 0.15,
"learning_rate": 2.9100044997750113e-05,
"loss": 1.1147,
"step": 1000
},
{
"epoch": 0.22,
"learning_rate": 2.865006749662517e-05,
"loss": 1.0838,
"step": 1500
},
{
"epoch": 0.3,
"learning_rate": 2.8200089995500226e-05,
"loss": 1.0829,
"step": 2000
},
{
"epoch": 0.37,
"learning_rate": 2.775011249437528e-05,
"loss": 1.0867,
"step": 2500
},
{
"epoch": 0.45,
"learning_rate": 2.730013499325034e-05,
"loss": 1.0653,
"step": 3000
},
{
"epoch": 0.52,
"learning_rate": 2.6850157492125395e-05,
"loss": 1.0727,
"step": 3500
},
{
"epoch": 0.6,
"learning_rate": 2.640017999100045e-05,
"loss": 1.0808,
"step": 4000
},
{
"epoch": 0.67,
"learning_rate": 2.5950202489875508e-05,
"loss": 1.0751,
"step": 4500
},
{
"epoch": 0.75,
"learning_rate": 2.5500224988750564e-05,
"loss": 1.0788,
"step": 5000
},
{
"epoch": 0.82,
"learning_rate": 2.505024748762562e-05,
"loss": 1.0742,
"step": 5500
},
{
"epoch": 0.9,
"learning_rate": 2.4600269986500677e-05,
"loss": 1.0765,
"step": 6000
},
{
"epoch": 0.97,
"learning_rate": 2.415029248537573e-05,
"loss": 1.0781,
"step": 6500
},
{
"epoch": 1.4,
"learning_rate": 2.16e-05,
"loss": 1.0563,
"step": 7000
},
{
"epoch": 1.5,
"learning_rate": 2.1e-05,
"loss": 1.0432,
"step": 7500
},
{
"epoch": 1.6,
"learning_rate": 2.04e-05,
"loss": 1.0363,
"step": 8000
},
{
"epoch": 1.7,
"learning_rate": 1.98e-05,
"loss": 1.044,
"step": 8500
},
{
"epoch": 1.8,
"learning_rate": 1.9200000000000003e-05,
"loss": 1.0317,
"step": 9000
},
{
"epoch": 1.9,
"learning_rate": 1.86e-05,
"loss": 1.0381,
"step": 9500
},
{
"epoch": 2.0,
"learning_rate": 1.8e-05,
"loss": 1.0314,
"step": 10000
},
{
"epoch": 2.0,
"eval_loss": 0.9475362300872803,
"eval_runtime": 248.7207,
"eval_samples_per_second": 40.206,
"eval_steps_per_second": 5.026,
"step": 10000
},
{
"epoch": 2.1,
"learning_rate": 1.74e-05,
"loss": 1.0283,
"step": 10500
},
{
"epoch": 2.2,
"learning_rate": 1.6800000000000002e-05,
"loss": 1.0251,
"step": 11000
},
{
"epoch": 2.3,
"learning_rate": 1.62e-05,
"loss": 1.0191,
"step": 11500
},
{
"epoch": 2.4,
"learning_rate": 1.56e-05,
"loss": 1.0175,
"step": 12000
},
{
"epoch": 2.5,
"learning_rate": 1.5e-05,
"loss": 1.0132,
"step": 12500
},
{
"epoch": 2.6,
"learning_rate": 1.44e-05,
"loss": 1.0127,
"step": 13000
},
{
"epoch": 2.7,
"learning_rate": 1.3800000000000002e-05,
"loss": 0.9994,
"step": 13500
},
{
"epoch": 2.8,
"learning_rate": 1.32e-05,
"loss": 1.0096,
"step": 14000
},
{
"epoch": 2.9,
"learning_rate": 1.26e-05,
"loss": 1.0119,
"step": 14500
},
{
"epoch": 3.0,
"learning_rate": 1.2e-05,
"loss": 1.0087,
"step": 15000
},
{
"epoch": 3.1,
"learning_rate": 1.1400000000000001e-05,
"loss": 1.0075,
"step": 15500
},
{
"epoch": 3.2,
"learning_rate": 1.08e-05,
"loss": 1.0002,
"step": 16000
},
{
"epoch": 3.3,
"learning_rate": 1.02e-05,
"loss": 1.001,
"step": 16500
},
{
"epoch": 3.4,
"learning_rate": 9.600000000000001e-06,
"loss": 0.9938,
"step": 17000
},
{
"epoch": 3.5,
"learning_rate": 9e-06,
"loss": 0.9887,
"step": 17500
},
{
"epoch": 3.6,
"learning_rate": 8.400000000000001e-06,
"loss": 0.9931,
"step": 18000
},
{
"epoch": 3.7,
"learning_rate": 7.8e-06,
"loss": 0.9879,
"step": 18500
},
{
"epoch": 3.8,
"learning_rate": 7.2e-06,
"loss": 0.9825,
"step": 19000
},
{
"epoch": 3.9,
"learning_rate": 6.6e-06,
"loss": 0.9939,
"step": 19500
},
{
"epoch": 4.0,
"learning_rate": 6e-06,
"loss": 0.9785,
"step": 20000
},
{
"epoch": 4.0,
"eval_loss": 0.9039560556411743,
"eval_runtime": 248.8855,
"eval_samples_per_second": 40.179,
"eval_steps_per_second": 5.022,
"step": 20000
},
{
"epoch": 4.1,
"learning_rate": 5.4e-06,
"loss": 0.9823,
"step": 20500
},
{
"epoch": 4.2,
"learning_rate": 4.800000000000001e-06,
"loss": 0.9772,
"step": 21000
},
{
"epoch": 4.3,
"learning_rate": 4.2000000000000004e-06,
"loss": 0.9766,
"step": 21500
},
{
"epoch": 4.4,
"learning_rate": 3.6e-06,
"loss": 0.9655,
"step": 22000
},
{
"epoch": 4.5,
"learning_rate": 3e-06,
"loss": 0.9723,
"step": 22500
},
{
"epoch": 4.6,
"learning_rate": 2.4000000000000003e-06,
"loss": 0.9602,
"step": 23000
},
{
"epoch": 4.7,
"learning_rate": 1.8e-06,
"loss": 0.9763,
"step": 23500
},
{
"epoch": 4.8,
"learning_rate": 1.2000000000000002e-06,
"loss": 0.9717,
"step": 24000
},
{
"epoch": 4.9,
"learning_rate": 6.000000000000001e-07,
"loss": 0.964,
"step": 24500
},
{
"epoch": 5.0,
"learning_rate": 0.0,
"loss": 0.9579,
"step": 25000
}
],
"max_steps": 25000,
"num_train_epochs": 5,
"total_flos": 4.9230641636352e+16,
"trial_name": null,
"trial_params": null
}