{
"best_metric": 0.9376567006111145,
"best_model_checkpoint": "finetune_xlm-roberta-base/checkpoint-5476",
"epoch": 2.0,
"eval_steps": 500,
"global_step": 5476,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.18,
"learning_rate": 1.8789870952033116e-05,
"loss": 2.1799,
"step": 500
},
{
"epoch": 0.37,
"learning_rate": 1.7572437302167033e-05,
"loss": 1.2786,
"step": 1000
},
{
"epoch": 0.55,
"learning_rate": 1.635500365230095e-05,
"loss": 1.1682,
"step": 1500
},
{
"epoch": 0.73,
"learning_rate": 1.513757000243487e-05,
"loss": 1.1019,
"step": 2000
},
{
"epoch": 0.91,
"learning_rate": 1.3920136352568787e-05,
"loss": 1.0745,
"step": 2500
},
{
"epoch": 1.0,
"eval_loss": 0.9917843341827393,
"eval_runtime": 32.4148,
"eval_samples_per_second": 326.086,
"eval_steps_per_second": 2.561,
"step": 2738
},
{
"epoch": 1.1,
"learning_rate": 1.2702702702702702e-05,
"loss": 0.9744,
"step": 3000
},
{
"epoch": 1.28,
"learning_rate": 1.1487703920136353e-05,
"loss": 0.9039,
"step": 3500
},
{
"epoch": 1.46,
"learning_rate": 1.027027027027027e-05,
"loss": 0.8688,
"step": 4000
},
{
"epoch": 1.64,
"learning_rate": 9.052836620404189e-06,
"loss": 0.8802,
"step": 4500
},
{
"epoch": 1.83,
"learning_rate": 7.835402970538106e-06,
"loss": 0.8816,
"step": 5000
},
{
"epoch": 2.0,
"eval_loss": 0.9376567006111145,
"eval_runtime": 32.177,
"eval_samples_per_second": 328.495,
"eval_steps_per_second": 2.579,
"step": 5476
}
],
"logging_steps": 500,
"max_steps": 8214,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 3.4334001889975296e+16,
"trial_name": null,
"trial_params": null
}