{
"best_metric": 0.7278010101558682,
"best_model_checkpoint": ".\\output\\checkpoint-450",
"epoch": 3.0,
"eval_steps": 500,
"global_step": 450,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07,
"learning_rate": 6e-06,
"loss": 1.8646,
"step": 10
},
{
"epoch": 0.13,
"learning_rate": 1.2666666666666667e-05,
"loss": 1.7999,
"step": 20
},
{
"epoch": 0.2,
"learning_rate": 1.9333333333333333e-05,
"loss": 1.6122,
"step": 30
},
{
"epoch": 0.27,
"learning_rate": 2.6000000000000002e-05,
"loss": 1.5558,
"step": 40
},
{
"epoch": 0.33,
"learning_rate": 2.9703703703703707e-05,
"loss": 1.4972,
"step": 50
},
{
"epoch": 0.4,
"learning_rate": 2.8962962962962965e-05,
"loss": 1.482,
"step": 60
},
{
"epoch": 0.47,
"learning_rate": 2.8222222222222223e-05,
"loss": 1.4642,
"step": 70
},
{
"epoch": 0.53,
"learning_rate": 2.7481481481481482e-05,
"loss": 1.4455,
"step": 80
},
{
"epoch": 0.6,
"learning_rate": 2.6740740740740743e-05,
"loss": 1.4228,
"step": 90
},
{
"epoch": 0.67,
"learning_rate": 2.6000000000000002e-05,
"loss": 1.4093,
"step": 100
},
{
"epoch": 0.73,
"learning_rate": 2.525925925925926e-05,
"loss": 1.3687,
"step": 110
},
{
"epoch": 0.8,
"learning_rate": 2.451851851851852e-05,
"loss": 1.4491,
"step": 120
},
{
"epoch": 0.87,
"learning_rate": 2.377777777777778e-05,
"loss": 1.4152,
"step": 130
},
{
"epoch": 0.93,
"learning_rate": 2.303703703703704e-05,
"loss": 1.4672,
"step": 140
},
{
"epoch": 1.0,
"learning_rate": 2.2296296296296297e-05,
"loss": 1.3758,
"step": 150
},
{
"epoch": 1.0,
"eval_accuracy": 0.7276595744680852,
"eval_loss": 1.2825729846954346,
"eval_runtime": 14.8174,
"eval_samples_per_second": 33.474,
"eval_steps_per_second": 4.184,
"step": 150
},
{
"epoch": 1.07,
"learning_rate": 2.155555555555556e-05,
"loss": 1.372,
"step": 160
},
{
"epoch": 1.13,
"learning_rate": 2.0814814814814817e-05,
"loss": 1.4247,
"step": 170
},
{
"epoch": 1.2,
"learning_rate": 2.0074074074074075e-05,
"loss": 1.3985,
"step": 180
},
{
"epoch": 1.27,
"learning_rate": 1.9333333333333333e-05,
"loss": 1.3752,
"step": 190
},
{
"epoch": 1.33,
"learning_rate": 1.8592592592592595e-05,
"loss": 1.4129,
"step": 200
},
{
"epoch": 1.4,
"learning_rate": 1.7851851851851853e-05,
"loss": 1.3847,
"step": 210
},
{
"epoch": 1.47,
"learning_rate": 1.7111111111111112e-05,
"loss": 1.3855,
"step": 220
},
{
"epoch": 1.53,
"learning_rate": 1.6370370370370374e-05,
"loss": 1.3916,
"step": 230
},
{
"epoch": 1.6,
"learning_rate": 1.5629629629629632e-05,
"loss": 1.4184,
"step": 240
},
{
"epoch": 1.67,
"learning_rate": 1.4888888888888888e-05,
"loss": 1.3827,
"step": 250
},
{
"epoch": 1.73,
"learning_rate": 1.4148148148148148e-05,
"loss": 1.3742,
"step": 260
},
{
"epoch": 1.8,
"learning_rate": 1.3407407407407408e-05,
"loss": 1.4016,
"step": 270
},
{
"epoch": 1.87,
"learning_rate": 1.2666666666666667e-05,
"loss": 1.3888,
"step": 280
},
{
"epoch": 1.93,
"learning_rate": 1.1925925925925927e-05,
"loss": 1.3748,
"step": 290
},
{
"epoch": 2.0,
"learning_rate": 1.1185185185185185e-05,
"loss": 1.3763,
"step": 300
},
{
"epoch": 2.0,
"eval_accuracy": 0.727228590694538,
"eval_loss": 1.2746729850769043,
"eval_runtime": 14.4425,
"eval_samples_per_second": 34.343,
"eval_steps_per_second": 4.293,
"step": 300
},
{
"epoch": 2.07,
"learning_rate": 1.0444444444444445e-05,
"loss": 1.3781,
"step": 310
},
{
"epoch": 2.13,
"learning_rate": 9.703703703703703e-06,
"loss": 1.4019,
"step": 320
},
{
"epoch": 2.2,
"learning_rate": 8.962962962962963e-06,
"loss": 1.3862,
"step": 330
},
{
"epoch": 2.27,
"learning_rate": 8.222222222222222e-06,
"loss": 1.3784,
"step": 340
},
{
"epoch": 2.33,
"learning_rate": 7.481481481481482e-06,
"loss": 1.3407,
"step": 350
},
{
"epoch": 2.4,
"learning_rate": 6.740740740740741e-06,
"loss": 1.3496,
"step": 360
},
{
"epoch": 2.47,
"learning_rate": 6e-06,
"loss": 1.3681,
"step": 370
},
{
"epoch": 2.53,
"learning_rate": 5.25925925925926e-06,
"loss": 1.3453,
"step": 380
},
{
"epoch": 2.6,
"learning_rate": 4.518518518518519e-06,
"loss": 1.3294,
"step": 390
},
{
"epoch": 2.67,
"learning_rate": 3.7777777777777777e-06,
"loss": 1.3361,
"step": 400
},
{
"epoch": 2.73,
"learning_rate": 3.0370370370370372e-06,
"loss": 1.3615,
"step": 410
},
{
"epoch": 2.8,
"learning_rate": 2.2962962962962964e-06,
"loss": 1.3104,
"step": 420
},
{
"epoch": 2.87,
"learning_rate": 1.5555555555555556e-06,
"loss": 1.3395,
"step": 430
},
{
"epoch": 2.93,
"learning_rate": 8.148148148148149e-07,
"loss": 1.3499,
"step": 440
},
{
"epoch": 3.0,
"learning_rate": 7.407407407407407e-08,
"loss": 1.3558,
"step": 450
},
{
"epoch": 3.0,
"eval_accuracy": 0.7278010101558682,
"eval_loss": 1.260701298713684,
"eval_runtime": 14.198,
"eval_samples_per_second": 34.935,
"eval_steps_per_second": 4.367,
"step": 450
},
{
"epoch": 3.0,
"step": 450,
"total_flos": 3789443078682624.0,
"train_loss": 1.4182749218410915,
"train_runtime": 1171.1026,
"train_samples_per_second": 12.291,
"train_steps_per_second": 0.384
}
],
"logging_steps": 10,
"max_steps": 450,
"num_train_epochs": 3,
"save_steps": 100,
"total_flos": 3789443078682624.0,
"trial_name": null,
"trial_params": null
}