{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.959322033898305,
"global_step": 292,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.99,
"eval_accuracy": 0.5730886850152905,
"eval_loss": 0.6870243549346924,
"eval_runtime": 9.2341,
"eval_samples_per_second": 354.122,
"eval_steps_per_second": 11.154,
"step": 73
},
{
"epoch": 1.99,
"eval_accuracy": 0.5957186544342508,
"eval_loss": 0.6825068593025208,
"eval_runtime": 9.2756,
"eval_samples_per_second": 352.537,
"eval_steps_per_second": 11.104,
"step": 147
},
{
"epoch": 3.0,
"eval_accuracy": 0.6012232415902141,
"eval_loss": 0.6809478402137756,
"eval_runtime": 9.287,
"eval_samples_per_second": 352.104,
"eval_steps_per_second": 11.091,
"step": 221
},
{
"epoch": 3.96,
"eval_accuracy": 0.5957186544342508,
"eval_loss": 0.679547131061554,
"eval_runtime": 9.2851,
"eval_samples_per_second": 352.177,
"eval_steps_per_second": 11.093,
"step": 292
},
{
"epoch": 3.96,
"step": 292,
"total_flos": 1.2991635112329216e+16,
"train_loss": 0.6814518758695419,
"train_runtime": 329.9402,
"train_samples_per_second": 114.287,
"train_steps_per_second": 0.885
}
],
"max_steps": 292,
"num_train_epochs": 4,
"total_flos": 1.2991635112329216e+16,
"trial_name": null,
"trial_params": null
}