{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 7.428571428571429,
"eval_steps": 500,
"global_step": 24,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.14,
"learning_rate": 4.9999999999999996e-05,
"loss": 1.0283,
"step": 1
},
{
"epoch": 0.29,
"learning_rate": 9.999999999999999e-05,
"loss": 1.0447,
"step": 2
},
{
"epoch": 0.43,
"learning_rate": 0.00015,
"loss": 1.0075,
"step": 3
},
{
"epoch": 1.14,
"learning_rate": 0.00019999999999999998,
"loss": 0.9604,
"step": 4
},
{
"epoch": 1.29,
"learning_rate": 0.00025,
"loss": 0.8964,
"step": 5
},
{
"epoch": 1.43,
"learning_rate": 0.0003,
"loss": 0.8099,
"step": 6
},
{
"epoch": 2.14,
"learning_rate": 0.000294,
"loss": 0.7356,
"step": 7
},
{
"epoch": 2.29,
"learning_rate": 0.00028799999999999995,
"loss": 0.6955,
"step": 8
},
{
"epoch": 2.43,
"learning_rate": 0.00028199999999999997,
"loss": 0.6318,
"step": 9
},
{
"epoch": 3.14,
"learning_rate": 0.000276,
"loss": 0.594,
"step": 10
},
{
"epoch": 3.29,
"learning_rate": 0.00027,
"loss": 0.5653,
"step": 11
},
{
"epoch": 3.43,
"learning_rate": 0.00026399999999999997,
"loss": 0.5067,
"step": 12
},
{
"epoch": 4.14,
"learning_rate": 0.000258,
"loss": 0.4433,
"step": 13
},
{
"epoch": 4.29,
"learning_rate": 0.00025199999999999995,
"loss": 0.3947,
"step": 14
},
{
"epoch": 4.43,
"learning_rate": 0.00024599999999999996,
"loss": 0.3413,
"step": 15
},
{
"epoch": 5.14,
"learning_rate": 0.00023999999999999998,
"loss": 0.2837,
"step": 16
},
{
"epoch": 5.29,
"learning_rate": 0.000234,
"loss": 0.2606,
"step": 17
},
{
"epoch": 5.43,
"learning_rate": 0.00022799999999999999,
"loss": 0.2437,
"step": 18
},
{
"epoch": 6.14,
"learning_rate": 0.00022199999999999998,
"loss": 0.2033,
"step": 19
},
{
"epoch": 6.29,
"learning_rate": 0.00021599999999999996,
"loss": 0.187,
"step": 20
},
{
"epoch": 6.43,
"learning_rate": 0.00020999999999999998,
"loss": 0.162,
"step": 21
},
{
"epoch": 7.14,
"learning_rate": 0.000204,
"loss": 0.1494,
"step": 22
},
{
"epoch": 7.29,
"learning_rate": 0.000198,
"loss": 0.1454,
"step": 23
},
{
"epoch": 7.43,
"learning_rate": 0.00019199999999999998,
"loss": 0.1422,
"step": 24
}
],
"logging_steps": 1,
"max_steps": 56,
"num_train_epochs": 8,
"save_steps": 500,
"total_flos": 3984531891683328.0,
"trial_name": null,
"trial_params": null
}