{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 7.461538461538462,
"global_step": 48,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.15,
"learning_rate": 5.454545454545454e-05,
"loss": 0.8622,
"step": 2
},
{
"epoch": 0.31,
"learning_rate": 0.00010909090909090908,
"loss": 0.8607,
"step": 4
},
{
"epoch": 0.46,
"learning_rate": 0.0001636363636363636,
"loss": 0.8091,
"step": 6
},
{
"epoch": 1.15,
"learning_rate": 0.00021818181818181816,
"loss": 0.738,
"step": 8
},
{
"epoch": 1.31,
"learning_rate": 0.0002727272727272727,
"loss": 0.6651,
"step": 10
},
{
"epoch": 1.46,
"learning_rate": 0.0002967741935483871,
"loss": 0.5799,
"step": 12
},
{
"epoch": 2.15,
"learning_rate": 0.00029032258064516127,
"loss": 0.4795,
"step": 14
},
{
"epoch": 2.31,
"learning_rate": 0.00028387096774193545,
"loss": 0.4095,
"step": 16
},
{
"epoch": 2.46,
"learning_rate": 0.0002774193548387096,
"loss": 0.2854,
"step": 18
},
{
"epoch": 3.15,
"learning_rate": 0.00027096774193548386,
"loss": 0.2146,
"step": 20
},
{
"epoch": 3.31,
"learning_rate": 0.00026451612903225804,
"loss": 0.176,
"step": 22
},
{
"epoch": 3.46,
"learning_rate": 0.0002580645161290322,
"loss": 0.1498,
"step": 24
},
{
"epoch": 4.15,
"learning_rate": 0.00025161290322580645,
"loss": 0.1382,
"step": 26
},
{
"epoch": 4.31,
"learning_rate": 0.0002451612903225806,
"loss": 0.1273,
"step": 28
},
{
"epoch": 4.46,
"learning_rate": 0.0002387096774193548,
"loss": 0.1222,
"step": 30
},
{
"epoch": 5.15,
"learning_rate": 0.000232258064516129,
"loss": 0.1074,
"step": 32
},
{
"epoch": 5.31,
"learning_rate": 0.00022580645161290321,
"loss": 0.1078,
"step": 34
},
{
"epoch": 5.46,
"learning_rate": 0.0002193548387096774,
"loss": 0.1087,
"step": 36
},
{
"epoch": 6.15,
"learning_rate": 0.0002129032258064516,
"loss": 0.0975,
"step": 38
},
{
"epoch": 6.31,
"learning_rate": 0.0002064516129032258,
"loss": 0.0958,
"step": 40
},
{
"epoch": 6.46,
"learning_rate": 0.00019999999999999998,
"loss": 0.0843,
"step": 42
},
{
"epoch": 7.15,
"learning_rate": 0.00019354838709677416,
"loss": 0.0783,
"step": 44
},
{
"epoch": 7.31,
"learning_rate": 0.00018709677419354837,
"loss": 0.0781,
"step": 46
},
{
"epoch": 7.46,
"learning_rate": 0.00018064516129032257,
"loss": 0.079,
"step": 48
}
],
"max_steps": 104,
"num_train_epochs": 8,
"total_flos": 3984531891683328.0,
"trial_name": null,
"trial_params": null
}