{
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 19.38888888888889,
"eval_steps": 500,
"global_step": 140,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.17,
"learning_rate": 8.333333333333334e-06,
"loss": 1.7271,
"step": 3
},
{
"epoch": 0.33,
"learning_rate": 1.6666666666666667e-05,
"loss": 1.9643,
"step": 6
},
{
"epoch": 1.11,
"learning_rate": 2.5e-05,
"loss": 1.872,
"step": 9
},
{
"epoch": 1.28,
"learning_rate": 3.3333333333333335e-05,
"loss": 1.6191,
"step": 12
},
{
"epoch": 2.06,
"learning_rate": 4.166666666666667e-05,
"loss": 1.8705,
"step": 15
},
{
"epoch": 2.22,
"learning_rate": 5e-05,
"loss": 1.5021,
"step": 18
},
{
"epoch": 2.39,
"learning_rate": 5.833333333333334e-05,
"loss": 1.6868,
"step": 21
},
{
"epoch": 3.17,
"learning_rate": 6.666666666666667e-05,
"loss": 1.5721,
"step": 24
},
{
"epoch": 3.33,
"learning_rate": 7.500000000000001e-05,
"loss": 1.3664,
"step": 27
},
{
"epoch": 4.11,
"learning_rate": 8.333333333333334e-05,
"loss": 1.5311,
"step": 30
},
{
"epoch": 4.28,
"learning_rate": 9.166666666666667e-05,
"loss": 1.3607,
"step": 33
},
{
"epoch": 5.06,
"learning_rate": 0.0001,
"loss": 1.0423,
"step": 36
},
{
"epoch": 5.22,
"learning_rate": 9.907407407407407e-05,
"loss": 1.2364,
"step": 39
},
{
"epoch": 5.39,
"learning_rate": 9.814814814814815e-05,
"loss": 1.1658,
"step": 42
},
{
"epoch": 6.17,
"learning_rate": 9.722222222222223e-05,
"loss": 1.056,
"step": 45
},
{
"epoch": 6.33,
"learning_rate": 9.62962962962963e-05,
"loss": 1.0494,
"step": 48
},
{
"epoch": 7.11,
"learning_rate": 9.537037037037038e-05,
"loss": 0.8505,
"step": 51
},
{
"epoch": 7.28,
"learning_rate": 9.444444444444444e-05,
"loss": 0.8941,
"step": 54
},
{
"epoch": 8.06,
"learning_rate": 9.351851851851852e-05,
"loss": 0.7708,
"step": 57
},
{
"epoch": 8.22,
"learning_rate": 9.25925925925926e-05,
"loss": 0.5983,
"step": 60
},
{
"epoch": 8.39,
"learning_rate": 9.166666666666667e-05,
"loss": 0.5789,
"step": 63
},
{
"epoch": 9.17,
"learning_rate": 9.074074074074075e-05,
"loss": 0.4382,
"step": 66
},
{
"epoch": 9.33,
"learning_rate": 8.981481481481481e-05,
"loss": 0.6309,
"step": 69
},
{
"epoch": 10.11,
"learning_rate": 8.888888888888889e-05,
"loss": 0.4637,
"step": 72
},
{
"epoch": 10.28,
"learning_rate": 8.796296296296297e-05,
"loss": 0.4983,
"step": 75
},
{
"epoch": 11.06,
"learning_rate": 8.703703703703704e-05,
"loss": 0.3079,
"step": 78
},
{
"epoch": 11.22,
"learning_rate": 8.611111111111112e-05,
"loss": 0.3754,
"step": 81
},
{
"epoch": 11.39,
"learning_rate": 8.518518518518518e-05,
"loss": 0.3138,
"step": 84
},
{
"epoch": 12.17,
"learning_rate": 8.425925925925926e-05,
"loss": 0.3076,
"step": 87
},
{
"epoch": 12.33,
"learning_rate": 8.333333333333334e-05,
"loss": 0.3103,
"step": 90
},
{
"epoch": 13.11,
"learning_rate": 8.240740740740741e-05,
"loss": 0.2391,
"step": 93
},
{
"epoch": 13.28,
"learning_rate": 8.148148148148148e-05,
"loss": 0.2365,
"step": 96
},
{
"epoch": 14.06,
"learning_rate": 8.055555555555556e-05,
"loss": 0.195,
"step": 99
},
{
"epoch": 14.22,
"learning_rate": 7.962962962962964e-05,
"loss": 0.186,
"step": 102
},
{
"epoch": 14.39,
"learning_rate": 7.870370370370372e-05,
"loss": 0.2039,
"step": 105
},
{
"epoch": 15.17,
"learning_rate": 7.777777777777778e-05,
"loss": 0.1912,
"step": 108
},
{
"epoch": 15.33,
"learning_rate": 7.685185185185185e-05,
"loss": 0.1421,
"step": 111
},
{
"epoch": 16.11,
"learning_rate": 7.592592592592593e-05,
"loss": 0.1349,
"step": 114
},
{
"epoch": 16.28,
"learning_rate": 7.500000000000001e-05,
"loss": 0.1201,
"step": 117
},
{
"epoch": 17.06,
"learning_rate": 7.407407407407407e-05,
"loss": 0.1364,
"step": 120
},
{
"epoch": 17.22,
"learning_rate": 7.314814814814815e-05,
"loss": 0.1249,
"step": 123
},
{
"epoch": 17.39,
"learning_rate": 7.222222222222222e-05,
"loss": 0.101,
"step": 126
},
{
"epoch": 18.17,
"learning_rate": 7.12962962962963e-05,
"loss": 0.0961,
"step": 129
},
{
"epoch": 18.33,
"learning_rate": 7.037037037037038e-05,
"loss": 0.1026,
"step": 132
},
{
"epoch": 19.11,
"learning_rate": 6.944444444444444e-05,
"loss": 0.0982,
"step": 135
},
{
"epoch": 19.28,
"learning_rate": 6.851851851851852e-05,
"loss": 0.0791,
"step": 138
}
],
"logging_steps": 3,
"max_steps": 360,
"num_train_epochs": 20,
"save_steps": 500,
"total_flos": 1.13811835060224e+16,
"trial_name": null,
"trial_params": null
}