{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 19.428571428571427,
"eval_steps": 500,
"global_step": 60,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.14,
"learning_rate": 7.142857142857142e-05,
"loss": 1.7815,
"step": 1
},
{
"epoch": 0.29,
"learning_rate": 0.00014285714285714284,
"loss": 1.8832,
"step": 2
},
{
"epoch": 0.43,
"learning_rate": 0.00021428571428571427,
"loss": 1.8016,
"step": 3
},
{
"epoch": 1.14,
"learning_rate": 0.0002857142857142857,
"loss": 1.875,
"step": 4
},
{
"epoch": 1.29,
"learning_rate": 0.00035714285714285714,
"loss": 1.3989,
"step": 5
},
{
"epoch": 1.43,
"learning_rate": 0.00042857142857142855,
"loss": 1.5972,
"step": 6
},
{
"epoch": 2.14,
"learning_rate": 0.0005,
"loss": 1.3955,
"step": 7
},
{
"epoch": 2.29,
"learning_rate": 0.0005714285714285714,
"loss": 1.2051,
"step": 8
},
{
"epoch": 2.43,
"learning_rate": 0.0006428571428571429,
"loss": 1.5163,
"step": 9
},
{
"epoch": 3.14,
"learning_rate": 0.0007142857142857143,
"loss": 1.2729,
"step": 10
},
{
"epoch": 3.29,
"learning_rate": 0.0007857142857142857,
"loss": 1.1587,
"step": 11
},
{
"epoch": 3.43,
"learning_rate": 0.0008571428571428571,
"loss": 1.0227,
"step": 12
},
{
"epoch": 4.14,
"learning_rate": 0.0009285714285714287,
"loss": 1.1174,
"step": 13
},
{
"epoch": 4.29,
"learning_rate": 0.001,
"loss": 0.8435,
"step": 14
},
{
"epoch": 4.43,
"learning_rate": 0.000992063492063492,
"loss": 0.6047,
"step": 15
},
{
"epoch": 5.14,
"learning_rate": 0.000984126984126984,
"loss": 0.5716,
"step": 16
},
{
"epoch": 5.29,
"learning_rate": 0.0009761904761904762,
"loss": 0.6303,
"step": 17
},
{
"epoch": 5.43,
"learning_rate": 0.0009682539682539683,
"loss": 0.5562,
"step": 18
},
{
"epoch": 6.14,
"learning_rate": 0.0009603174603174604,
"loss": 0.5375,
"step": 19
},
{
"epoch": 6.29,
"learning_rate": 0.0009523809523809524,
"loss": 0.3218,
"step": 20
},
{
"epoch": 6.43,
"learning_rate": 0.0009444444444444445,
"loss": 0.3787,
"step": 21
},
{
"epoch": 7.14,
"learning_rate": 0.0009365079365079366,
"loss": 0.3622,
"step": 22
},
{
"epoch": 7.29,
"learning_rate": 0.0009285714285714287,
"loss": 0.2871,
"step": 23
},
{
"epoch": 7.43,
"learning_rate": 0.0009206349206349207,
"loss": 0.2373,
"step": 24
},
{
"epoch": 8.14,
"learning_rate": 0.0009126984126984126,
"loss": 0.2338,
"step": 25
},
{
"epoch": 8.29,
"learning_rate": 0.0009047619047619047,
"loss": 0.1994,
"step": 26
},
{
"epoch": 8.43,
"learning_rate": 0.0008968253968253968,
"loss": 0.2073,
"step": 27
},
{
"epoch": 9.14,
"learning_rate": 0.0008888888888888888,
"loss": 0.153,
"step": 28
},
{
"epoch": 9.29,
"learning_rate": 0.0008809523809523809,
"loss": 0.1608,
"step": 29
},
{
"epoch": 9.43,
"learning_rate": 0.000873015873015873,
"loss": 0.1244,
"step": 30
},
{
"epoch": 10.14,
"learning_rate": 0.0008650793650793651,
"loss": 0.1177,
"step": 31
},
{
"epoch": 10.29,
"learning_rate": 0.0008571428571428571,
"loss": 0.0993,
"step": 32
},
{
"epoch": 10.43,
"learning_rate": 0.0008492063492063492,
"loss": 0.0728,
"step": 33
},
{
"epoch": 11.14,
"learning_rate": 0.0008412698412698413,
"loss": 0.0591,
"step": 34
},
{
"epoch": 11.29,
"learning_rate": 0.0008333333333333334,
"loss": 0.0735,
"step": 35
},
{
"epoch": 11.43,
"learning_rate": 0.0008253968253968254,
"loss": 0.0483,
"step": 36
},
{
"epoch": 12.14,
"learning_rate": 0.0008174603174603175,
"loss": 0.0449,
"step": 37
},
{
"epoch": 12.29,
"learning_rate": 0.0008095238095238096,
"loss": 0.0329,
"step": 38
},
{
"epoch": 12.43,
"learning_rate": 0.0008015873015873017,
"loss": 0.0424,
"step": 39
},
{
"epoch": 13.14,
"learning_rate": 0.0007936507936507937,
"loss": 0.0327,
"step": 40
},
{
"epoch": 13.29,
"learning_rate": 0.0007857142857142857,
"loss": 0.0364,
"step": 41
},
{
"epoch": 13.43,
"learning_rate": 0.0007777777777777778,
"loss": 0.0286,
"step": 42
},
{
"epoch": 14.14,
"learning_rate": 0.0007698412698412699,
"loss": 0.0264,
"step": 43
},
{
"epoch": 14.29,
"learning_rate": 0.0007619047619047619,
"loss": 0.0225,
"step": 44
},
{
"epoch": 14.43,
"learning_rate": 0.000753968253968254,
"loss": 0.0332,
"step": 45
},
{
"epoch": 15.14,
"learning_rate": 0.000746031746031746,
"loss": 0.0214,
"step": 46
},
{
"epoch": 15.29,
"learning_rate": 0.0007380952380952381,
"loss": 0.0218,
"step": 47
},
{
"epoch": 15.43,
"learning_rate": 0.0007301587301587301,
"loss": 0.0227,
"step": 48
},
{
"epoch": 16.14,
"learning_rate": 0.0007222222222222222,
"loss": 0.0134,
"step": 49
},
{
"epoch": 16.29,
"learning_rate": 0.0007142857142857143,
"loss": 0.0185,
"step": 50
},
{
"epoch": 16.43,
"learning_rate": 0.0007063492063492064,
"loss": 0.029,
"step": 51
},
{
"epoch": 17.14,
"learning_rate": 0.0006984126984126984,
"loss": 0.0144,
"step": 52
},
{
"epoch": 17.29,
"learning_rate": 0.0006904761904761905,
"loss": 0.0171,
"step": 53
},
{
"epoch": 17.43,
"learning_rate": 0.0006825396825396826,
"loss": 0.0162,
"step": 54
},
{
"epoch": 18.14,
"learning_rate": 0.0006746031746031747,
"loss": 0.0134,
"step": 55
},
{
"epoch": 18.29,
"learning_rate": 0.0006666666666666666,
"loss": 0.0116,
"step": 56
},
{
"epoch": 18.43,
"learning_rate": 0.0006587301587301587,
"loss": 0.014,
"step": 57
},
{
"epoch": 19.14,
"learning_rate": 0.0006507936507936508,
"loss": 0.0084,
"step": 58
},
{
"epoch": 19.29,
"learning_rate": 0.0006428571428571429,
"loss": 0.0138,
"step": 59
},
{
"epoch": 19.43,
"learning_rate": 0.0006349206349206349,
"loss": 0.0173,
"step": 60
}
],
"logging_steps": 1,
"max_steps": 140,
"num_train_epochs": 20,
"save_steps": 500,
"total_flos": 1.13811835060224e+16,
"trial_name": null,
"trial_params": null
}