{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.02302025782688766,
"eval_steps": 1000,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 4e-05,
"loss": 3.6137,
"step": 1
},
{
"epoch": 0.0,
"learning_rate": 8e-05,
"loss": 3.4233,
"step": 2
},
{
"epoch": 0.0,
"learning_rate": 0.00012,
"loss": 3.3927,
"step": 3
},
{
"epoch": 0.0,
"learning_rate": 0.00016,
"loss": 2.8163,
"step": 4
},
{
"epoch": 0.0,
"learning_rate": 0.0002,
"loss": 2.4858,
"step": 5
},
{
"epoch": 0.0,
"learning_rate": 0.00019789473684210526,
"loss": 1.9139,
"step": 6
},
{
"epoch": 0.0,
"learning_rate": 0.00019578947368421054,
"loss": 2.0399,
"step": 7
},
{
"epoch": 0.0,
"learning_rate": 0.0001936842105263158,
"loss": 1.9175,
"step": 8
},
{
"epoch": 0.0,
"learning_rate": 0.00019157894736842104,
"loss": 2.2095,
"step": 9
},
{
"epoch": 0.0,
"learning_rate": 0.00018947368421052632,
"loss": 1.9044,
"step": 10
},
{
"epoch": 0.0,
"learning_rate": 0.0001873684210526316,
"loss": 2.013,
"step": 11
},
{
"epoch": 0.0,
"learning_rate": 0.00018526315789473685,
"loss": 2.1313,
"step": 12
},
{
"epoch": 0.0,
"learning_rate": 0.0001831578947368421,
"loss": 1.9865,
"step": 13
},
{
"epoch": 0.0,
"learning_rate": 0.00018105263157894739,
"loss": 1.7398,
"step": 14
},
{
"epoch": 0.0,
"learning_rate": 0.00017894736842105264,
"loss": 1.693,
"step": 15
},
{
"epoch": 0.0,
"learning_rate": 0.0001768421052631579,
"loss": 1.6588,
"step": 16
},
{
"epoch": 0.0,
"learning_rate": 0.00017473684210526317,
"loss": 1.989,
"step": 17
},
{
"epoch": 0.0,
"learning_rate": 0.00017263157894736842,
"loss": 1.9944,
"step": 18
},
{
"epoch": 0.0,
"learning_rate": 0.0001705263157894737,
"loss": 1.7393,
"step": 19
},
{
"epoch": 0.0,
"learning_rate": 0.00016842105263157895,
"loss": 1.7573,
"step": 20
},
{
"epoch": 0.0,
"learning_rate": 0.00016631578947368423,
"loss": 1.546,
"step": 21
},
{
"epoch": 0.01,
"learning_rate": 0.00016421052631578948,
"loss": 1.6286,
"step": 22
},
{
"epoch": 0.01,
"learning_rate": 0.00016210526315789473,
"loss": 1.61,
"step": 23
},
{
"epoch": 0.01,
"learning_rate": 0.00016,
"loss": 1.7942,
"step": 24
},
{
"epoch": 0.01,
"learning_rate": 0.00015789473684210527,
"loss": 1.4316,
"step": 25
},
{
"epoch": 0.01,
"learning_rate": 0.00015578947368421052,
"loss": 1.5606,
"step": 26
},
{
"epoch": 0.01,
"learning_rate": 0.0001536842105263158,
"loss": 1.7435,
"step": 27
},
{
"epoch": 0.01,
"learning_rate": 0.00015157894736842108,
"loss": 1.2904,
"step": 28
},
{
"epoch": 0.01,
"learning_rate": 0.00014947368421052633,
"loss": 1.7799,
"step": 29
},
{
"epoch": 0.01,
"learning_rate": 0.00014736842105263158,
"loss": 1.3407,
"step": 30
},
{
"epoch": 0.01,
"learning_rate": 0.00014526315789473686,
"loss": 1.7537,
"step": 31
},
{
"epoch": 0.01,
"learning_rate": 0.0001431578947368421,
"loss": 1.6549,
"step": 32
},
{
"epoch": 0.01,
"learning_rate": 0.00014105263157894736,
"loss": 1.8332,
"step": 33
},
{
"epoch": 0.01,
"learning_rate": 0.00013894736842105264,
"loss": 1.4519,
"step": 34
},
{
"epoch": 0.01,
"learning_rate": 0.0001368421052631579,
"loss": 1.7497,
"step": 35
},
{
"epoch": 0.01,
"learning_rate": 0.00013473684210526317,
"loss": 1.8566,
"step": 36
},
{
"epoch": 0.01,
"learning_rate": 0.00013263157894736842,
"loss": 1.5518,
"step": 37
},
{
"epoch": 0.01,
"learning_rate": 0.0001305263157894737,
"loss": 1.2759,
"step": 38
},
{
"epoch": 0.01,
"learning_rate": 0.00012842105263157895,
"loss": 1.5634,
"step": 39
},
{
"epoch": 0.01,
"learning_rate": 0.0001263157894736842,
"loss": 1.3718,
"step": 40
},
{
"epoch": 0.01,
"learning_rate": 0.00012421052631578949,
"loss": 1.2056,
"step": 41
},
{
"epoch": 0.01,
"learning_rate": 0.00012210526315789474,
"loss": 1.3365,
"step": 42
},
{
"epoch": 0.01,
"learning_rate": 0.00012,
"loss": 1.5912,
"step": 43
},
{
"epoch": 0.01,
"learning_rate": 0.00011789473684210525,
"loss": 1.5067,
"step": 44
},
{
"epoch": 0.01,
"learning_rate": 0.00011578947368421053,
"loss": 1.8345,
"step": 45
},
{
"epoch": 0.01,
"learning_rate": 0.0001136842105263158,
"loss": 1.5727,
"step": 46
},
{
"epoch": 0.01,
"learning_rate": 0.00011157894736842105,
"loss": 1.5058,
"step": 47
},
{
"epoch": 0.01,
"learning_rate": 0.00010947368421052633,
"loss": 1.5746,
"step": 48
},
{
"epoch": 0.01,
"learning_rate": 0.00010736842105263158,
"loss": 1.4634,
"step": 49
},
{
"epoch": 0.01,
"learning_rate": 0.00010526315789473685,
"loss": 1.6622,
"step": 50
},
{
"epoch": 0.01,
"learning_rate": 0.00010315789473684211,
"loss": 1.7978,
"step": 51
},
{
"epoch": 0.01,
"learning_rate": 0.00010105263157894738,
"loss": 1.6079,
"step": 52
},
{
"epoch": 0.01,
"learning_rate": 9.894736842105263e-05,
"loss": 1.3305,
"step": 53
},
{
"epoch": 0.01,
"learning_rate": 9.68421052631579e-05,
"loss": 1.0298,
"step": 54
},
{
"epoch": 0.01,
"learning_rate": 9.473684210526316e-05,
"loss": 1.5233,
"step": 55
},
{
"epoch": 0.01,
"learning_rate": 9.263157894736843e-05,
"loss": 1.8328,
"step": 56
},
{
"epoch": 0.01,
"learning_rate": 9.052631578947369e-05,
"loss": 0.8634,
"step": 57
},
{
"epoch": 0.01,
"learning_rate": 8.842105263157894e-05,
"loss": 1.5285,
"step": 58
},
{
"epoch": 0.01,
"learning_rate": 8.631578947368421e-05,
"loss": 1.3395,
"step": 59
},
{
"epoch": 0.01,
"learning_rate": 8.421052631578948e-05,
"loss": 1.1776,
"step": 60
},
{
"epoch": 0.01,
"learning_rate": 8.210526315789474e-05,
"loss": 1.3302,
"step": 61
},
{
"epoch": 0.01,
"learning_rate": 8e-05,
"loss": 1.2749,
"step": 62
},
{
"epoch": 0.01,
"learning_rate": 7.789473684210526e-05,
"loss": 1.4488,
"step": 63
},
{
"epoch": 0.01,
"learning_rate": 7.578947368421054e-05,
"loss": 1.7981,
"step": 64
},
{
"epoch": 0.01,
"learning_rate": 7.368421052631579e-05,
"loss": 1.2097,
"step": 65
},
{
"epoch": 0.02,
"learning_rate": 7.157894736842105e-05,
"loss": 1.5589,
"step": 66
},
{
"epoch": 0.02,
"learning_rate": 6.947368421052632e-05,
"loss": 1.5017,
"step": 67
},
{
"epoch": 0.02,
"learning_rate": 6.736842105263159e-05,
"loss": 1.5159,
"step": 68
},
{
"epoch": 0.02,
"learning_rate": 6.526315789473685e-05,
"loss": 1.4459,
"step": 69
},
{
"epoch": 0.02,
"learning_rate": 6.31578947368421e-05,
"loss": 1.5433,
"step": 70
},
{
"epoch": 0.02,
"learning_rate": 6.105263157894737e-05,
"loss": 1.8857,
"step": 71
},
{
"epoch": 0.02,
"learning_rate": 5.894736842105263e-05,
"loss": 1.6394,
"step": 72
},
{
"epoch": 0.02,
"learning_rate": 5.68421052631579e-05,
"loss": 1.3062,
"step": 73
},
{
"epoch": 0.02,
"learning_rate": 5.4736842105263165e-05,
"loss": 1.5015,
"step": 74
},
{
"epoch": 0.02,
"learning_rate": 5.2631578947368424e-05,
"loss": 1.647,
"step": 75
},
{
"epoch": 0.02,
"learning_rate": 5.052631578947369e-05,
"loss": 1.6026,
"step": 76
},
{
"epoch": 0.02,
"learning_rate": 4.842105263157895e-05,
"loss": 1.4526,
"step": 77
},
{
"epoch": 0.02,
"learning_rate": 4.6315789473684214e-05,
"loss": 1.5979,
"step": 78
},
{
"epoch": 0.02,
"learning_rate": 4.421052631578947e-05,
"loss": 1.4417,
"step": 79
},
{
"epoch": 0.02,
"learning_rate": 4.210526315789474e-05,
"loss": 1.619,
"step": 80
},
{
"epoch": 0.02,
"learning_rate": 4e-05,
"loss": 1.1932,
"step": 81
},
{
"epoch": 0.02,
"learning_rate": 3.789473684210527e-05,
"loss": 1.563,
"step": 82
},
{
"epoch": 0.02,
"learning_rate": 3.578947368421053e-05,
"loss": 1.4538,
"step": 83
},
{
"epoch": 0.02,
"learning_rate": 3.368421052631579e-05,
"loss": 1.226,
"step": 84
},
{
"epoch": 0.02,
"learning_rate": 3.157894736842105e-05,
"loss": 1.3118,
"step": 85
},
{
"epoch": 0.02,
"learning_rate": 2.9473684210526314e-05,
"loss": 1.4562,
"step": 86
},
{
"epoch": 0.02,
"learning_rate": 2.7368421052631583e-05,
"loss": 1.2999,
"step": 87
},
{
"epoch": 0.02,
"learning_rate": 2.5263157894736845e-05,
"loss": 1.6079,
"step": 88
},
{
"epoch": 0.02,
"learning_rate": 2.3157894736842107e-05,
"loss": 1.3978,
"step": 89
},
{
"epoch": 0.02,
"learning_rate": 2.105263157894737e-05,
"loss": 1.334,
"step": 90
},
{
"epoch": 0.02,
"learning_rate": 1.8947368421052634e-05,
"loss": 1.8286,
"step": 91
},
{
"epoch": 0.02,
"learning_rate": 1.6842105263157896e-05,
"loss": 1.2721,
"step": 92
},
{
"epoch": 0.02,
"learning_rate": 1.4736842105263157e-05,
"loss": 1.6883,
"step": 93
},
{
"epoch": 0.02,
"learning_rate": 1.2631578947368422e-05,
"loss": 1.9406,
"step": 94
},
{
"epoch": 0.02,
"learning_rate": 1.0526315789473684e-05,
"loss": 1.4372,
"step": 95
},
{
"epoch": 0.02,
"learning_rate": 8.421052631578948e-06,
"loss": 1.4428,
"step": 96
},
{
"epoch": 0.02,
"learning_rate": 6.315789473684211e-06,
"loss": 1.696,
"step": 97
},
{
"epoch": 0.02,
"learning_rate": 4.210526315789474e-06,
"loss": 1.1901,
"step": 98
},
{
"epoch": 0.02,
"learning_rate": 2.105263157894737e-06,
"loss": 1.574,
"step": 99
},
{
"epoch": 0.02,
"learning_rate": 0.0,
"loss": 1.4564,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_train_epochs": 1,
"save_steps": 25,
"total_flos": 2031145737289728.0,
"trial_name": null,
"trial_params": null
}