gemmalorafull/checkpoint-200/trainer_state.json
{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.3333333333333335,
"eval_steps": 500,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08333333333333333,
"grad_norm": 41.758705139160156,
"learning_rate": 4.9986614686909146e-05,
"loss": 39.5171,
"num_input_tokens_seen": 47056,
"step": 5
},
{
"epoch": 0.16666666666666666,
"grad_norm": 35.72260665893555,
"learning_rate": 4.994647308096509e-05,
"loss": 3.2466,
"num_input_tokens_seen": 93544,
"step": 10
},
{
"epoch": 0.25,
"grad_norm": 40.3800163269043,
"learning_rate": 4.987961816680492e-05,
"loss": 3.3122,
"num_input_tokens_seen": 141120,
"step": 15
},
{
"epoch": 0.3333333333333333,
"grad_norm": 22.71551513671875,
"learning_rate": 4.9786121534345265e-05,
"loss": 3.0265,
"num_input_tokens_seen": 188544,
"step": 20
},
{
"epoch": 0.4166666666666667,
"grad_norm": 16.711044311523438,
"learning_rate": 4.966608330212198e-05,
"loss": 2.6989,
"num_input_tokens_seen": 235128,
"step": 25
},
{
"epoch": 0.5,
"grad_norm": 10.093056678771973,
"learning_rate": 4.951963201008076e-05,
"loss": 2.2284,
"num_input_tokens_seen": 281904,
"step": 30
},
{
"epoch": 0.5833333333333334,
"grad_norm": 12.554229736328125,
"learning_rate": 4.934692448193334e-05,
"loss": 2.2164,
"num_input_tokens_seen": 330096,
"step": 35
},
{
"epoch": 0.6666666666666666,
"grad_norm": 20.774412155151367,
"learning_rate": 4.914814565722671e-05,
"loss": 2.6854,
"num_input_tokens_seen": 376824,
"step": 40
},
{
"epoch": 0.75,
"grad_norm": 12.924601554870605,
"learning_rate": 4.892350839330522e-05,
"loss": 2.4108,
"num_input_tokens_seen": 424048,
"step": 45
},
{
"epoch": 0.8333333333333334,
"grad_norm": 23.568218231201172,
"learning_rate": 4.867325323737765e-05,
"loss": 2.419,
"num_input_tokens_seen": 470432,
"step": 50
},
{
"epoch": 0.9166666666666666,
"grad_norm": 29.64082908630371,
"learning_rate": 4.839764816893315e-05,
"loss": 2.3183,
"num_input_tokens_seen": 517624,
"step": 55
},
{
"epoch": 1.0,
"grad_norm": 17.5496883392334,
"learning_rate": 4.8096988312782174e-05,
"loss": 1.9734,
"num_input_tokens_seen": 564920,
"step": 60
},
{
"epoch": 1.0833333333333333,
"grad_norm": 14.56460189819336,
"learning_rate": 4.7771595623029394e-05,
"loss": 1.8086,
"num_input_tokens_seen": 611240,
"step": 65
},
{
"epoch": 1.1666666666666667,
"grad_norm": 11.394974708557129,
"learning_rate": 4.742181853831721e-05,
"loss": 2.4591,
"num_input_tokens_seen": 658904,
"step": 70
},
{
"epoch": 1.25,
"grad_norm": 5.737662315368652,
"learning_rate": 4.7048031608708876e-05,
"loss": 1.7169,
"num_input_tokens_seen": 706480,
"step": 75
},
{
"epoch": 1.3333333333333333,
"grad_norm": 18.580102920532227,
"learning_rate": 4.665063509461097e-05,
"loss": 1.809,
"num_input_tokens_seen": 752904,
"step": 80
},
{
"epoch": 1.4166666666666667,
"grad_norm": 18.77831268310547,
"learning_rate": 4.6230054538164474e-05,
"loss": 2.3907,
"num_input_tokens_seen": 800304,
"step": 85
},
{
"epoch": 1.5,
"grad_norm": 7.019700527191162,
"learning_rate": 4.5786740307563636e-05,
"loss": 2.0662,
"num_input_tokens_seen": 846792,
"step": 90
},
{
"epoch": 1.5833333333333335,
"grad_norm": 12.335309028625488,
"learning_rate": 4.5321167114790385e-05,
"loss": 2.0185,
"num_input_tokens_seen": 894552,
"step": 95
},
{
"epoch": 1.6666666666666665,
"grad_norm": 8.229971885681152,
"learning_rate": 4.4833833507280884e-05,
"loss": 1.9831,
"num_input_tokens_seen": 941040,
"step": 100
},
{
"epoch": 1.75,
"grad_norm": 13.02308177947998,
"learning_rate": 4.4325261334068426e-05,
"loss": 2.0873,
"num_input_tokens_seen": 987776,
"step": 105
},
{
"epoch": 1.8333333333333335,
"grad_norm": 6.7481513023376465,
"learning_rate": 4.379599518697444e-05,
"loss": 1.9163,
"num_input_tokens_seen": 1036072,
"step": 110
},
{
"epoch": 1.9166666666666665,
"grad_norm": 8.03829288482666,
"learning_rate": 4.324660181744589e-05,
"loss": 1.4848,
"num_input_tokens_seen": 1083328,
"step": 115
},
{
"epoch": 2.0,
"grad_norm": 9.837620735168457,
"learning_rate": 4.267766952966369e-05,
"loss": 1.7719,
"num_input_tokens_seen": 1129840,
"step": 120
},
{
"epoch": 2.0833333333333335,
"grad_norm": 14.701643943786621,
"learning_rate": 4.208980755057178e-05,
"loss": 1.4241,
"num_input_tokens_seen": 1177616,
"step": 125
},
{
"epoch": 2.1666666666666665,
"grad_norm": 30.390798568725586,
"learning_rate": 4.148364537750172e-05,
"loss": 1.6277,
"num_input_tokens_seen": 1225296,
"step": 130
},
{
"epoch": 2.25,
"grad_norm": 9.536770820617676,
"learning_rate": 4.085983210409114e-05,
"loss": 0.9761,
"num_input_tokens_seen": 1272160,
"step": 135
},
{
"epoch": 2.3333333333333335,
"grad_norm": 11.45682430267334,
"learning_rate": 4.021903572521802e-05,
"loss": 1.0144,
"num_input_tokens_seen": 1319432,
"step": 140
},
{
"epoch": 2.4166666666666665,
"grad_norm": 11.267831802368164,
"learning_rate": 3.956194242169506e-05,
"loss": 1.3858,
"num_input_tokens_seen": 1365992,
"step": 145
},
{
"epoch": 2.5,
"grad_norm": 11.92896556854248,
"learning_rate": 3.888925582549006e-05,
"loss": 0.9969,
"num_input_tokens_seen": 1412712,
"step": 150
},
{
"epoch": 2.5833333333333335,
"grad_norm": 15.724352836608887,
"learning_rate": 3.82016962662592e-05,
"loss": 1.0721,
"num_input_tokens_seen": 1459448,
"step": 155
},
{
"epoch": 2.6666666666666665,
"grad_norm": 12.68454360961914,
"learning_rate": 3.7500000000000003e-05,
"loss": 1.6622,
"num_input_tokens_seen": 1506944,
"step": 160
},
{
"epoch": 2.75,
"grad_norm": 6.482985496520996,
"learning_rate": 3.678491842064995e-05,
"loss": 1.059,
"num_input_tokens_seen": 1554216,
"step": 165
},
{
"epoch": 2.8333333333333335,
"grad_norm": 15.581390380859375,
"learning_rate": 3.6057217255475034e-05,
"loss": 0.774,
"num_input_tokens_seen": 1600608,
"step": 170
},
{
"epoch": 2.9166666666666665,
"grad_norm": 3.8697454929351807,
"learning_rate": 3.5317675745109866e-05,
"loss": 1.0403,
"num_input_tokens_seen": 1647272,
"step": 175
},
{
"epoch": 3.0,
"grad_norm": 54.316123962402344,
"learning_rate": 3.456708580912725e-05,
"loss": 2.173,
"num_input_tokens_seen": 1694760,
"step": 180
},
{
"epoch": 3.0833333333333335,
"grad_norm": 57.840152740478516,
"learning_rate": 3.380625119803084e-05,
"loss": 1.9583,
"num_input_tokens_seen": 1741936,
"step": 185
},
{
"epoch": 3.1666666666666665,
"grad_norm": 26.283540725708008,
"learning_rate": 3.303598663257904e-05,
"loss": 1.8407,
"num_input_tokens_seen": 1788624,
"step": 190
},
{
"epoch": 3.25,
"grad_norm": 15.37193489074707,
"learning_rate": 3.225711693136156e-05,
"loss": 1.1398,
"num_input_tokens_seen": 1835336,
"step": 195
},
{
"epoch": 3.3333333333333335,
"grad_norm": 53.772212982177734,
"learning_rate": 3.147047612756302e-05,
"loss": 0.8378,
"num_input_tokens_seen": 1882584,
"step": 200
}
],
"logging_steps": 5,
"max_steps": 480,
"num_input_tokens_seen": 1882584,
"num_train_epochs": 8,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.9901598271122406e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
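
For reference, a state file like the one above can be inspected programmatically. Below is a minimal sketch that loads this trainer_state.json, prints the logged loss curve, and compares each logged learning rate against a plain cosine decay schedule. The file path and the 5e-5 peak learning rate are assumptions inferred from the logged values, not confirmed by any training config.

```python
import json
import math

# Load the Trainer state written by Hugging Face Transformers at step 200.
# The path is an assumption; point it at wherever the checkpoint is stored.
with open("checkpoint-200/trainer_state.json") as f:
    state = json.load(f)

# log_history holds one entry per logging event (every logging_steps = 5 here),
# each with step, epoch, loss, grad_norm, learning_rate, num_input_tokens_seen.
peak_lr = 5e-5                  # inferred from the logged values, not from the config
max_steps = state["max_steps"]  # 480

for entry in state["log_history"]:
    # Cosine decay from peak_lr to 0 over max_steps reproduces the logged lr values.
    cosine_lr = 0.5 * peak_lr * (1 + math.cos(math.pi * entry["step"] / max_steps))
    print(
        f"step {entry['step']:>3}  epoch {entry['epoch']:5.2f}  "
        f"loss {entry['loss']:7.4f}  lr {entry['learning_rate']:.4e}  "
        f"(cosine fit {cosine_lr:.4e})"
    )

print(f"\nglobal_step {state['global_step']} of {state['max_steps']}, "
      f"{state['num_input_tokens_seen']:,} input tokens seen")
```

The close match between the cosine fit and the logged learning rates suggests this run used a cosine learning rate schedule decaying from roughly 5e-5 over the full 480-step budget, though only the state file itself is available here, so this remains an inference.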