{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.6666666666666665,
"eval_steps": 500,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08333333333333333,
"grad_norm": 41.758705139160156,
"learning_rate": 4.9986614686909146e-05,
"loss": 39.5171,
"num_input_tokens_seen": 47056,
"step": 5
},
{
"epoch": 0.16666666666666666,
"grad_norm": 35.72260665893555,
"learning_rate": 4.994647308096509e-05,
"loss": 3.2466,
"num_input_tokens_seen": 93544,
"step": 10
},
{
"epoch": 0.25,
"grad_norm": 40.3800163269043,
"learning_rate": 4.987961816680492e-05,
"loss": 3.3122,
"num_input_tokens_seen": 141120,
"step": 15
},
{
"epoch": 0.3333333333333333,
"grad_norm": 22.71551513671875,
"learning_rate": 4.9786121534345265e-05,
"loss": 3.0265,
"num_input_tokens_seen": 188544,
"step": 20
},
{
"epoch": 0.4166666666666667,
"grad_norm": 16.711044311523438,
"learning_rate": 4.966608330212198e-05,
"loss": 2.6989,
"num_input_tokens_seen": 235128,
"step": 25
},
{
"epoch": 0.5,
"grad_norm": 10.093056678771973,
"learning_rate": 4.951963201008076e-05,
"loss": 2.2284,
"num_input_tokens_seen": 281904,
"step": 30
},
{
"epoch": 0.5833333333333334,
"grad_norm": 12.554229736328125,
"learning_rate": 4.934692448193334e-05,
"loss": 2.2164,
"num_input_tokens_seen": 330096,
"step": 35
},
{
"epoch": 0.6666666666666666,
"grad_norm": 20.774412155151367,
"learning_rate": 4.914814565722671e-05,
"loss": 2.6854,
"num_input_tokens_seen": 376824,
"step": 40
},
{
"epoch": 0.75,
"grad_norm": 12.924601554870605,
"learning_rate": 4.892350839330522e-05,
"loss": 2.4108,
"num_input_tokens_seen": 424048,
"step": 45
},
{
"epoch": 0.8333333333333334,
"grad_norm": 23.568218231201172,
"learning_rate": 4.867325323737765e-05,
"loss": 2.419,
"num_input_tokens_seen": 470432,
"step": 50
},
{
"epoch": 0.9166666666666666,
"grad_norm": 29.64082908630371,
"learning_rate": 4.839764816893315e-05,
"loss": 2.3183,
"num_input_tokens_seen": 517624,
"step": 55
},
{
"epoch": 1.0,
"grad_norm": 17.5496883392334,
"learning_rate": 4.8096988312782174e-05,
"loss": 1.9734,
"num_input_tokens_seen": 564920,
"step": 60
},
{
"epoch": 1.0833333333333333,
"grad_norm": 14.56460189819336,
"learning_rate": 4.7771595623029394e-05,
"loss": 1.8086,
"num_input_tokens_seen": 611240,
"step": 65
},
{
"epoch": 1.1666666666666667,
"grad_norm": 11.394974708557129,
"learning_rate": 4.742181853831721e-05,
"loss": 2.4591,
"num_input_tokens_seen": 658904,
"step": 70
},
{
"epoch": 1.25,
"grad_norm": 5.737662315368652,
"learning_rate": 4.7048031608708876e-05,
"loss": 1.7169,
"num_input_tokens_seen": 706480,
"step": 75
},
{
"epoch": 1.3333333333333333,
"grad_norm": 18.580102920532227,
"learning_rate": 4.665063509461097e-05,
"loss": 1.809,
"num_input_tokens_seen": 752904,
"step": 80
},
{
"epoch": 1.4166666666666667,
"grad_norm": 18.77831268310547,
"learning_rate": 4.6230054538164474e-05,
"loss": 2.3907,
"num_input_tokens_seen": 800304,
"step": 85
},
{
"epoch": 1.5,
"grad_norm": 7.019700527191162,
"learning_rate": 4.5786740307563636e-05,
"loss": 2.0662,
"num_input_tokens_seen": 846792,
"step": 90
},
{
"epoch": 1.5833333333333335,
"grad_norm": 12.335309028625488,
"learning_rate": 4.5321167114790385e-05,
"loss": 2.0185,
"num_input_tokens_seen": 894552,
"step": 95
},
{
"epoch": 1.6666666666666665,
"grad_norm": 8.229971885681152,
"learning_rate": 4.4833833507280884e-05,
"loss": 1.9831,
"num_input_tokens_seen": 941040,
"step": 100
}
],
"logging_steps": 5,
"max_steps": 480,
"num_input_tokens_seen": 941040,
"num_train_epochs": 8,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.4946796550409984e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}