{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 222,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.13651877133105803,
"grad_norm": 661.1541748046875,
"learning_rate": 3.6000000000000003e-06,
"loss": 4.1112,
"step": 10
},
{
"epoch": 0.27303754266211605,
"grad_norm": 404.9569091796875,
"learning_rate": 7.600000000000001e-06,
"loss": 3.4668,
"step": 20
},
{
"epoch": 0.40955631399317405,
"grad_norm": 221.81915283203125,
"learning_rate": 1.16e-05,
"loss": 3.9445,
"step": 30
},
{
"epoch": 0.5460750853242321,
"grad_norm": 624.3930053710938,
"learning_rate": 1.5600000000000003e-05,
"loss": 4.0167,
"step": 40
},
{
"epoch": 0.6825938566552902,
"grad_norm": 144.68826293945312,
"learning_rate": 1.9600000000000002e-05,
"loss": 4.353,
"step": 50
},
{
"epoch": 0.8191126279863481,
"grad_norm": 76.5335922241211,
"learning_rate": 1.8953488372093027e-05,
"loss": 2.4911,
"step": 60
},
{
"epoch": 0.9556313993174061,
"grad_norm": 156.03607177734375,
"learning_rate": 1.7790697674418608e-05,
"loss": 3.0436,
"step": 70
},
{
"epoch": 1.0819112627986347,
"grad_norm": 429.53564453125,
"learning_rate": 1.6627906976744188e-05,
"loss": 2.6976,
"step": 80
},
{
"epoch": 1.2184300341296928,
"grad_norm": 90.31644439697266,
"learning_rate": 1.546511627906977e-05,
"loss": 2.0883,
"step": 90
},
{
"epoch": 1.3549488054607508,
"grad_norm": 189.0266571044922,
"learning_rate": 1.4302325581395352e-05,
"loss": 3.0638,
"step": 100
},
{
"epoch": 1.4914675767918089,
"grad_norm": 80.8642578125,
"learning_rate": 1.313953488372093e-05,
"loss": 1.7315,
"step": 110
},
{
"epoch": 1.627986348122867,
"grad_norm": 129.05471801757812,
"learning_rate": 1.1976744186046511e-05,
"loss": 2.8467,
"step": 120
},
{
"epoch": 1.764505119453925,
"grad_norm": 427.9339294433594,
"learning_rate": 1.0813953488372094e-05,
"loss": 2.511,
"step": 130
},
{
"epoch": 1.901023890784983,
"grad_norm": 157.96697998046875,
"learning_rate": 9.651162790697676e-06,
"loss": 2.4705,
"step": 140
},
{
"epoch": 2.0273037542662116,
"grad_norm": 76.74122619628906,
"learning_rate": 8.488372093023256e-06,
"loss": 2.1727,
"step": 150
},
{
"epoch": 2.1638225255972694,
"grad_norm": 8.750153541564941,
"learning_rate": 7.325581395348837e-06,
"loss": 0.9306,
"step": 160
},
{
"epoch": 2.3003412969283277,
"grad_norm": 130.6959228515625,
"learning_rate": 6.162790697674419e-06,
"loss": 1.1803,
"step": 170
},
{
"epoch": 2.4368600682593855,
"grad_norm": 4.233165740966797,
"learning_rate": 5e-06,
"loss": 1.1223,
"step": 180
},
{
"epoch": 2.573378839590444,
"grad_norm": 210.20428466796875,
"learning_rate": 3.837209302325582e-06,
"loss": 1.5384,
"step": 190
},
{
"epoch": 2.7098976109215016,
"grad_norm": 25.38463592529297,
"learning_rate": 2.674418604651163e-06,
"loss": 0.6051,
"step": 200
},
{
"epoch": 2.84641638225256,
"grad_norm": 189.81982421875,
"learning_rate": 1.5116279069767443e-06,
"loss": 0.9252,
"step": 210
},
{
"epoch": 2.9829351535836177,
"grad_norm": 11.452348709106445,
"learning_rate": 3.488372093023256e-07,
"loss": 1.1221,
"step": 220
}
],
"logging_steps": 10,
"max_steps": 222,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.091492960403456e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}