{
"best_global_step": 600,
"best_metric": 2.494161605834961,
"best_model_checkpoint": "business_qa_flan_t5_xl_rope_LoRA_model/checkpoint-600",
"epoch": 0.8300907911802854,
"eval_steps": 200,
"global_step": 600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06917423259835712,
"grad_norm": 1.7663676738739014,
"learning_rate": 9.3875e-05,
"loss": 7.0007,
"step": 50
},
{
"epoch": 0.13834846519671423,
"grad_norm": 1.1584688425064087,
"learning_rate": 8.7625e-05,
"loss": 4.2926,
"step": 100
},
{
"epoch": 0.20752269779507135,
"grad_norm": 1.6167352199554443,
"learning_rate": 8.1375e-05,
"loss": 3.5178,
"step": 150
},
{
"epoch": 0.27669693039342846,
"grad_norm": 1.4269593954086304,
"learning_rate": 7.5125e-05,
"loss": 3.2704,
"step": 200
},
{
"epoch": 0.27669693039342846,
"eval_loss": 2.961073637008667,
"eval_runtime": 534.2317,
"eval_samples_per_second": 2.405,
"eval_steps_per_second": 0.603,
"step": 200
},
{
"epoch": 0.34587116299178555,
"grad_norm": 1.6946117877960205,
"learning_rate": 6.887500000000001e-05,
"loss": 3.0671,
"step": 250
},
{
"epoch": 0.4150453955901427,
"grad_norm": 1.938078761100769,
"learning_rate": 6.2625e-05,
"loss": 2.9921,
"step": 300
},
{
"epoch": 0.4842196281884998,
"grad_norm": 2.3420886993408203,
"learning_rate": 5.6375e-05,
"loss": 2.9324,
"step": 350
},
{
"epoch": 0.5533938607868569,
"grad_norm": 2.5413014888763428,
"learning_rate": 5.0125e-05,
"loss": 2.8571,
"step": 400
},
{
"epoch": 0.5533938607868569,
"eval_loss": 2.6418941020965576,
"eval_runtime": 531.7902,
"eval_samples_per_second": 2.416,
"eval_steps_per_second": 0.606,
"step": 400
},
{
"epoch": 0.622568093385214,
"grad_norm": 2.8039212226867676,
"learning_rate": 4.3875e-05,
"loss": 2.7732,
"step": 450
},
{
"epoch": 0.6917423259835711,
"grad_norm": 2.8921995162963867,
"learning_rate": 3.7625e-05,
"loss": 2.7173,
"step": 500
},
{
"epoch": 0.7609165585819282,
"grad_norm": 3.0461184978485107,
"learning_rate": 3.1375e-05,
"loss": 2.6135,
"step": 550
},
{
"epoch": 0.8300907911802854,
"grad_norm": 3.964676856994629,
"learning_rate": 2.5124999999999997e-05,
"loss": 2.6357,
"step": 600
},
{
"epoch": 0.8300907911802854,
"eval_loss": 2.494161605834961,
"eval_runtime": 532.8559,
"eval_samples_per_second": 2.412,
"eval_steps_per_second": 0.604,
"step": 600
}
],
"logging_steps": 50,
"max_steps": 800,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 8.23805463232512e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}