{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 50,
"global_step": 375,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.2,
"grad_norm": 2.5308005809783936,
"learning_rate": 1.99625729663483e-05,
"loss": 8.2688,
"step": 25
},
{
"epoch": 0.4,
"grad_norm": 2.6729016304016113,
"learning_rate": 1.95447296407255e-05,
"loss": 7.7962,
"step": 50
},
{
"epoch": 0.4,
"eval_loss": 7.424994945526123,
"eval_runtime": 36.7897,
"eval_samples_per_second": 8.154,
"eval_steps_per_second": 1.033,
"step": 50
},
{
"epoch": 0.6,
"grad_norm": 2.4165093898773193,
"learning_rate": 1.868180920098644e-05,
"loss": 7.272,
"step": 75
},
{
"epoch": 0.8,
"grad_norm": 3.073392152786255,
"learning_rate": 1.7414050203223092e-05,
"loss": 7.1306,
"step": 100
},
{
"epoch": 0.8,
"eval_loss": 6.984020233154297,
"eval_runtime": 37.416,
"eval_samples_per_second": 8.018,
"eval_steps_per_second": 1.016,
"step": 100
},
{
"epoch": 1.0,
"grad_norm": 3.286832332611084,
"learning_rate": 1.5800569095711983e-05,
"loss": 7.0359,
"step": 125
},
{
"epoch": 1.2,
"grad_norm": 2.9184694290161133,
"learning_rate": 1.3916603579471705e-05,
"loss": 6.9816,
"step": 150
},
{
"epoch": 1.2,
"eval_loss": 6.894983768463135,
"eval_runtime": 35.6543,
"eval_samples_per_second": 8.414,
"eval_steps_per_second": 1.066,
"step": 150
},
{
"epoch": 1.4,
"grad_norm": 3.036834478378296,
"learning_rate": 1.1850004224044315e-05,
"loss": 6.9832,
"step": 175
},
{
"epoch": 1.6,
"grad_norm": 2.8202149868011475,
"learning_rate": 9.697137936798635e-06,
"loss": 6.9095,
"step": 200
},
{
"epoch": 1.6,
"eval_loss": 6.850904941558838,
"eval_runtime": 36.7127,
"eval_samples_per_second": 8.172,
"eval_steps_per_second": 1.035,
"step": 200
},
{
"epoch": 1.8,
"grad_norm": 3.1002156734466553,
"learning_rate": 7.558394309716088e-06,
"loss": 6.8941,
"step": 225
},
{
"epoch": 2.0,
"grad_norm": 3.674051523208618,
"learning_rate": 5.533504385708024e-06,
"loss": 6.8801,
"step": 250
},
{
"epoch": 2.0,
"eval_loss": 6.828614711761475,
"eval_runtime": 37.0462,
"eval_samples_per_second": 8.098,
"eval_steps_per_second": 1.026,
"step": 250
},
{
"epoch": 2.2,
"grad_norm": 2.9793779850006104,
"learning_rate": 3.7168901335157313e-06,
"loss": 6.8425,
"step": 275
},
{
"epoch": 2.4,
"grad_norm": 3.072126865386963,
"learning_rate": 2.1932614882827196e-06,
"loss": 6.8726,
"step": 300
},
{
"epoch": 2.4,
"eval_loss": 6.8157854080200195,
"eval_runtime": 35.7147,
"eval_samples_per_second": 8.4,
"eval_steps_per_second": 1.064,
"step": 300
},
{
"epoch": 2.6,
"grad_norm": 2.8476767539978027,
"learning_rate": 1.0336662707363287e-06,
"loss": 6.9329,
"step": 325
},
{
"epoch": 2.8,
"grad_norm": 3.1517913341522217,
"learning_rate": 2.921771798838069e-07,
"loss": 6.861,
"step": 350
},
{
"epoch": 2.8,
"eval_loss": 6.814030170440674,
"eval_runtime": 36.1954,
"eval_samples_per_second": 8.288,
"eval_steps_per_second": 1.05,
"step": 350
},
{
"epoch": 3.0,
"grad_norm": 3.607140064239502,
"learning_rate": 3.3703469648760367e-09,
"loss": 6.8769,
"step": 375
}
],
"logging_steps": 25,
"max_steps": 375,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6930376445952000.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}