{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 1000,
"global_step": 3352,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0002983293556085919,
"grad_norm": 6.0359320640563965,
"learning_rate": 0.0,
"loss": 2.2793,
"step": 1
},
{
"epoch": 0.29832935560859186,
"grad_norm": 3.87988543510437,
"learning_rate": 5.960620525059666e-06,
"loss": 2.265,
"step": 1000
},
{
"epoch": 0.29832935560859186,
"eval_cosine_accuracy": 0.9628772735595703,
"eval_loss": 0.8168219923973083,
"eval_runtime": 23.679,
"eval_samples_per_second": 401.579,
"eval_steps_per_second": 1.605,
"step": 1000
},
{
"epoch": 0.5966587112171837,
"grad_norm": 3.6651225090026855,
"learning_rate": 1.1927207637231503e-05,
"loss": 1.8687,
"step": 2000
},
{
"epoch": 0.5966587112171837,
"eval_cosine_accuracy": 0.9732884764671326,
"eval_loss": 0.7446231842041016,
"eval_runtime": 23.5526,
"eval_samples_per_second": 403.734,
"eval_steps_per_second": 1.613,
"step": 2000
},
{
"epoch": 0.8949880668257757,
"grad_norm": 3.408712863922119,
"learning_rate": 1.7893794749403345e-05,
"loss": 1.6653,
"step": 3000
},
{
"epoch": 0.8949880668257757,
"eval_cosine_accuracy": 0.9795982837677002,
"eval_loss": 0.6802141666412354,
"eval_runtime": 23.5108,
"eval_samples_per_second": 404.453,
"eval_steps_per_second": 1.616,
"step": 3000
}
],
"logging_steps": 1000,
"max_steps": 16760,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 256,
"trial_name": null,
"trial_params": null
}