{
"best_global_step": 8000,
"best_metric": 0.11000050604343414,
"best_model_checkpoint": "./blip-vqa-lora-final-1/checkpoint-8000",
"epoch": 1.8479039150017322,
"eval_steps": 1000,
"global_step": 8000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.23097355352812102,
"grad_norm": 0.8786712884902954,
"learning_rate": 0.00017701547701547702,
"loss": 0.7467,
"step": 1000
},
{
"epoch": 0.23097355352812102,
"eval_loss": 0.16969867050647736,
"eval_runtime": 989.2451,
"eval_samples_per_second": 6.564,
"eval_steps_per_second": 0.821,
"step": 1000
},
{
"epoch": 0.46194710705624203,
"grad_norm": 0.578987717628479,
"learning_rate": 0.0001539154539154539,
"loss": 0.1516,
"step": 2000
},
{
"epoch": 0.46194710705624203,
"eval_loss": 0.146992489695549,
"eval_runtime": 964.1326,
"eval_samples_per_second": 6.735,
"eval_steps_per_second": 0.842,
"step": 2000
},
{
"epoch": 0.6929206605843631,
"grad_norm": 1.0143482685089111,
"learning_rate": 0.00013081543081543083,
"loss": 0.1343,
"step": 3000
},
{
"epoch": 0.6929206605843631,
"eval_loss": 0.1319289207458496,
"eval_runtime": 958.9433,
"eval_samples_per_second": 6.771,
"eval_steps_per_second": 0.847,
"step": 3000
},
{
"epoch": 0.9238942141124841,
"grad_norm": 0.3963450789451599,
"learning_rate": 0.00010771540771540771,
"loss": 0.124,
"step": 4000
},
{
"epoch": 0.9238942141124841,
"eval_loss": 0.1267184019088745,
"eval_runtime": 1076.6607,
"eval_samples_per_second": 6.031,
"eval_steps_per_second": 0.754,
"step": 4000
},
{
"epoch": 1.1549832544173693,
"grad_norm": 0.6118173003196716,
"learning_rate": 8.461538461538461e-05,
"loss": 0.1012,
"step": 5000
},
{
"epoch": 1.1549832544173693,
"eval_loss": 0.12135909497737885,
"eval_runtime": 1032.2551,
"eval_samples_per_second": 6.29,
"eval_steps_per_second": 0.787,
"step": 5000
},
{
"epoch": 1.3859568079454903,
"grad_norm": 0.6660658121109009,
"learning_rate": 6.151536151536152e-05,
"loss": 0.0862,
"step": 6000
},
{
"epoch": 1.3859568079454903,
"eval_loss": 0.11788077652454376,
"eval_runtime": 1024.8116,
"eval_samples_per_second": 6.336,
"eval_steps_per_second": 0.792,
"step": 6000
},
{
"epoch": 1.6169303614736112,
"grad_norm": 0.7107754349708557,
"learning_rate": 3.841533841533842e-05,
"loss": 0.0832,
"step": 7000
},
{
"epoch": 1.6169303614736112,
"eval_loss": 0.11289198696613312,
"eval_runtime": 1022.4664,
"eval_samples_per_second": 6.35,
"eval_steps_per_second": 0.794,
"step": 7000
},
{
"epoch": 1.8479039150017322,
"grad_norm": 0.6195062398910522,
"learning_rate": 1.5315315315315316e-05,
"loss": 0.0809,
"step": 8000
},
{
"epoch": 1.8479039150017322,
"eval_loss": 0.11000050604343414,
"eval_runtime": 1027.0762,
"eval_samples_per_second": 6.322,
"eval_steps_per_second": 0.791,
"step": 8000
}
],
"logging_steps": 1000,
"max_steps": 8658,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.416643625195315e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}