{
"best_metric": 6.77062463760376,
"best_model_checkpoint": "flowers_image_detection/checkpoint-9593",
"epoch": 1.0,
"eval_steps": 500,
"global_step": 9593,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"grad_norm": 1.298277735710144,
"learning_rate": 2.858535051870481e-07,
"loss": 6.8065,
"step": 500
},
{
"epoch": 0.1,
"grad_norm": 1.2105151414871216,
"learning_rate": 2.7013517761710155e-07,
"loss": 6.8025,
"step": 1000
},
{
"epoch": 0.16,
"grad_norm": 1.2060508728027344,
"learning_rate": 2.54416850047155e-07,
"loss": 6.7964,
"step": 1500
},
{
"epoch": 0.21,
"grad_norm": 1.1686173677444458,
"learning_rate": 2.3869852247720844e-07,
"loss": 6.7953,
"step": 2000
},
{
"epoch": 0.26,
"grad_norm": 1.2845182418823242,
"learning_rate": 2.2298019490726185e-07,
"loss": 6.7913,
"step": 2500
},
{
"epoch": 0.31,
"grad_norm": 1.1189314126968384,
"learning_rate": 2.072618673373153e-07,
"loss": 6.79,
"step": 3000
},
{
"epoch": 0.36,
"grad_norm": 1.2160365581512451,
"learning_rate": 1.9154353976736874e-07,
"loss": 6.7849,
"step": 3500
},
{
"epoch": 0.42,
"grad_norm": 1.1838222742080688,
"learning_rate": 1.7582521219742218e-07,
"loss": 6.7817,
"step": 4000
},
{
"epoch": 0.47,
"grad_norm": 1.2202887535095215,
"learning_rate": 1.6010688462747562e-07,
"loss": 6.7807,
"step": 4500
},
{
"epoch": 0.52,
"grad_norm": 1.2335509061813354,
"learning_rate": 1.4438855705752907e-07,
"loss": 6.7775,
"step": 5000
},
{
"epoch": 0.57,
"grad_norm": 1.1666423082351685,
"learning_rate": 1.286702294875825e-07,
"loss": 6.7741,
"step": 5500
},
{
"epoch": 0.63,
"grad_norm": 1.179144024848938,
"learning_rate": 1.1295190191763597e-07,
"loss": 6.7744,
"step": 6000
},
{
"epoch": 0.68,
"grad_norm": 1.2437212467193604,
"learning_rate": 9.723357434768941e-08,
"loss": 6.7714,
"step": 6500
},
{
"epoch": 0.73,
"grad_norm": 1.177861213684082,
"learning_rate": 8.151524677774285e-08,
"loss": 6.771,
"step": 7000
},
{
"epoch": 0.78,
"grad_norm": 1.1653987169265747,
"learning_rate": 6.57969192077963e-08,
"loss": 6.7686,
"step": 7500
},
{
"epoch": 0.83,
"grad_norm": 1.1824182271957397,
"learning_rate": 5.0078591637849724e-08,
"loss": 6.7678,
"step": 8000
},
{
"epoch": 0.89,
"grad_norm": 1.2102723121643066,
"learning_rate": 3.436026406790317e-08,
"loss": 6.7662,
"step": 8500
},
{
"epoch": 0.94,
"grad_norm": 1.2629424333572388,
"learning_rate": 1.8641936497956616e-08,
"loss": 6.7669,
"step": 9000
},
{
"epoch": 0.99,
"grad_norm": 1.2050453424453735,
"learning_rate": 2.92360892801006e-09,
"loss": 6.7688,
"step": 9500
},
{
"epoch": 1.0,
"eval_accuracy": 0.004085422469823584,
"eval_loss": 6.77062463760376,
"eval_runtime": 177.2104,
"eval_samples_per_second": 91.163,
"eval_steps_per_second": 11.399,
"step": 9593
}
],
"logging_steps": 500,
"max_steps": 9593,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 2.397721835373613e+19,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}