{
"best_metric": 0.24626319110393524,
"best_model_checkpoint": "faces_age_detection/checkpoint-4864",
"epoch": 16.0,
"eval_steps": 500,
"global_step": 4864,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.0,
"eval_accuracy": 0.9048206710374084,
"eval_loss": 0.25544658303260803,
"eval_model_preparation_time": 0.0056,
"eval_runtime": 115.8575,
"eval_samples_per_second": 111.905,
"eval_steps_per_second": 3.504,
"step": 304
},
{
"epoch": 1.6447368421052633,
"grad_norm": 9.665897369384766,
"learning_rate": 9.253731343283582e-07,
"loss": 0.1128,
"step": 500
},
{
"epoch": 2.0,
"eval_accuracy": 0.9051291939838025,
"eval_loss": 0.25322863459587097,
"eval_model_preparation_time": 0.0056,
"eval_runtime": 116.0042,
"eval_samples_per_second": 111.763,
"eval_steps_per_second": 3.5,
"step": 608
},
{
"epoch": 3.0,
"eval_accuracy": 0.909371384496722,
"eval_loss": 0.24770796298980713,
"eval_model_preparation_time": 0.0056,
"eval_runtime": 115.4652,
"eval_samples_per_second": 112.285,
"eval_steps_per_second": 3.516,
"step": 912
},
{
"epoch": 3.2894736842105265,
"grad_norm": 5.963768482208252,
"learning_rate": 8.424543946932007e-07,
"loss": 0.1092,
"step": 1000
},
{
"epoch": 4.0,
"eval_accuracy": 0.9075973775549556,
"eval_loss": 0.2508331835269928,
"eval_model_preparation_time": 0.0056,
"eval_runtime": 110.7185,
"eval_samples_per_second": 117.099,
"eval_steps_per_second": 3.667,
"step": 1216
},
{
"epoch": 4.934210526315789,
"grad_norm": 6.369262218475342,
"learning_rate": 7.595356550580431e-07,
"loss": 0.1013,
"step": 1500
},
{
"epoch": 5.0,
"eval_accuracy": 0.9069803316621674,
"eval_loss": 0.24949955940246582,
"eval_model_preparation_time": 0.0056,
"eval_runtime": 114.7896,
"eval_samples_per_second": 112.946,
"eval_steps_per_second": 3.537,
"step": 1520
},
{
"epoch": 6.0,
"eval_accuracy": 0.9052834554569996,
"eval_loss": 0.2557480037212372,
"eval_model_preparation_time": 0.0056,
"eval_runtime": 117.3931,
"eval_samples_per_second": 110.441,
"eval_steps_per_second": 3.458,
"step": 1824
},
{
"epoch": 6.578947368421053,
"grad_norm": 8.901986122131348,
"learning_rate": 6.766169154228856e-07,
"loss": 0.097,
"step": 2000
},
{
"epoch": 7.0,
"eval_accuracy": 0.9085229463941381,
"eval_loss": 0.2521224617958069,
"eval_model_preparation_time": 0.0056,
"eval_runtime": 116.8685,
"eval_samples_per_second": 110.937,
"eval_steps_per_second": 3.474,
"step": 2128
},
{
"epoch": 8.0,
"eval_accuracy": 0.9035865792518318,
"eval_loss": 0.2614166736602783,
"eval_model_preparation_time": 0.0056,
"eval_runtime": 117.7658,
"eval_samples_per_second": 110.091,
"eval_steps_per_second": 3.448,
"step": 2432
},
{
"epoch": 8.223684210526315,
"grad_norm": 9.241392135620117,
"learning_rate": 5.93698175787728e-07,
"loss": 0.0981,
"step": 2500
},
{
"epoch": 9.0,
"eval_accuracy": 0.9066718087157732,
"eval_loss": 0.25424298644065857,
"eval_model_preparation_time": 0.0056,
"eval_runtime": 117.1049,
"eval_samples_per_second": 110.713,
"eval_steps_per_second": 3.467,
"step": 2736
},
{
"epoch": 9.868421052631579,
"grad_norm": 15.468152046203613,
"learning_rate": 5.107794361525704e-07,
"loss": 0.0928,
"step": 3000
},
{
"epoch": 10.0,
"eval_accuracy": 0.9087543386039336,
"eval_loss": 0.2496512234210968,
"eval_model_preparation_time": 0.0056,
"eval_runtime": 117.518,
"eval_samples_per_second": 110.324,
"eval_steps_per_second": 3.455,
"step": 3040
},
{
"epoch": 11.0,
"eval_accuracy": 0.9098341689163132,
"eval_loss": 0.2480880618095398,
"eval_model_preparation_time": 0.0056,
"eval_runtime": 119.13,
"eval_samples_per_second": 108.831,
"eval_steps_per_second": 3.408,
"step": 3344
},
{
"epoch": 11.513157894736842,
"grad_norm": 9.02236557006836,
"learning_rate": 4.278606965174129e-07,
"loss": 0.088,
"step": 3500
},
{
"epoch": 12.0,
"eval_accuracy": 0.9090628615503278,
"eval_loss": 0.2512376010417938,
"eval_model_preparation_time": 0.0056,
"eval_runtime": 117.373,
"eval_samples_per_second": 110.46,
"eval_steps_per_second": 3.459,
"step": 3648
},
{
"epoch": 13.0,
"eval_accuracy": 0.9090628615503278,
"eval_loss": 0.2498735636472702,
"eval_model_preparation_time": 0.0056,
"eval_runtime": 116.0104,
"eval_samples_per_second": 111.757,
"eval_steps_per_second": 3.5,
"step": 3952
},
{
"epoch": 13.157894736842104,
"grad_norm": 10.029730796813965,
"learning_rate": 3.4494195688225535e-07,
"loss": 0.0855,
"step": 4000
},
{
"epoch": 14.0,
"eval_accuracy": 0.9099112996529117,
"eval_loss": 0.24805885553359985,
"eval_model_preparation_time": 0.0056,
"eval_runtime": 116.2889,
"eval_samples_per_second": 111.49,
"eval_steps_per_second": 3.491,
"step": 4256
},
{
"epoch": 14.802631578947368,
"grad_norm": 7.256553649902344,
"learning_rate": 2.620232172470978e-07,
"loss": 0.0847,
"step": 4500
},
{
"epoch": 15.0,
"eval_accuracy": 0.9097570381797146,
"eval_loss": 0.24851545691490173,
"eval_model_preparation_time": 0.0056,
"eval_runtime": 115.2188,
"eval_samples_per_second": 112.525,
"eval_steps_per_second": 3.524,
"step": 4560
},
{
"epoch": 16.0,
"eval_accuracy": 0.9106054762822985,
"eval_loss": 0.24626319110393524,
"eval_model_preparation_time": 0.0056,
"eval_runtime": 114.9847,
"eval_samples_per_second": 112.754,
"eval_steps_per_second": 3.531,
"step": 4864
}
],
"logging_steps": 500,
"max_steps": 6080,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.4112003658884735e+19,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}