{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 654,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.22935779816513763,
"grad_norm": 0.4296875,
"learning_rate": 4.942660550458716e-05,
"loss": 0.338,
"step": 50
},
{
"epoch": 0.45871559633027525,
"grad_norm": 0.328125,
"learning_rate": 4.8853211009174314e-05,
"loss": 0.2119,
"step": 100
},
{
"epoch": 0.6880733944954128,
"grad_norm": 0.25390625,
"learning_rate": 4.827981651376147e-05,
"loss": 0.2016,
"step": 150
},
{
"epoch": 0.9174311926605505,
"grad_norm": 0.22265625,
"learning_rate": 4.7706422018348626e-05,
"loss": 0.1936,
"step": 200
},
{
"epoch": 1.0,
"eval_loss": 0.18668922781944275,
"eval_runtime": 53.2527,
"eval_samples_per_second": 14.459,
"eval_steps_per_second": 0.469,
"step": 218
},
{
"epoch": 1.146788990825688,
"grad_norm": 0.25,
"learning_rate": 4.713302752293578e-05,
"loss": 0.1842,
"step": 250
},
{
"epoch": 1.3761467889908257,
"grad_norm": 0.236328125,
"learning_rate": 4.655963302752294e-05,
"loss": 0.1835,
"step": 300
},
{
"epoch": 1.6055045871559632,
"grad_norm": 0.236328125,
"learning_rate": 4.5986238532110096e-05,
"loss": 0.1762,
"step": 350
},
{
"epoch": 1.834862385321101,
"grad_norm": 0.236328125,
"learning_rate": 4.541284403669725e-05,
"loss": 0.1741,
"step": 400
},
{
"epoch": 2.0,
"eval_loss": 0.1787685602903366,
"eval_runtime": 52.7872,
"eval_samples_per_second": 14.587,
"eval_steps_per_second": 0.474,
"step": 436
},
{
"epoch": 2.0642201834862384,
"grad_norm": 0.2236328125,
"learning_rate": 4.483944954128441e-05,
"loss": 0.171,
"step": 450
},
{
"epoch": 2.293577981651376,
"grad_norm": 0.26171875,
"learning_rate": 4.426605504587156e-05,
"loss": 0.1656,
"step": 500
},
{
"epoch": 2.522935779816514,
"grad_norm": 0.2392578125,
"learning_rate": 4.369266055045872e-05,
"loss": 0.1629,
"step": 550
},
{
"epoch": 2.7522935779816513,
"grad_norm": 0.2294921875,
"learning_rate": 4.311926605504588e-05,
"loss": 0.1642,
"step": 600
},
{
"epoch": 2.981651376146789,
"grad_norm": 0.2451171875,
"learning_rate": 4.2545871559633024e-05,
"loss": 0.1604,
"step": 650
},
{
"epoch": 3.0,
"eval_loss": 0.17591096460819244,
"eval_runtime": 52.9863,
"eval_samples_per_second": 14.532,
"eval_steps_per_second": 0.472,
"step": 654
}
],
"logging_steps": 50,
"max_steps": 4360,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 500,
"total_flos": 4.600868691247104e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}