{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 9915,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.25,
"grad_norm": 0.6509245038032532,
"learning_rate": 1.9495713565305096e-05,
"loss": 3.3172,
"step": 500
},
{
"epoch": 0.5,
"grad_norm": 0.3449840843677521,
"learning_rate": 1.8991427130610186e-05,
"loss": 0.6868,
"step": 1000
},
{
"epoch": 0.76,
"grad_norm": 0.3152226507663727,
"learning_rate": 1.8487140695915284e-05,
"loss": 0.556,
"step": 1500
},
{
"epoch": 1.0,
"eval_loss": 0.40746939182281494,
"eval_runtime": 1.4293,
"eval_samples_per_second": 139.932,
"eval_steps_per_second": 2.799,
"step": 1983
},
{
"epoch": 1.01,
"grad_norm": 0.2724578380584717,
"learning_rate": 1.7982854261220374e-05,
"loss": 0.4964,
"step": 2000
},
{
"epoch": 1.26,
"grad_norm": 0.2669610381126404,
"learning_rate": 1.747856782652547e-05,
"loss": 0.4694,
"step": 2500
},
{
"epoch": 1.51,
"grad_norm": 0.30558499693870544,
"learning_rate": 1.697428139183056e-05,
"loss": 0.4483,
"step": 3000
},
{
"epoch": 1.77,
"grad_norm": 0.24562540650367737,
"learning_rate": 1.6469994957135657e-05,
"loss": 0.4312,
"step": 3500
},
{
"epoch": 2.0,
"eval_loss": 0.3607844114303589,
"eval_runtime": 1.4442,
"eval_samples_per_second": 138.488,
"eval_steps_per_second": 2.77,
"step": 3966
},
{
"epoch": 2.02,
"grad_norm": 0.19550573825836182,
"learning_rate": 1.5965708522440747e-05,
"loss": 0.4217,
"step": 4000
},
{
"epoch": 2.27,
"grad_norm": 0.20502419769763947,
"learning_rate": 1.546142208774584e-05,
"loss": 0.4102,
"step": 4500
},
{
"epoch": 2.52,
"grad_norm": 0.20957614481449127,
"learning_rate": 1.4957135653050934e-05,
"loss": 0.4002,
"step": 5000
},
{
"epoch": 2.77,
"grad_norm": 0.251958429813385,
"learning_rate": 1.4452849218356026e-05,
"loss": 0.3971,
"step": 5500
},
{
"epoch": 3.0,
"eval_loss": 0.34329766035079956,
"eval_runtime": 1.412,
"eval_samples_per_second": 141.644,
"eval_steps_per_second": 2.833,
"step": 5949
},
{
"epoch": 3.03,
"grad_norm": 0.24571850895881653,
"learning_rate": 1.3948562783661122e-05,
"loss": 0.3927,
"step": 6000
},
{
"epoch": 3.28,
"grad_norm": 0.30599531531333923,
"learning_rate": 1.3444276348966214e-05,
"loss": 0.3817,
"step": 6500
},
{
"epoch": 3.53,
"grad_norm": 0.20877033472061157,
"learning_rate": 1.2939989914271307e-05,
"loss": 0.3822,
"step": 7000
},
{
"epoch": 3.78,
"grad_norm": 0.23654815554618835,
"learning_rate": 1.2435703479576399e-05,
"loss": 0.3742,
"step": 7500
},
{
"epoch": 4.0,
"eval_loss": 0.33198702335357666,
"eval_runtime": 1.3925,
"eval_samples_per_second": 143.629,
"eval_steps_per_second": 2.873,
"step": 7932
},
{
"epoch": 4.03,
"grad_norm": 0.19346973299980164,
"learning_rate": 1.1931417044881495e-05,
"loss": 0.3723,
"step": 8000
},
{
"epoch": 4.29,
"grad_norm": 0.15517400205135345,
"learning_rate": 1.1427130610186587e-05,
"loss": 0.3634,
"step": 8500
},
{
"epoch": 4.54,
"grad_norm": 0.20213666558265686,
"learning_rate": 1.0922844175491681e-05,
"loss": 0.3656,
"step": 9000
},
{
"epoch": 4.79,
"grad_norm": 0.20277400314807892,
"learning_rate": 1.0418557740796773e-05,
"loss": 0.3638,
"step": 9500
},
{
"epoch": 5.0,
"eval_loss": 0.3258039355278015,
"eval_runtime": 1.2465,
"eval_samples_per_second": 160.451,
"eval_steps_per_second": 3.209,
"step": 9915
}
],
"logging_steps": 500,
"max_steps": 19830,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 1.3277425805166182e+17,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}