{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.0,
"eval_steps": 50,
"global_step": 2073,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0964785335262904,
"grad_norm": 0.7586275935173035,
"learning_rate": 2.364864864864865e-06,
"loss": 0.8531,
"step": 50
},
{
"epoch": 0.1929570670525808,
"grad_norm": 1.5941067934036255,
"learning_rate": 4.7779922779922784e-06,
"loss": 0.5954,
"step": 100
},
{
"epoch": 0.2894356005788712,
"grad_norm": 0.40901508927345276,
"learning_rate": 7.191119691119691e-06,
"loss": 0.3425,
"step": 150
},
{
"epoch": 0.3859141341051616,
"grad_norm": 0.567613959312439,
"learning_rate": 9.604247104247105e-06,
"loss": 0.3213,
"step": 200
},
{
"epoch": 0.482392667631452,
"grad_norm": 0.7472495436668396,
"learning_rate": 1.2017374517374517e-05,
"loss": 0.3009,
"step": 250
},
{
"epoch": 0.5788712011577424,
"grad_norm": 0.8184729218482971,
"learning_rate": 1.443050193050193e-05,
"loss": 0.2567,
"step": 300
},
{
"epoch": 0.6753497346840328,
"grad_norm": 0.6335634589195251,
"learning_rate": 1.6843629343629346e-05,
"loss": 0.2685,
"step": 350
},
{
"epoch": 0.7718282682103232,
"grad_norm": 0.4045080840587616,
"learning_rate": 1.916023166023166e-05,
"loss": 0.289,
"step": 400
},
{
"epoch": 0.8683068017366136,
"grad_norm": 0.5611971020698547,
"learning_rate": 2.1573359073359074e-05,
"loss": 0.2925,
"step": 450
},
{
"epoch": 0.964785335262904,
"grad_norm": 0.7453213930130005,
"learning_rate": 2.398648648648649e-05,
"loss": 0.263,
"step": 500
},
{
"epoch": 0.9995176073323685,
"eval_loss": 0.3949769139289856,
"eval_runtime": 193.7983,
"eval_samples_per_second": 1.192,
"eval_steps_per_second": 0.15,
"step": 518
},
{
"epoch": 1.0612638687891944,
"grad_norm": 0.6185714602470398,
"learning_rate": 2.6399613899613903e-05,
"loss": 0.2808,
"step": 550
},
{
"epoch": 1.1577424023154848,
"grad_norm": 0.7272781729698181,
"learning_rate": 2.8812741312741313e-05,
"loss": 0.222,
"step": 600
},
{
"epoch": 1.2542209358417753,
"grad_norm": 0.48480555415153503,
"learning_rate": 3.122586872586873e-05,
"loss": 0.252,
"step": 650
},
{
"epoch": 1.3506994693680656,
"grad_norm": 0.6064177751541138,
"learning_rate": 3.3638996138996145e-05,
"loss": 0.2599,
"step": 700
},
{
"epoch": 1.447178002894356,
"grad_norm": 0.5045720934867859,
"learning_rate": 3.605212355212355e-05,
"loss": 0.2588,
"step": 750
},
{
"epoch": 1.5436565364206465,
"grad_norm": 0.4221721291542053,
"learning_rate": 3.8465250965250966e-05,
"loss": 0.2612,
"step": 800
},
{
"epoch": 1.6401350699469368,
"grad_norm": 0.4660094976425171,
"learning_rate": 4.087837837837838e-05,
"loss": 0.2209,
"step": 850
},
{
"epoch": 1.7366136034732271,
"grad_norm": 0.6750354766845703,
"learning_rate": 4.3291505791505795e-05,
"loss": 0.2152,
"step": 900
},
{
"epoch": 1.8330921369995177,
"grad_norm": 0.522652268409729,
"learning_rate": 4.5704633204633205e-05,
"loss": 0.2418,
"step": 950
},
{
"epoch": 1.929570670525808,
"grad_norm": 1.7187763452529907,
"learning_rate": 4.811776061776062e-05,
"loss": 0.2496,
"step": 1000
},
{
"epoch": 1.9990352146647372,
"eval_loss": 0.36865848302841187,
"eval_runtime": 192.2269,
"eval_samples_per_second": 1.202,
"eval_steps_per_second": 0.151,
"step": 1036
},
{
"epoch": 2.0260492040520983,
"grad_norm": 0.980817973613739,
"learning_rate": 4.9867277992277996e-05,
"loss": 0.2725,
"step": 1050
},
{
"epoch": 2.122527737578389,
"grad_norm": 0.4250291585922241,
"learning_rate": 4.9263996138996145e-05,
"loss": 0.2423,
"step": 1100
},
{
"epoch": 2.2190062711046794,
"grad_norm": 0.4263651669025421,
"learning_rate": 4.866071428571429e-05,
"loss": 0.2347,
"step": 1150
},
{
"epoch": 2.3154848046309695,
"grad_norm": 0.43781450390815735,
"learning_rate": 4.8057432432432437e-05,
"loss": 0.2025,
"step": 1200
},
{
"epoch": 2.41196333815726,
"grad_norm": 0.49764326214790344,
"learning_rate": 4.745415057915058e-05,
"loss": 0.2336,
"step": 1250
},
{
"epoch": 2.5084418716835506,
"grad_norm": 0.5718632340431213,
"learning_rate": 4.685086872586873e-05,
"loss": 0.2025,
"step": 1300
},
{
"epoch": 2.6049204052098407,
"grad_norm": 0.5540716648101807,
"learning_rate": 4.624758687258687e-05,
"loss": 0.2312,
"step": 1350
},
{
"epoch": 2.7013989387361312,
"grad_norm": 0.5893882513046265,
"learning_rate": 4.5644305019305026e-05,
"loss": 0.1978,
"step": 1400
},
{
"epoch": 2.797877472262422,
"grad_norm": 0.5099292397499084,
"learning_rate": 4.504102316602317e-05,
"loss": 0.204,
"step": 1450
},
{
"epoch": 2.894356005788712,
"grad_norm": 0.5618692636489868,
"learning_rate": 4.443774131274132e-05,
"loss": 0.225,
"step": 1500
},
{
"epoch": 2.9908345393150024,
"grad_norm": 0.41460365056991577,
"learning_rate": 4.383445945945946e-05,
"loss": 0.2233,
"step": 1550
},
{
"epoch": 2.9985528219971056,
"eval_loss": 0.356653094291687,
"eval_runtime": 192.3283,
"eval_samples_per_second": 1.201,
"eval_steps_per_second": 0.151,
"step": 1554
},
{
"epoch": 3.087313072841293,
"grad_norm": 0.5067611336708069,
"learning_rate": 4.323117760617761e-05,
"loss": 0.1889,
"step": 1600
},
{
"epoch": 3.183791606367583,
"grad_norm": 0.4298361539840698,
"learning_rate": 4.262789575289575e-05,
"loss": 0.1774,
"step": 1650
},
{
"epoch": 3.2802701398938736,
"grad_norm": 0.2998252213001251,
"learning_rate": 4.20246138996139e-05,
"loss": 0.1961,
"step": 1700
},
{
"epoch": 3.376748673420164,
"grad_norm": 0.3195660412311554,
"learning_rate": 4.142133204633205e-05,
"loss": 0.2116,
"step": 1750
},
{
"epoch": 3.4732272069464543,
"grad_norm": 0.39473968744277954,
"learning_rate": 4.08180501930502e-05,
"loss": 0.1959,
"step": 1800
},
{
"epoch": 3.569705740472745,
"grad_norm": 0.3995039165019989,
"learning_rate": 4.0214768339768347e-05,
"loss": 0.202,
"step": 1850
},
{
"epoch": 3.6661842739990353,
"grad_norm": 0.8156320452690125,
"learning_rate": 3.961148648648649e-05,
"loss": 0.2374,
"step": 1900
},
{
"epoch": 3.7626628075253254,
"grad_norm": 0.3621225655078888,
"learning_rate": 3.900820463320464e-05,
"loss": 0.2155,
"step": 1950
},
{
"epoch": 3.859141341051616,
"grad_norm": 0.6666299700737,
"learning_rate": 3.840492277992278e-05,
"loss": 0.2013,
"step": 2000
},
{
"epoch": 3.9556198745779065,
"grad_norm": 0.5147814154624939,
"learning_rate": 3.780164092664093e-05,
"loss": 0.1901,
"step": 2050
},
{
"epoch": 4.0,
"eval_loss": 0.35413017868995667,
"eval_runtime": 192.2471,
"eval_samples_per_second": 1.202,
"eval_steps_per_second": 0.151,
"step": 2073
}
],
"logging_steps": 50,
"max_steps": 5180,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 8.872900451228713e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}