{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.07566204287515763,
"eval_steps": 100,
"global_step": 30,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0025220680958385876,
"grad_norm": 119.76318359375,
"kl_loss": -1.1687562835330993e-15,
"kl_weight": 1000000.0,
"learning_rate": 0.0,
"loss": 2.6394360065460205,
"step": 1,
"total_loss": 2.6394360065460205
},
{
"epoch": 0.005044136191677175,
"grad_norm": 116.01831817626953,
"kl_loss": -1.280914393650412e-14,
"kl_weight": 1000000.0,
"learning_rate": 0.0001,
"loss": 3.2936160564422607,
"step": 2,
"total_loss": 3.2936160564422607
},
{
"epoch": 0.007566204287515763,
"grad_norm": 104.04817962646484,
"kl_loss": 7.10318071028837e-09,
"kl_weight": 1000000.0,
"learning_rate": 0.0002,
"loss": 3.084439992904663,
"step": 3,
"total_loss": 3.091543197631836
},
{
"epoch": 0.01008827238335435,
"grad_norm": 68.36679077148438,
"kl_loss": 2.8489626657801637e-08,
"kl_weight": 1000000.0,
"learning_rate": 0.0003,
"loss": 3.105210304260254,
"step": 4,
"total_loss": 3.133699893951416
},
{
"epoch": 0.012610340479192938,
"grad_norm": 61.00284957885742,
"kl_loss": 4.923957774849441e-08,
"kl_weight": 1000000.0,
"learning_rate": 0.0004,
"loss": 3.345022678375244,
"step": 5,
"total_loss": 3.3942623138427734
},
{
"epoch": 0.015132408575031526,
"grad_norm": 65.48960876464844,
"kl_loss": 1.43211394743048e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.0005,
"loss": 2.3467514514923096,
"step": 6,
"total_loss": 2.4899628162384033
},
{
"epoch": 0.017654476670870115,
"grad_norm": 63.001102447509766,
"kl_loss": 9.109995602329946e-08,
"kl_weight": 1000000.0,
"learning_rate": 0.0004993662864385298,
"loss": 2.5077083110809326,
"step": 7,
"total_loss": 2.5988082885742188
},
{
"epoch": 0.0201765447667087,
"grad_norm": 58.6073112487793,
"kl_loss": 2.3511624647198914e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.0004987325728770596,
"loss": 2.2668278217315674,
"step": 8,
"total_loss": 2.501944065093994
},
{
"epoch": 0.02269861286254729,
"grad_norm": 97.743896484375,
"kl_loss": 2.1175161180053692e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.0004980988593155894,
"loss": 2.352029800415039,
"step": 9,
"total_loss": 2.563781499862671
},
{
"epoch": 0.025220680958385876,
"grad_norm": 60.91500473022461,
"kl_loss": 1.2846226127294358e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.0004974651457541192,
"loss": 2.2376697063446045,
"step": 10,
"total_loss": 2.3661320209503174
},
{
"epoch": 0.027742749054224466,
"grad_norm": 55.095516204833984,
"kl_loss": 1.4181343033214944e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.000496831432192649,
"loss": 2.8243818283081055,
"step": 11,
"total_loss": 2.9661953449249268
},
{
"epoch": 0.03026481715006305,
"grad_norm": 44.97727966308594,
"kl_loss": 1.545683971926337e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.0004961977186311787,
"loss": 2.4689197540283203,
"step": 12,
"total_loss": 2.623488187789917
},
{
"epoch": 0.03278688524590164,
"grad_norm": 51.62504196166992,
"kl_loss": 2.2357993145760702e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.0004955640050697085,
"loss": 2.2227847576141357,
"step": 13,
"total_loss": 2.446364641189575
},
{
"epoch": 0.03530895334174023,
"grad_norm": 42.21575927734375,
"kl_loss": 1.6229765265052265e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.0004949302915082382,
"loss": 2.4396450519561768,
"step": 14,
"total_loss": 2.601942777633667
},
{
"epoch": 0.03783102143757881,
"grad_norm": 40.02684783935547,
"kl_loss": 1.4151250127270032e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.0004942965779467681,
"loss": 2.509690761566162,
"step": 15,
"total_loss": 2.651203155517578
},
{
"epoch": 0.0403530895334174,
"grad_norm": 44.62814712524414,
"kl_loss": 1.450005981951108e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.0004936628643852978,
"loss": 2.4844541549682617,
"step": 16,
"total_loss": 2.6294548511505127
},
{
"epoch": 0.04287515762925599,
"grad_norm": 41.87761688232422,
"kl_loss": 1.397227009647395e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.0004930291508238277,
"loss": 2.7985713481903076,
"step": 17,
"total_loss": 2.938293933868408
},
{
"epoch": 0.04539722572509458,
"grad_norm": 39.647457122802734,
"kl_loss": 1.0770181546604363e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.0004923954372623574,
"loss": 2.1876273155212402,
"step": 18,
"total_loss": 2.2953290939331055
},
{
"epoch": 0.04791929382093316,
"grad_norm": 44.82719039916992,
"kl_loss": 1.325549447983576e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.0004917617237008873,
"loss": 2.344290256500244,
"step": 19,
"total_loss": 2.4768452644348145
},
{
"epoch": 0.05044136191677175,
"grad_norm": 35.45253372192383,
"kl_loss": 1.3449634650442022e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.0004911280101394169,
"loss": 2.393965244293213,
"step": 20,
"total_loss": 2.5284616947174072
},
{
"epoch": 0.05296343001261034,
"grad_norm": 36.362369537353516,
"kl_loss": 1.552224659917556e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.0004904942965779467,
"loss": 2.1951944828033447,
"step": 21,
"total_loss": 2.350416898727417
},
{
"epoch": 0.05548549810844893,
"grad_norm": 42.16935348510742,
"kl_loss": 1.1523614062980414e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.0004898605830164765,
"loss": 2.4038805961608887,
"step": 22,
"total_loss": 2.5191166400909424
},
{
"epoch": 0.058007566204287514,
"grad_norm": 39.14812088012695,
"kl_loss": 1.1659390963814076e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.0004892268694550063,
"loss": 2.6124515533447266,
"step": 23,
"total_loss": 2.7290453910827637
},
{
"epoch": 0.0605296343001261,
"grad_norm": 49.780704498291016,
"kl_loss": 2.0625684271635691e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.0004885931558935361,
"loss": 2.9930167198181152,
"step": 24,
"total_loss": 3.1992735862731934
},
{
"epoch": 0.06305170239596469,
"grad_norm": 53.23894500732422,
"kl_loss": 1.4698964889703348e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.0004879594423320659,
"loss": 2.4087769985198975,
"step": 25,
"total_loss": 2.5557665824890137
},
{
"epoch": 0.06557377049180328,
"grad_norm": 50.209110260009766,
"kl_loss": 1.2840492047416774e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.0004873257287705957,
"loss": 2.918276309967041,
"step": 26,
"total_loss": 3.0466811656951904
},
{
"epoch": 0.06809583858764187,
"grad_norm": 41.90302658081055,
"kl_loss": 1.8274477042723447e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.00048669201520912546,
"loss": 2.272730588912964,
"step": 27,
"total_loss": 2.455475330352783
},
{
"epoch": 0.07061790668348046,
"grad_norm": 39.8343620300293,
"kl_loss": 1.3182453528770566e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.00048605830164765525,
"loss": 2.6464455127716064,
"step": 28,
"total_loss": 2.7782700061798096
},
{
"epoch": 0.07313997477931904,
"grad_norm": 36.19342803955078,
"kl_loss": 1.3146133426289452e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.00048542458808618503,
"loss": 2.6744461059570312,
"step": 29,
"total_loss": 2.8059074878692627
},
{
"epoch": 0.07566204287515763,
"grad_norm": 39.07732391357422,
"kl_loss": 1.3345737670533708e-07,
"kl_weight": 1000000.0,
"learning_rate": 0.0004847908745247148,
"loss": 2.624727249145508,
"step": 30,
"total_loss": 2.7581846714019775
}
],
"logging_steps": 1,
"max_steps": 794,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 5,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.0314842112e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}