{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.836317135549872,
"eval_steps": 500,
"global_step": 1500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.1278772378516624,
"grad_norm": 0.15987633168697357,
"learning_rate": 1.9498721227621484e-05,
"loss": 0.0362,
"step": 50
},
{
"epoch": 0.2557544757033248,
"grad_norm": 0.04608326032757759,
"learning_rate": 1.8987212276214835e-05,
"loss": 0.0168,
"step": 100
},
{
"epoch": 0.3836317135549872,
"grad_norm": 0.03889036923646927,
"learning_rate": 1.8475703324808185e-05,
"loss": 0.0254,
"step": 150
},
{
"epoch": 0.5115089514066496,
"grad_norm": 0.05562173202633858,
"learning_rate": 1.7964194373401536e-05,
"loss": 0.0502,
"step": 200
},
{
"epoch": 0.639386189258312,
"grad_norm": 0.11458363384008408,
"learning_rate": 1.7452685421994886e-05,
"loss": 0.0353,
"step": 250
},
{
"epoch": 0.7672634271099744,
"grad_norm": 0.0313524566590786,
"learning_rate": 1.6941176470588237e-05,
"loss": 0.0423,
"step": 300
},
{
"epoch": 0.8951406649616368,
"grad_norm": 0.06919269263744354,
"learning_rate": 1.6429667519181587e-05,
"loss": 0.0414,
"step": 350
},
{
"epoch": 1.0230179028132993,
"grad_norm": 0.04081420600414276,
"learning_rate": 1.5918158567774937e-05,
"loss": 0.0444,
"step": 400
},
{
"epoch": 1.1508951406649617,
"grad_norm": 0.2047465294599533,
"learning_rate": 1.5406649616368288e-05,
"loss": 0.0103,
"step": 450
},
{
"epoch": 1.278772378516624,
"grad_norm": 0.025791389867663383,
"learning_rate": 1.4895140664961638e-05,
"loss": 0.0209,
"step": 500
},
{
"epoch": 1.4066496163682864,
"grad_norm": 0.9749135375022888,
"learning_rate": 1.4383631713554989e-05,
"loss": 0.0443,
"step": 550
},
{
"epoch": 1.5345268542199488,
"grad_norm": 0.049570031464099884,
"learning_rate": 1.3872122762148339e-05,
"loss": 0.0334,
"step": 600
},
{
"epoch": 1.6624040920716112,
"grad_norm": 0.03414261341094971,
"learning_rate": 1.336061381074169e-05,
"loss": 0.0263,
"step": 650
},
{
"epoch": 1.7902813299232738,
"grad_norm": 0.05060505494475365,
"learning_rate": 1.284910485933504e-05,
"loss": 0.0324,
"step": 700
},
{
"epoch": 1.918158567774936,
"grad_norm": 0.02642514370381832,
"learning_rate": 1.233759590792839e-05,
"loss": 0.0135,
"step": 750
},
{
"epoch": 2.0460358056265986,
"grad_norm": 0.025779355317354202,
"learning_rate": 1.182608695652174e-05,
"loss": 0.0077,
"step": 800
},
{
"epoch": 2.1739130434782608,
"grad_norm": 0.02975759282708168,
"learning_rate": 1.1314578005115091e-05,
"loss": 0.019,
"step": 850
},
{
"epoch": 2.3017902813299234,
"grad_norm": 0.022426923736929893,
"learning_rate": 1.0803069053708442e-05,
"loss": 0.0122,
"step": 900
},
{
"epoch": 2.4296675191815855,
"grad_norm": 0.02004999853670597,
"learning_rate": 1.0291560102301792e-05,
"loss": 0.0067,
"step": 950
},
{
"epoch": 2.557544757033248,
"grad_norm": 0.01824093610048294,
"learning_rate": 9.78005115089514e-06,
"loss": 0.0266,
"step": 1000
},
{
"epoch": 2.6854219948849103,
"grad_norm": 0.018175149336457253,
"learning_rate": 9.278772378516625e-06,
"loss": 0.0055,
"step": 1050
},
{
"epoch": 2.813299232736573,
"grad_norm": 0.024839840829372406,
"learning_rate": 8.767263427109976e-06,
"loss": 0.0279,
"step": 1100
},
{
"epoch": 2.9411764705882355,
"grad_norm": 0.03608064725995064,
"learning_rate": 8.255754475703326e-06,
"loss": 0.0061,
"step": 1150
},
{
"epoch": 3.0690537084398977,
"grad_norm": 0.02351670153439045,
"learning_rate": 7.744245524296677e-06,
"loss": 0.0072,
"step": 1200
},
{
"epoch": 3.1969309462915603,
"grad_norm": 0.023821713402867317,
"learning_rate": 7.232736572890025e-06,
"loss": 0.0074,
"step": 1250
},
{
"epoch": 3.3248081841432224,
"grad_norm": 0.05748629570007324,
"learning_rate": 6.721227621483376e-06,
"loss": 0.018,
"step": 1300
},
{
"epoch": 3.452685421994885,
"grad_norm": 0.028405383229255676,
"learning_rate": 6.209718670076726e-06,
"loss": 0.0046,
"step": 1350
},
{
"epoch": 3.580562659846547,
"grad_norm": 0.02582070790231228,
"learning_rate": 5.6982097186700766e-06,
"loss": 0.0022,
"step": 1400
},
{
"epoch": 3.70843989769821,
"grad_norm": 0.10114799439907074,
"learning_rate": 5.186700767263427e-06,
"loss": 0.0158,
"step": 1450
},
{
"epoch": 3.836317135549872,
"grad_norm": 0.019817985594272614,
"learning_rate": 4.675191815856777e-06,
"loss": 0.0111,
"step": 1500
}
],
"logging_steps": 50,
"max_steps": 1955,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.858645370905989e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}