{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9997056226081837,
"eval_steps": 500,
"global_step": 2547,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05024040820331665,
"grad_norm": 6.086460590362549,
"learning_rate": 5.019607843137255e-07,
"logits/chosen": -0.5333898067474365,
"logits/rejected": -0.47814399003982544,
"logps/chosen": -75.330810546875,
"logps/rejected": -13.868017196655273,
"loss": 0.6901,
"rewards/accuracies": 0.5107421875,
"rewards/chosen": 0.005103742238134146,
"rewards/margins": 0.0064795538783073425,
"rewards/rejected": -0.0013758119894191623,
"step": 128
},
{
"epoch": 0.1004808164066333,
"grad_norm": 3.6039109230041504,
"learning_rate": 9.995636998254798e-07,
"logits/chosen": -0.5320765972137451,
"logits/rejected": -0.466244101524353,
"logps/chosen": -74.39834594726562,
"logps/rejected": -12.819503784179688,
"loss": 0.6417,
"rewards/accuracies": 0.8037109375,
"rewards/chosen": 0.10177542269229889,
"rewards/margins": 0.11421097815036774,
"rewards/rejected": -0.012435557320713997,
"step": 256
},
{
"epoch": 0.15072122460994997,
"grad_norm": 2.8107080459594727,
"learning_rate": 9.43717277486911e-07,
"logits/chosen": -0.5339048504829407,
"logits/rejected": -0.475521981716156,
"logps/chosen": -67.63614654541016,
"logps/rejected": -14.525527954101562,
"loss": 0.4265,
"rewards/accuracies": 0.83984375,
"rewards/chosen": 0.691648006439209,
"rewards/margins": 0.9047117233276367,
"rewards/rejected": -0.21306368708610535,
"step": 384
},
{
"epoch": 0.2009616328132666,
"grad_norm": 7.228174686431885,
"learning_rate": 8.87870855148342e-07,
"logits/chosen": -0.5555779337882996,
"logits/rejected": -0.5105082988739014,
"logps/chosen": -59.40736770629883,
"logps/rejected": -20.558443069458008,
"loss": 0.2512,
"rewards/accuracies": 0.9453125,
"rewards/chosen": 1.2393351793289185,
"rewards/margins": 1.9693442583084106,
"rewards/rejected": -0.7300090193748474,
"step": 512
},
{
"epoch": 0.2512020410165833,
"grad_norm": 6.92218542098999,
"learning_rate": 8.320244328097731e-07,
"logits/chosen": -0.5576226115226746,
"logits/rejected": -0.5113691091537476,
"logps/chosen": -60.53861618041992,
"logps/rejected": -32.28628921508789,
"loss": 0.0926,
"rewards/accuracies": 0.98828125,
"rewards/chosen": 1.4149188995361328,
"rewards/margins": 3.2507734298706055,
"rewards/rejected": -1.8358546495437622,
"step": 640
},
{
"epoch": 0.30144244921989993,
"grad_norm": 0.33116015791893005,
"learning_rate": 7.761780104712042e-07,
"logits/chosen": -0.5273003578186035,
"logits/rejected": -0.4682571291923523,
"logps/chosen": -59.363250732421875,
"logps/rejected": -40.3122444152832,
"loss": 0.0343,
"rewards/accuracies": 0.9921875,
"rewards/chosen": 1.5962406396865845,
"rewards/margins": 4.371469020843506,
"rewards/rejected": -2.77522873878479,
"step": 768
},
{
"epoch": 0.3516828574232166,
"grad_norm": 0.6943575143814087,
"learning_rate": 7.203315881326351e-07,
"logits/chosen": -0.48832786083221436,
"logits/rejected": -0.42331841588020325,
"logps/chosen": -56.96063232421875,
"logps/rejected": -44.680931091308594,
"loss": 0.0303,
"rewards/accuracies": 0.990234375,
"rewards/chosen": 1.5980381965637207,
"rewards/margins": 4.763439178466797,
"rewards/rejected": -3.165400981903076,
"step": 896
},
{
"epoch": 0.4019232656265332,
"grad_norm": 0.07658185809850693,
"learning_rate": 6.644851657940663e-07,
"logits/chosen": -0.4915071725845337,
"logits/rejected": -0.4179531931877136,
"logps/chosen": -54.62388610839844,
"logps/rejected": -47.38861846923828,
"loss": 0.0313,
"rewards/accuracies": 0.990234375,
"rewards/chosen": 1.6683292388916016,
"rewards/margins": 5.199033737182617,
"rewards/rejected": -3.5307040214538574,
"step": 1024
},
{
"epoch": 0.45216367382984984,
"grad_norm": 0.13849562406539917,
"learning_rate": 6.086387434554974e-07,
"logits/chosen": -0.4723713994026184,
"logits/rejected": -0.38997870683670044,
"logps/chosen": -56.00676727294922,
"logps/rejected": -48.90182876586914,
"loss": 0.0423,
"rewards/accuracies": 0.982421875,
"rewards/chosen": 1.662773609161377,
"rewards/margins": 5.358356475830078,
"rewards/rejected": -3.695582628250122,
"step": 1152
},
{
"epoch": 0.5024040820331666,
"grad_norm": 0.09821192175149918,
"learning_rate": 5.527923211169285e-07,
"logits/chosen": -0.4637073874473572,
"logits/rejected": -0.3821738064289093,
"logps/chosen": -56.77195739746094,
"logps/rejected": -53.689002990722656,
"loss": 0.0199,
"rewards/accuracies": 0.9921875,
"rewards/chosen": 1.7342756986618042,
"rewards/margins": 5.696838855743408,
"rewards/rejected": -3.9625630378723145,
"step": 1280
},
{
"epoch": 0.5526444902364832,
"grad_norm": 0.7402496933937073,
"learning_rate": 4.969458987783594e-07,
"logits/chosen": -0.4427691102027893,
"logits/rejected": -0.3622562885284424,
"logps/chosen": -58.04669189453125,
"logps/rejected": -54.428367614746094,
"loss": 0.0248,
"rewards/accuracies": 0.990234375,
"rewards/chosen": 1.715148687362671,
"rewards/margins": 5.825463771820068,
"rewards/rejected": -4.110315322875977,
"step": 1408
},
{
"epoch": 0.6028848984397999,
"grad_norm": 0.46330779790878296,
"learning_rate": 4.410994764397906e-07,
"logits/chosen": -0.43754109740257263,
"logits/rejected": -0.33878153562545776,
"logps/chosen": -57.78377914428711,
"logps/rejected": -54.78404235839844,
"loss": 0.0342,
"rewards/accuracies": 0.98828125,
"rewards/chosen": 1.7349567413330078,
"rewards/margins": 5.961076736450195,
"rewards/rejected": -4.2261199951171875,
"step": 1536
},
{
"epoch": 0.6531253066431165,
"grad_norm": 0.07320141792297363,
"learning_rate": 3.852530541012216e-07,
"logits/chosen": -0.4277573823928833,
"logits/rejected": -0.31976205110549927,
"logps/chosen": -54.81957244873047,
"logps/rejected": -55.96826171875,
"loss": 0.0183,
"rewards/accuracies": 0.9921875,
"rewards/chosen": 1.6823066473007202,
"rewards/margins": 6.112739562988281,
"rewards/rejected": -4.43043327331543,
"step": 1664
},
{
"epoch": 0.7033657148464332,
"grad_norm": 0.5408441424369812,
"learning_rate": 3.2940663176265273e-07,
"logits/chosen": -0.4249654710292816,
"logits/rejected": -0.3083575367927551,
"logps/chosen": -55.527496337890625,
"logps/rejected": -58.188899993896484,
"loss": 0.013,
"rewards/accuracies": 0.994140625,
"rewards/chosen": 1.7380454540252686,
"rewards/margins": 6.380348205566406,
"rewards/rejected": -4.642302989959717,
"step": 1792
},
{
"epoch": 0.7536061230497498,
"grad_norm": 0.04812052845954895,
"learning_rate": 2.7356020942408376e-07,
"logits/chosen": -0.4335082471370697,
"logits/rejected": -0.3139256536960602,
"logps/chosen": -54.48476028442383,
"logps/rejected": -60.73431396484375,
"loss": 0.0241,
"rewards/accuracies": 0.98828125,
"rewards/chosen": 1.6781396865844727,
"rewards/margins": 6.47487735748291,
"rewards/rejected": -4.7967376708984375,
"step": 1920
},
{
"epoch": 0.8038465312530664,
"grad_norm": 0.055905986577272415,
"learning_rate": 2.1771378708551484e-07,
"logits/chosen": -0.3921366333961487,
"logits/rejected": -0.2739506959915161,
"logps/chosen": -56.08161544799805,
"logps/rejected": -62.1962890625,
"loss": 0.0297,
"rewards/accuracies": 0.986328125,
"rewards/chosen": 1.7258753776550293,
"rewards/margins": 6.677608489990234,
"rewards/rejected": -4.951733589172363,
"step": 2048
},
{
"epoch": 0.854086939456383,
"grad_norm": 0.1123403012752533,
"learning_rate": 1.618673647469459e-07,
"logits/chosen": -0.4205915331840515,
"logits/rejected": -0.31026044487953186,
"logps/chosen": -56.832523345947266,
"logps/rejected": -63.64851760864258,
"loss": 0.0154,
"rewards/accuracies": 0.9921875,
"rewards/chosen": 1.7784671783447266,
"rewards/margins": 6.823487281799316,
"rewards/rejected": -5.04502010345459,
"step": 2176
},
{
"epoch": 0.9043273476596997,
"grad_norm": 0.17749281227588654,
"learning_rate": 1.0602094240837696e-07,
"logits/chosen": -0.3997223973274231,
"logits/rejected": -0.2778712511062622,
"logps/chosen": -55.459022521972656,
"logps/rejected": -63.700008392333984,
"loss": 0.0313,
"rewards/accuracies": 0.9853515625,
"rewards/chosen": 1.6784813404083252,
"rewards/margins": 6.7875847816467285,
"rewards/rejected": -5.109103202819824,
"step": 2304
},
{
"epoch": 0.9545677558630163,
"grad_norm": 0.04166420176625252,
"learning_rate": 5.0174520069808025e-08,
"logits/chosen": -0.35927632451057434,
"logits/rejected": -0.23961657285690308,
"logps/chosen": -55.96198272705078,
"logps/rejected": -64.85810089111328,
"loss": 0.0223,
"rewards/accuracies": 0.9921875,
"rewards/chosen": 1.656056523323059,
"rewards/margins": 6.852350234985352,
"rewards/rejected": -5.196293830871582,
"step": 2432
},
{
"epoch": 0.9997056226081837,
"step": 2547,
"total_flos": 1.5461894723437855e+18,
"train_loss": 0.1253820424179119,
"train_runtime": 39602.0987,
"train_samples_per_second": 0.515,
"train_steps_per_second": 0.064
}
],
"logging_steps": 128,
"max_steps": 2547,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.5461894723437855e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}