{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 7.207207207207207,
  "eval_steps": 500,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.36036036036036034,
      "grad_norm": 4.098762512207031,
      "learning_rate": 9.259259259259259e-07,
      "logits/chosen": -2.332144260406494,
      "logits/rejected": -2.3385167121887207,
      "logps/chosen": -80.89369201660156,
      "logps/rejected": -70.11573791503906,
      "loss": 0.6929,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 0.00037987352698110044,
      "rewards/margins": 0.004227532539516687,
      "rewards/rejected": -0.003847658634185791,
      "step": 10
    },
    {
      "epoch": 0.7207207207207207,
      "grad_norm": 3.641324281692505,
      "learning_rate": 1.8518518518518519e-06,
      "logits/chosen": -2.323789119720459,
      "logits/rejected": -2.351041793823242,
      "logps/chosen": -73.2725601196289,
      "logps/rejected": -81.80250549316406,
      "loss": 0.6932,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.0012983012711629272,
      "rewards/margins": 0.004207946360111237,
      "rewards/rejected": -0.0029096449725329876,
      "step": 20
    },
    {
      "epoch": 1.0810810810810811,
      "grad_norm": 3.9276015758514404,
      "learning_rate": 2.7777777777777783e-06,
      "logits/chosen": -2.3353028297424316,
      "logits/rejected": -2.3445916175842285,
      "logps/chosen": -69.34381103515625,
      "logps/rejected": -74.37530517578125,
      "loss": 0.6941,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.01035231165587902,
      "rewards/margins": -0.006389107555150986,
      "rewards/rejected": -0.00396320316940546,
      "step": 30
    },
    {
      "epoch": 1.4414414414414414,
      "grad_norm": 4.809000492095947,
      "learning_rate": 3.7037037037037037e-06,
      "logits/chosen": -2.343184232711792,
      "logits/rejected": -2.360262393951416,
      "logps/chosen": -77.91002655029297,
      "logps/rejected": -76.27156066894531,
      "loss": 0.6902,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.008437180891633034,
      "rewards/margins": 0.008391124196350574,
      "rewards/rejected": -0.016828304156661034,
      "step": 40
    },
    {
      "epoch": 1.8018018018018018,
      "grad_norm": 4.504117012023926,
      "learning_rate": 4.62962962962963e-06,
      "logits/chosen": -2.3394973278045654,
      "logits/rejected": -2.3635268211364746,
      "logps/chosen": -83.62376403808594,
      "logps/rejected": -267.64569091796875,
      "loss": 0.6851,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": 0.01695835217833519,
      "rewards/margins": 0.14291717112064362,
      "rewards/rejected": -0.12595881521701813,
      "step": 50
    },
    {
      "epoch": 2.1621621621621623,
      "grad_norm": 4.033559322357178,
      "learning_rate": 4.998119881260576e-06,
      "logits/chosen": -2.32966685295105,
      "logits/rejected": -2.3370490074157715,
      "logps/chosen": -78.54629516601562,
      "logps/rejected": -82.67992401123047,
      "loss": 0.6767,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.03485359251499176,
      "rewards/margins": 0.035757362842559814,
      "rewards/rejected": -0.07061095535755157,
      "step": 60
    },
    {
      "epoch": 2.5225225225225225,
      "grad_norm": 4.979142189025879,
      "learning_rate": 4.9866405060165044e-06,
      "logits/chosen": -2.364291191101074,
      "logits/rejected": -2.376107931137085,
      "logps/chosen": -70.61842346191406,
      "logps/rejected": -81.80282592773438,
      "loss": 0.6636,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.06025966256856918,
      "rewards/margins": 0.03742799907922745,
      "rewards/rejected": -0.09768766909837723,
      "step": 70
    },
    {
      "epoch": 2.8828828828828827,
      "grad_norm": 4.0428690910339355,
      "learning_rate": 4.964774158361991e-06,
      "logits/chosen": -2.3341965675354004,
      "logits/rejected": -2.3440909385681152,
      "logps/chosen": -86.3591537475586,
      "logps/rejected": -77.45347595214844,
      "loss": 0.6519,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.09531830251216888,
      "rewards/margins": 0.09854079782962799,
      "rewards/rejected": -0.19385910034179688,
      "step": 80
    },
    {
      "epoch": 3.2432432432432434,
      "grad_norm": 4.31919527053833,
      "learning_rate": 4.93261217644956e-06,
      "logits/chosen": -2.351658821105957,
      "logits/rejected": -2.3437318801879883,
      "logps/chosen": -77.31346130371094,
      "logps/rejected": -80.43277740478516,
      "loss": 0.6243,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.1381106823682785,
      "rewards/margins": 0.16527524590492249,
      "rewards/rejected": -0.3033859133720398,
      "step": 90
    },
    {
      "epoch": 3.6036036036036037,
      "grad_norm": 5.029748439788818,
      "learning_rate": 4.8902889044347e-06,
      "logits/chosen": -2.3354241847991943,
      "logits/rejected": -2.358518600463867,
      "logps/chosen": -75.03588104248047,
      "logps/rejected": -86.44483947753906,
      "loss": 0.6025,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.22239580750465393,
      "rewards/margins": 0.1877792775630951,
      "rewards/rejected": -0.41017502546310425,
      "step": 100
    },
    {
      "epoch": 3.963963963963964,
      "grad_norm": 4.6208648681640625,
      "learning_rate": 4.837981131305475e-06,
      "logits/chosen": -2.3195366859436035,
      "logits/rejected": -2.3129196166992188,
      "logps/chosen": -72.09532928466797,
      "logps/rejected": -73.18878936767578,
      "loss": 0.5955,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.22240504622459412,
      "rewards/margins": 0.22803232073783875,
      "rewards/rejected": -0.45043739676475525,
      "step": 110
    },
    {
      "epoch": 4.324324324324325,
      "grad_norm": 4.163040637969971,
      "learning_rate": 4.775907352415367e-06,
      "logits/chosen": -2.3427720069885254,
      "logits/rejected": -2.3731276988983154,
      "logps/chosen": -85.9415283203125,
      "logps/rejected": -93.53765869140625,
      "loss": 0.5506,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.24449148774147034,
      "rewards/margins": 0.3563767373561859,
      "rewards/rejected": -0.6008682250976562,
      "step": 120
    },
    {
      "epoch": 4.684684684684685,
      "grad_norm": 4.228254795074463,
      "learning_rate": 4.70432685680402e-06,
      "logits/chosen": -2.336733341217041,
      "logits/rejected": -2.3446521759033203,
      "logps/chosen": -81.07231140136719,
      "logps/rejected": -90.82849884033203,
      "loss": 0.5248,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.005195322446525097,
      "rewards/margins": 0.6936509609222412,
      "rewards/rejected": -0.6988462209701538,
      "step": 130
    },
    {
      "epoch": 5.045045045045045,
      "grad_norm": 4.454960346221924,
      "learning_rate": 4.623538644118244e-06,
      "logits/chosen": -2.3331754207611084,
      "logits/rejected": -2.3434836864471436,
      "logps/chosen": -83.67604064941406,
      "logps/rejected": -82.92774200439453,
      "loss": 0.5288,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.2716079652309418,
      "rewards/margins": 0.4593236446380615,
      "rewards/rejected": -0.7309316396713257,
      "step": 140
    },
    {
      "epoch": 5.405405405405405,
      "grad_norm": 5.223482608795166,
      "learning_rate": 4.533880175657419e-06,
      "logits/chosen": -2.362809658050537,
      "logits/rejected": -2.3657679557800293,
      "logps/chosen": -73.20018768310547,
      "logps/rejected": -85.37998962402344,
      "loss": 0.4682,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.3221462368965149,
      "rewards/margins": 0.5968301892280579,
      "rewards/rejected": -0.9189764261245728,
      "step": 150
    },
    {
      "epoch": 5.7657657657657655,
      "grad_norm": 4.905521869659424,
      "learning_rate": 4.435725964760331e-06,
      "logits/chosen": -2.3808655738830566,
      "logits/rejected": -2.368286609649658,
      "logps/chosen": -68.88943481445312,
      "logps/rejected": -82.69029235839844,
      "loss": 0.4586,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.3172217011451721,
      "rewards/margins": 0.7665462493896484,
      "rewards/rejected": -1.0837678909301758,
      "step": 160
    },
    {
      "epoch": 6.126126126126126,
      "grad_norm": 5.399628162384033,
      "learning_rate": 4.329486012421531e-06,
      "logits/chosen": -2.365935802459717,
      "logits/rejected": -2.363004684448242,
      "logps/chosen": -70.47642517089844,
      "logps/rejected": -84.02542877197266,
      "loss": 0.4462,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.45835933089256287,
      "rewards/margins": 0.8438631892204285,
      "rewards/rejected": -1.302222490310669,
      "step": 170
    },
    {
      "epoch": 6.486486486486487,
      "grad_norm": 4.843445777893066,
      "learning_rate": 4.215604094671835e-06,
      "logits/chosen": -2.357231855392456,
      "logits/rejected": -2.360239028930664,
      "logps/chosen": -78.67561340332031,
      "logps/rejected": -88.39659118652344,
      "loss": 0.3976,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -0.4842923581600189,
      "rewards/margins": 0.8027322888374329,
      "rewards/rejected": -1.2870244979858398,
      "step": 180
    },
    {
      "epoch": 6.846846846846847,
      "grad_norm": 4.972764015197754,
      "learning_rate": 4.094555908876765e-06,
      "logits/chosen": -2.3751468658447266,
      "logits/rejected": -2.3993237018585205,
      "logps/chosen": -73.63652038574219,
      "logps/rejected": -278.0970458984375,
      "loss": 0.3959,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.4291106164455414,
      "rewards/margins": 0.9967883229255676,
      "rewards/rejected": -1.4258991479873657,
      "step": 190
    },
    {
      "epoch": 7.207207207207207,
      "grad_norm": 5.071193218231201,
      "learning_rate": 3.966847086696045e-06,
      "logits/chosen": -2.3572330474853516,
      "logits/rejected": -2.357269763946533,
      "logps/chosen": -84.92713928222656,
      "logps/rejected": -98.15062713623047,
      "loss": 0.3544,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -0.5852295756340027,
      "rewards/margins": 1.2983506917953491,
      "rewards/rejected": -1.883580207824707,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 540,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.302906796614615e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}