{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 46,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.10869565217391304,
      "grad_norm": 1.5240029096603394,
      "learning_rate": 4.000000000000001e-06,
      "logits/chosen": -3.1438066959381104,
      "logits/rejected": -2.998044490814209,
      "logps/chosen": -24.425586700439453,
      "logps/rejected": -109.31417083740234,
      "loss": 0.2266,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": 1.709136962890625,
      "rewards/margins": 1.7067525386810303,
      "rewards/rejected": 0.0023843557573854923,
      "step": 5
    },
    {
      "epoch": 0.21739130434782608,
      "grad_norm": 0.8518787622451782,
      "learning_rate": 4.883490980137327e-06,
      "logits/chosen": -3.2125651836395264,
      "logits/rejected": -3.0023326873779297,
      "logps/chosen": -24.60028648376465,
      "logps/rejected": -109.94728088378906,
      "loss": 0.1743,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": 1.6953380107879639,
      "rewards/margins": 2.0240752696990967,
      "rewards/rejected": -0.32873719930648804,
      "step": 10
    },
    {
      "epoch": 0.32608695652173914,
      "grad_norm": 0.5248653888702393,
      "learning_rate": 4.428722949554858e-06,
      "logits/chosen": -3.3195624351501465,
      "logits/rejected": -3.0724987983703613,
      "logps/chosen": -23.181148529052734,
      "logps/rejected": -118.40291595458984,
      "loss": 0.0617,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 1.5336229801177979,
      "rewards/margins": 3.1812326908111572,
      "rewards/rejected": -1.6476097106933594,
      "step": 15
    },
    {
      "epoch": 0.43478260869565216,
      "grad_norm": 0.5495370626449585,
      "learning_rate": 3.6942995462806574e-06,
      "logits/chosen": -3.4612669944763184,
      "logits/rejected": -3.146296501159668,
      "logps/chosen": -25.121320724487305,
      "logps/rejected": -125.0635986328125,
      "loss": 0.0497,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 1.540027141571045,
      "rewards/margins": 3.769411563873291,
      "rewards/rejected": -2.229383945465088,
      "step": 20
    },
    {
      "epoch": 0.5434782608695652,
      "grad_norm": 0.3498240113258362,
      "learning_rate": 2.786708563496002e-06,
      "logits/chosen": -3.4969794750213623,
      "logits/rejected": -3.1035914421081543,
      "logps/chosen": -26.37749671936035,
      "logps/rejected": -134.34664916992188,
      "loss": 0.0391,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 1.542913794517517,
      "rewards/margins": 4.45734167098999,
      "rewards/rejected": -2.914428234100342,
      "step": 25
    },
    {
      "epoch": 0.6521739130434783,
      "grad_norm": 0.21621277928352356,
      "learning_rate": 1.8375462445083464e-06,
      "logits/chosen": -3.5592663288116455,
      "logits/rejected": -3.240027666091919,
      "logps/chosen": -27.510631561279297,
      "logps/rejected": -126.76861572265625,
      "loss": 0.0459,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 1.4070671796798706,
      "rewards/margins": 4.541848659515381,
      "rewards/rejected": -3.1347813606262207,
      "step": 30
    },
    {
      "epoch": 0.7608695652173914,
      "grad_norm": 0.2057134360074997,
      "learning_rate": 9.844364725834058e-07,
      "logits/chosen": -3.556133270263672,
      "logits/rejected": -3.2353644371032715,
      "logps/chosen": -26.806324005126953,
      "logps/rejected": -128.81968688964844,
      "loss": 0.056,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": 1.3792188167572021,
      "rewards/margins": 4.967148780822754,
      "rewards/rejected": -3.5879299640655518,
      "step": 35
    },
    {
      "epoch": 0.8695652173913043,
      "grad_norm": 0.21634213626384735,
      "learning_rate": 3.510759825319976e-07,
      "logits/chosen": -3.608375072479248,
      "logits/rejected": -3.2185797691345215,
      "logps/chosen": -26.37796974182129,
      "logps/rejected": -136.79074096679688,
      "loss": 0.036,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 1.4289178848266602,
      "rewards/margins": 4.961454391479492,
      "rewards/rejected": -3.532536745071411,
      "step": 40
    },
    {
      "epoch": 0.9782608695652174,
      "grad_norm": 0.40187039971351624,
      "learning_rate": 2.9298940549128962e-08,
      "logits/chosen": -3.6041157245635986,
      "logits/rejected": -3.2674224376678467,
      "logps/chosen": -27.230480194091797,
      "logps/rejected": -134.53451538085938,
      "loss": 0.0598,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 1.397599697113037,
      "rewards/margins": 4.8428955078125,
      "rewards/rejected": -3.4452953338623047,
      "step": 45
    }
  ],
  "logging_steps": 5,
  "max_steps": 46,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}