{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.986666666666667,
  "eval_steps": 500,
  "global_step": 84,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.35555555555555557,
      "grad_norm": 4.364832401275635,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": -2.3599021434783936,
      "logits/rejected": -2.3986287117004395,
      "logps/chosen": -1.2810232639312744,
      "logps/rejected": -1.6647472381591797,
      "loss": 1.334,
      "odds_ratio_loss": 11.882925987243652,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.12810233235359192,
      "rewards/margins": 0.03837240859866142,
      "rewards/rejected": -0.16647472977638245,
      "sft_loss": 0.14570708572864532,
      "step": 10
    },
    {
      "epoch": 0.7111111111111111,
      "grad_norm": 3.797199010848999,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": -2.176560878753662,
      "logits/rejected": -2.2147622108459473,
      "logps/chosen": -1.1564085483551025,
      "logps/rejected": -1.6085771322250366,
      "loss": 1.2054,
      "odds_ratio_loss": 10.778497695922852,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -0.11564085632562637,
      "rewards/margins": 0.04521685466170311,
      "rewards/rejected": -0.160857692360878,
      "sft_loss": 0.1275218427181244,
      "step": 20
    },
    {
      "epoch": 1.0666666666666667,
      "grad_norm": 3.668826103210449,
      "learning_rate": 4.093559974371725e-06,
      "logits/chosen": -2.3584961891174316,
      "logits/rejected": -2.3714914321899414,
      "logps/chosen": -1.0279743671417236,
      "logps/rejected": -1.439477562904358,
      "loss": 1.0773,
      "odds_ratio_loss": 9.591609001159668,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -0.10279743373394012,
      "rewards/margins": 0.041150324046611786,
      "rewards/rejected": -0.1439477652311325,
      "sft_loss": 0.11812162399291992,
      "step": 30
    },
    {
      "epoch": 1.4222222222222223,
      "grad_norm": 2.7432360649108887,
      "learning_rate": 3.1722995515381644e-06,
      "logits/chosen": -2.2854158878326416,
      "logits/rejected": -2.3222243785858154,
      "logps/chosen": -0.8480159640312195,
      "logps/rejected": -1.3128085136413574,
      "loss": 0.8927,
      "odds_ratio_loss": 8.068161010742188,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": -0.08480159938335419,
      "rewards/margins": 0.0464792437851429,
      "rewards/rejected": -0.1312808394432068,
      "sft_loss": 0.08589236438274384,
      "step": 40
    },
    {
      "epoch": 1.7777777777777777,
      "grad_norm": 2.6973206996917725,
      "learning_rate": 2.134792428593971e-06,
      "logits/chosen": -2.3095250129699707,
      "logits/rejected": -2.336907386779785,
      "logps/chosen": -0.6922627687454224,
      "logps/rejected": -1.121113896369934,
      "loss": 0.7399,
      "odds_ratio_loss": 6.732758522033691,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.06922627240419388,
      "rewards/margins": 0.042885102331638336,
      "rewards/rejected": -0.11211137473583221,
      "sft_loss": 0.06660701334476471,
      "step": 50
    },
    {
      "epoch": 2.1333333333333333,
      "grad_norm": 1.8098381757736206,
      "learning_rate": 1.160433012552508e-06,
      "logits/chosen": -2.3899528980255127,
      "logits/rejected": -2.4369609355926514,
      "logps/chosen": -0.5784354209899902,
      "logps/rejected": -1.0445263385772705,
      "loss": 0.63,
      "odds_ratio_loss": 5.705307960510254,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.05784354358911514,
      "rewards/margins": 0.04660910367965698,
      "rewards/rejected": -0.10445265471935272,
      "sft_loss": 0.05942065268754959,
      "step": 60
    },
    {
      "epoch": 2.488888888888889,
      "grad_norm": 2.6440157890319824,
      "learning_rate": 4.1769689822475147e-07,
      "logits/chosen": -2.3521196842193604,
      "logits/rejected": -2.39290189743042,
      "logps/chosen": -0.5662254095077515,
      "logps/rejected": -1.0197746753692627,
      "loss": 0.6132,
      "odds_ratio_loss": 5.592169761657715,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -0.056622546166181564,
      "rewards/margins": 0.045354925096035004,
      "rewards/rejected": -0.10197745263576508,
      "sft_loss": 0.05396850034594536,
      "step": 70
    },
    {
      "epoch": 2.8444444444444446,
      "grad_norm": 2.6655380725860596,
      "learning_rate": 3.5009907323737826e-08,
      "logits/chosen": -2.3952314853668213,
      "logits/rejected": -2.4219796657562256,
      "logps/chosen": -0.5315676927566528,
      "logps/rejected": -0.9738686680793762,
      "loss": 0.5745,
      "odds_ratio_loss": 5.228327751159668,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -0.053156763315200806,
      "rewards/margins": 0.044230107218027115,
      "rewards/rejected": -0.09738686680793762,
      "sft_loss": 0.05164428427815437,
      "step": 80
    },
    {
      "epoch": 2.986666666666667,
      "step": 84,
      "total_flos": 2.2783332409344e+16,
      "train_loss": 0.8738818849836077,
      "train_runtime": 261.5721,
      "train_samples_per_second": 5.161,
      "train_steps_per_second": 0.321
    }
  ],
  "logging_steps": 10,
  "max_steps": 84,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.2783332409344e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}