{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 306,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2,
      "learning_rate": 1.4516129032258065e-05,
      "logits/chosen": -2.4829204082489014,
      "logits/rejected": -2.623030185699463,
      "logps/chosen": -55.93037033081055,
      "logps/rejected": -33.53595733642578,
      "loss": 0.5107,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.15422411262989044,
      "rewards/margins": 0.5176956057548523,
      "rewards/rejected": -0.36347147822380066,
      "step": 20
    },
    {
      "epoch": 0.39,
      "learning_rate": 2.9563636363636365e-05,
      "logits/chosen": -2.385016918182373,
      "logits/rejected": -2.5468831062316895,
      "logps/chosen": -34.7657585144043,
      "logps/rejected": -52.43476104736328,
      "loss": 0.0274,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 2.29936146736145,
      "rewards/margins": 4.539745807647705,
      "rewards/rejected": -2.240384578704834,
      "step": 40
    },
    {
      "epoch": 0.59,
      "learning_rate": 2.7381818181818182e-05,
      "logits/chosen": -2.2574870586395264,
      "logits/rejected": -2.420015811920166,
      "logps/chosen": -13.00920295715332,
      "logps/rejected": -70.32115936279297,
      "loss": 0.0003,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 4.496087074279785,
      "rewards/margins": 8.517866134643555,
      "rewards/rejected": -4.021780490875244,
      "step": 60
    },
    {
      "epoch": 0.78,
      "learning_rate": 2.52e-05,
      "logits/chosen": -2.2138588428497314,
      "logits/rejected": -2.3903181552886963,
      "logps/chosen": -8.080777168273926,
      "logps/rejected": -77.32413482666016,
      "loss": 0.0001,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 5.011228084564209,
      "rewards/margins": 9.755220413208008,
      "rewards/rejected": -4.743992805480957,
      "step": 80
    },
    {
      "epoch": 0.98,
      "learning_rate": 2.3018181818181816e-05,
      "logits/chosen": -2.1865017414093018,
      "logits/rejected": -2.3809409141540527,
      "logps/chosen": -7.149311065673828,
      "logps/rejected": -79.04914093017578,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 5.135758399963379,
      "rewards/margins": 10.058463096618652,
      "rewards/rejected": -4.922704696655273,
      "step": 100
    },
    {
      "epoch": 1.18,
      "learning_rate": 2.0836363636363637e-05,
      "logits/chosen": -2.204026699066162,
      "logits/rejected": -2.377389430999756,
      "logps/chosen": -6.087798118591309,
      "logps/rejected": -80.18804931640625,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 5.203327655792236,
      "rewards/margins": 10.240509986877441,
      "rewards/rejected": -5.037181854248047,
      "step": 120
    },
    {
      "epoch": 1.37,
      "learning_rate": 1.8654545454545454e-05,
      "logits/chosen": -2.190598964691162,
      "logits/rejected": -2.3737359046936035,
      "logps/chosen": -5.493344306945801,
      "logps/rejected": -81.6736831665039,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 5.198002815246582,
      "rewards/margins": 10.370179176330566,
      "rewards/rejected": -5.172175884246826,
      "step": 140
    },
    {
      "epoch": 1.57,
      "learning_rate": 1.647272727272727e-05,
      "logits/chosen": -2.198334217071533,
      "logits/rejected": -2.3750014305114746,
      "logps/chosen": -5.27658224105835,
      "logps/rejected": -83.0462417602539,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 5.275361061096191,
      "rewards/margins": 10.579935073852539,
      "rewards/rejected": -5.304574012756348,
      "step": 160
    },
    {
      "epoch": 1.76,
      "learning_rate": 1.4290909090909092e-05,
      "logits/chosen": -2.1648800373077393,
      "logits/rejected": -2.3531603813171387,
      "logps/chosen": -5.335925579071045,
      "logps/rejected": -83.67100524902344,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 5.311135768890381,
      "rewards/margins": 10.685991287231445,
      "rewards/rejected": -5.374855041503906,
      "step": 180
    },
    {
      "epoch": 1.96,
      "learning_rate": 1.210909090909091e-05,
      "logits/chosen": -2.1698648929595947,
      "logits/rejected": -2.350588798522949,
      "logps/chosen": -5.037841796875,
      "logps/rejected": -84.4847183227539,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 5.284677982330322,
      "rewards/margins": 10.73720932006836,
      "rewards/rejected": -5.4525299072265625,
      "step": 200
    },
    {
      "epoch": 2.16,
      "learning_rate": 9.927272727272728e-06,
      "logits/chosen": -2.172761917114258,
      "logits/rejected": -2.3548429012298584,
      "logps/chosen": -4.5183844566345215,
      "logps/rejected": -85.39585876464844,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 5.35943603515625,
      "rewards/margins": 10.907907485961914,
      "rewards/rejected": -5.5484724044799805,
      "step": 220
    },
    {
      "epoch": 2.35,
      "learning_rate": 7.745454545454545e-06,
      "logits/chosen": -2.176955223083496,
      "logits/rejected": -2.3584964275360107,
      "logps/chosen": -4.64106559753418,
      "logps/rejected": -85.82803344726562,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 5.32342004776001,
      "rewards/margins": 10.914727210998535,
      "rewards/rejected": -5.591307640075684,
      "step": 240
    },
    {
      "epoch": 2.55,
      "learning_rate": 5.563636363636363e-06,
      "logits/chosen": -2.137197494506836,
      "logits/rejected": -2.3419229984283447,
      "logps/chosen": -4.342270374298096,
      "logps/rejected": -85.80111694335938,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 5.375523567199707,
      "rewards/margins": 10.970113754272461,
      "rewards/rejected": -5.594590663909912,
      "step": 260
    },
    {
      "epoch": 2.75,
      "learning_rate": 3.381818181818182e-06,
      "logits/chosen": -2.18381404876709,
      "logits/rejected": -2.362435817718506,
      "logps/chosen": -4.8146867752075195,
      "logps/rejected": -86.47946166992188,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 5.325329303741455,
      "rewards/margins": 10.978459358215332,
      "rewards/rejected": -5.653130531311035,
      "step": 280
    },
    {
      "epoch": 2.94,
      "learning_rate": 1.2000000000000002e-06,
      "logits/chosen": -2.179290294647217,
      "logits/rejected": -2.3513832092285156,
      "logps/chosen": -4.198988437652588,
      "logps/rejected": -86.9154052734375,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 5.389545917510986,
      "rewards/margins": 11.077142715454102,
      "rewards/rejected": -5.687596321105957,
      "step": 300
    }
  ],
  "logging_steps": 20,
  "max_steps": 306,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}