{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 252,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01984126984126984,
      "grad_norm": 18.047539550790006,
      "learning_rate": 5.7692307692307695e-08,
      "logits/chosen": -1.703125,
      "logits/rejected": -1.6953125,
      "logps/chosen": -0.376953125,
      "logps/rejected": -0.427734375,
      "loss": 2.4555,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.765625,
      "rewards/margins": 0.50390625,
      "rewards/rejected": -4.28125,
      "step": 5
    },
    {
      "epoch": 0.03968253968253968,
      "grad_norm": 17.185434459136403,
      "learning_rate": 1.1538461538461539e-07,
      "logits/chosen": -1.2578125,
      "logits/rejected": -1.2578125,
      "logps/chosen": -0.32421875,
      "logps/rejected": -0.390625,
      "loss": 2.4857,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.234375,
      "rewards/margins": 0.6640625,
      "rewards/rejected": -3.90625,
      "step": 10
    },
    {
      "epoch": 0.05952380952380952,
      "grad_norm": 17.039133125856026,
      "learning_rate": 1.7307692307692305e-07,
      "logits/chosen": -1.5078125,
      "logits/rejected": -1.5078125,
      "logps/chosen": -0.279296875,
      "logps/rejected": -0.34765625,
      "loss": 2.402,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.78125,
      "rewards/margins": 0.6796875,
      "rewards/rejected": -3.46875,
      "step": 15
    },
    {
      "epoch": 0.07936507936507936,
      "grad_norm": 17.10226682999204,
      "learning_rate": 2.3076923076923078e-07,
      "logits/chosen": -1.4765625,
      "logits/rejected": -1.4765625,
      "logps/chosen": -0.310546875,
      "logps/rejected": -0.39453125,
      "loss": 2.4703,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.09375,
      "rewards/margins": 0.84375,
      "rewards/rejected": -3.9375,
      "step": 20
    },
    {
      "epoch": 0.0992063492063492,
      "grad_norm": 14.47393175785549,
      "learning_rate": 2.8846153846153846e-07,
      "logits/chosen": -1.78125,
      "logits/rejected": -1.78125,
      "logps/chosen": -0.34375,
      "logps/rejected": -0.40234375,
      "loss": 2.3167,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.421875,
      "rewards/margins": 0.5859375,
      "rewards/rejected": -4.0,
      "step": 25
    },
    {
      "epoch": 0.11904761904761904,
      "grad_norm": 16.9612236979206,
      "learning_rate": 2.997681792980754e-07,
      "logits/chosen": -1.3359375,
      "logits/rejected": -1.3359375,
      "logps/chosen": -0.3046875,
      "logps/rejected": -0.357421875,
      "loss": 2.3476,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.03125,
      "rewards/margins": 0.54296875,
      "rewards/rejected": -3.578125,
      "step": 30
    },
    {
      "epoch": 0.1388888888888889,
      "grad_norm": 16.48721079930822,
      "learning_rate": 2.9882763562415516e-07,
      "logits/chosen": -1.5078125,
      "logits/rejected": -1.5078125,
      "logps/chosen": -0.408203125,
      "logps/rejected": -0.482421875,
      "loss": 2.391,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.0625,
      "rewards/margins": 0.75390625,
      "rewards/rejected": -4.8125,
      "step": 35
    },
    {
      "epoch": 0.15873015873015872,
      "grad_norm": 21.37387348387286,
      "learning_rate": 2.9716841847369104e-07,
      "logits/chosen": -1.5,
      "logits/rejected": -1.5,
      "logps/chosen": -0.2890625,
      "logps/rejected": -0.365234375,
      "loss": 2.3684,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.890625,
      "rewards/margins": 0.7578125,
      "rewards/rejected": -3.640625,
      "step": 40
    },
    {
      "epoch": 0.17857142857142858,
      "grad_norm": 17.871422332796683,
      "learning_rate": 2.94798540037113e-07,
      "logits/chosen": -1.546875,
      "logits/rejected": -1.546875,
      "logps/chosen": -0.287109375,
      "logps/rejected": -0.3515625,
      "loss": 2.3699,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.859375,
      "rewards/margins": 0.640625,
      "rewards/rejected": -3.5,
      "step": 45
    },
    {
      "epoch": 0.1984126984126984,
      "grad_norm": 29.190246701669263,
      "learning_rate": 2.917294442157359e-07,
      "logits/chosen": -1.3828125,
      "logits/rejected": -1.3828125,
      "logps/chosen": -0.265625,
      "logps/rejected": -0.34375,
      "loss": 2.3773,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.65625,
      "rewards/margins": 0.78125,
      "rewards/rejected": -3.4375,
      "step": 50
    },
    {
      "epoch": 0.21825396825396826,
      "grad_norm": 20.420712184187625,
      "learning_rate": 2.8797595136032675e-07,
      "logits/chosen": -1.4765625,
      "logits/rejected": -1.4765625,
      "logps/chosen": -0.31640625,
      "logps/rejected": -0.39453125,
      "loss": 2.3586,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.171875,
      "rewards/margins": 0.7734375,
      "rewards/rejected": -3.953125,
      "step": 55
    },
    {
      "epoch": 0.23809523809523808,
      "grad_norm": 17.704268736560135,
      "learning_rate": 2.8355618670514256e-07,
      "logits/chosen": -1.2890625,
      "logits/rejected": -1.2890625,
      "logps/chosen": -0.27734375,
      "logps/rejected": -0.328125,
      "loss": 2.3299,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -2.765625,
      "rewards/margins": 0.515625,
      "rewards/rejected": -3.28125,
      "step": 60
    },
    {
      "epoch": 0.25793650793650796,
      "grad_norm": 15.278992243473061,
      "learning_rate": 2.784914928430218e-07,
      "logits/chosen": -1.40625,
      "logits/rejected": -1.40625,
      "logps/chosen": -0.294921875,
      "logps/rejected": -0.37109375,
      "loss": 2.3791,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -2.953125,
      "rewards/margins": 0.7578125,
      "rewards/rejected": -3.703125,
      "step": 65
    },
    {
      "epoch": 0.2777777777777778,
      "grad_norm": 21.138997719744832,
      "learning_rate": 2.728063266641801e-07,
      "logits/chosen": -1.6484375,
      "logits/rejected": -1.6484375,
      "logps/chosen": -0.26953125,
      "logps/rejected": -0.361328125,
      "loss": 2.3719,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.703125,
      "rewards/margins": 0.9140625,
      "rewards/rejected": -3.625,
      "step": 70
    },
    {
      "epoch": 0.2976190476190476,
      "grad_norm": 26.097345440204215,
      "learning_rate": 2.665281412563814e-07,
      "logits/chosen": -1.875,
      "logits/rejected": -1.875,
      "logps/chosen": -0.26171875,
      "logps/rejected": -0.37109375,
      "loss": 2.318,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.625,
      "rewards/margins": 1.0859375,
      "rewards/rejected": -3.703125,
      "step": 75
    },
    {
      "epoch": 0.31746031746031744,
      "grad_norm": 21.061149357230455,
      "learning_rate": 2.596872533367763e-07,
      "logits/chosen": -1.5,
      "logits/rejected": -1.5,
      "logps/chosen": -0.28125,
      "logps/rejected": -0.373046875,
      "loss": 2.2812,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.828125,
      "rewards/margins": 0.89453125,
      "rewards/rejected": -3.71875,
      "step": 80
    },
    {
      "epoch": 0.3373015873015873,
      "grad_norm": 34.0182184293618,
      "learning_rate": 2.5231669685556633e-07,
      "logits/chosen": -1.1875,
      "logits/rejected": -1.1875,
      "logps/chosen": -0.27734375,
      "logps/rejected": -0.39453125,
      "loss": 2.166,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.78125,
      "rewards/margins": 1.15625,
      "rewards/rejected": -3.9375,
      "step": 85
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 20.821123274301424,
      "learning_rate": 2.444520634784271e-07,
      "logits/chosen": -1.3359375,
      "logits/rejected": -1.3359375,
      "logps/chosen": -0.267578125,
      "logps/rejected": -0.361328125,
      "loss": 2.1671,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.671875,
      "rewards/margins": 0.9453125,
      "rewards/rejected": -3.625,
      "step": 90
    },
    {
      "epoch": 0.376984126984127,
      "grad_norm": 22.90486064935448,
      "learning_rate": 2.361313307179837e-07,
      "logits/chosen": -1.7265625,
      "logits/rejected": -1.7265625,
      "logps/chosen": -0.279296875,
      "logps/rejected": -0.353515625,
      "loss": 2.0894,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.78125,
      "rewards/margins": 0.74609375,
      "rewards/rejected": -3.53125,
      "step": 95
    },
    {
      "epoch": 0.3968253968253968,
      "grad_norm": 27.68913080972186,
      "learning_rate": 2.2739467854427513e-07,
      "logits/chosen": -1.65625,
      "logits/rejected": -1.65625,
      "logps/chosen": -0.353515625,
      "logps/rejected": -0.46875,
      "loss": 2.1033,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.546875,
      "rewards/margins": 1.1484375,
      "rewards/rejected": -4.6875,
      "step": 100
    },
    {
      "epoch": 0.4166666666666667,
      "grad_norm": 29.979641972346496,
      "learning_rate": 2.1828429535977582e-07,
      "logits/chosen": -1.6796875,
      "logits/rejected": -1.6796875,
      "logps/chosen": -0.3203125,
      "logps/rejected": -0.4296875,
      "loss": 2.0674,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -3.1875,
      "rewards/margins": 1.09375,
      "rewards/rejected": -4.28125,
      "step": 105
    },
    {
      "epoch": 0.4365079365079365,
      "grad_norm": 44.14626475514875,
      "learning_rate": 2.0884417427590215e-07,
      "logits/chosen": -1.65625,
      "logits/rejected": -1.65625,
      "logps/chosen": -0.34375,
      "logps/rejected": -0.46875,
      "loss": 2.0088,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.4375,
      "rewards/margins": 1.2578125,
      "rewards/rejected": -4.6875,
      "step": 110
    },
    {
      "epoch": 0.45634920634920634,
      "grad_norm": 33.80229631943301,
      "learning_rate": 1.9911990067476334e-07,
      "logits/chosen": -1.15625,
      "logits/rejected": -1.15625,
      "logps/chosen": -0.23828125,
      "logps/rejected": -0.34375,
      "loss": 1.9458,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.390625,
      "rewards/margins": 1.046875,
      "rewards/rejected": -3.4375,
      "step": 115
    },
    {
      "epoch": 0.47619047619047616,
      "grad_norm": 39.86801120602329,
      "learning_rate": 1.8915843208199963e-07,
      "logits/chosen": -1.625,
      "logits/rejected": -1.625,
      "logps/chosen": -0.4296875,
      "logps/rejected": -0.546875,
      "loss": 1.9224,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.3125,
      "rewards/margins": 1.171875,
      "rewards/rejected": -5.46875,
      "step": 120
    },
    {
      "epoch": 0.49603174603174605,
      "grad_norm": 33.949958184137344,
      "learning_rate": 1.7900787141367918e-07,
      "logits/chosen": -1.453125,
      "logits/rejected": -1.453125,
      "logps/chosen": -0.357421875,
      "logps/rejected": -0.4765625,
      "loss": 2.0021,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.578125,
      "rewards/margins": 1.1875,
      "rewards/rejected": -4.78125,
      "step": 125
    },
    {
      "epoch": 0.5158730158730159,
      "grad_norm": 37.35189108229666,
      "learning_rate": 1.687172346922213e-07,
      "logits/chosen": -1.546875,
      "logits/rejected": -1.546875,
      "logps/chosen": -0.419921875,
      "logps/rejected": -0.546875,
      "loss": 1.7533,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -4.1875,
      "rewards/margins": 1.2578125,
      "rewards/rejected": -5.46875,
      "step": 130
    },
    {
      "epoch": 0.5357142857142857,
      "grad_norm": 33.938784524888526,
      "learning_rate": 1.5833621435302245e-07,
      "logits/chosen": -1.734375,
      "logits/rejected": -1.734375,
      "logps/chosen": -0.421875,
      "logps/rejected": -0.63671875,
      "loss": 1.716,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.21875,
      "rewards/margins": 2.15625,
      "rewards/rejected": -6.34375,
      "step": 135
    },
    {
      "epoch": 0.5555555555555556,
      "grad_norm": 35.12217870275606,
      "learning_rate": 1.4791493928475273e-07,
      "logits/chosen": -1.5234375,
      "logits/rejected": -1.5234375,
      "logps/chosen": -0.30078125,
      "logps/rejected": -0.578125,
      "loss": 1.7479,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.015625,
      "rewards/margins": 2.765625,
      "rewards/rejected": -5.78125,
      "step": 140
    },
    {
      "epoch": 0.5753968253968254,
      "grad_norm": 34.0474825303708,
      "learning_rate": 1.3750373276206429e-07,
      "logits/chosen": -1.59375,
      "logits/rejected": -1.59375,
      "logps/chosen": -0.404296875,
      "logps/rejected": -0.60546875,
      "loss": 1.5267,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.03125,
      "rewards/margins": 2.015625,
      "rewards/rejected": -6.0625,
      "step": 145
    },
    {
      "epoch": 0.5952380952380952,
      "grad_norm": 40.81441971424475,
      "learning_rate": 1.2715286943962924e-07,
      "logits/chosen": -1.484375,
      "logits/rejected": -1.484375,
      "logps/chosen": -0.36328125,
      "logps/rejected": -0.5546875,
      "loss": 1.4843,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.625,
      "rewards/margins": 1.9375,
      "rewards/rejected": -5.5625,
      "step": 150
    },
    {
      "epoch": 0.6150793650793651,
      "grad_norm": 34.79982376085805,
      "learning_rate": 1.1691233258095889e-07,
      "logits/chosen": -1.484375,
      "logits/rejected": -1.484375,
      "logps/chosen": -0.306640625,
      "logps/rejected": -0.51953125,
      "loss": 1.3835,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.0625,
      "rewards/margins": 2.125,
      "rewards/rejected": -5.1875,
      "step": 155
    },
    {
      "epoch": 0.6349206349206349,
      "grad_norm": 42.42547672022662,
      "learning_rate": 1.0683157269432096e-07,
      "logits/chosen": -1.8046875,
      "logits/rejected": -1.8046875,
      "logps/chosen": -0.408203125,
      "logps/rejected": -0.5859375,
      "loss": 1.4352,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -4.0625,
      "rewards/margins": 1.796875,
      "rewards/rejected": -5.875,
      "step": 160
    },
    {
      "epoch": 0.6547619047619048,
      "grad_norm": 31.589553355930356,
      "learning_rate": 9.695926874127765e-08,
      "logits/chosen": -1.609375,
      "logits/rejected": -1.609375,
      "logps/chosen": -0.3125,
      "logps/rejected": -0.5546875,
      "loss": 1.2363,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.109375,
      "rewards/margins": 2.4375,
      "rewards/rejected": -5.5625,
      "step": 165
    },
    {
      "epoch": 0.6746031746031746,
      "grad_norm": 49.83108895416612,
      "learning_rate": 8.734309307094381e-08,
      "logits/chosen": -1.515625,
      "logits/rejected": -1.515625,
      "logps/chosen": -0.255859375,
      "logps/rejected": -0.39453125,
      "loss": 1.7128,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.5625,
      "rewards/margins": 1.3984375,
      "rewards/rejected": -3.953125,
      "step": 170
    },
    {
      "epoch": 0.6944444444444444,
      "grad_norm": 53.80975894133238,
      "learning_rate": 7.802948121507461e-08,
      "logits/chosen": -1.7265625,
      "logits/rejected": -1.7265625,
      "logps/chosen": -0.33203125,
      "logps/rejected": -0.52734375,
      "loss": 1.7579,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.3125,
      "rewards/margins": 1.953125,
      "rewards/rejected": -5.28125,
      "step": 175
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 26.370363764214527,
      "learning_rate": 6.906340765561734e-08,
      "logits/chosen": -1.265625,
      "logits/rejected": -1.265625,
      "logps/chosen": -0.42578125,
      "logps/rejected": -0.69140625,
      "loss": 1.1591,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.25,
      "rewards/margins": 2.65625,
      "rewards/rejected": -6.90625,
      "step": 180
    },
    {
      "epoch": 0.7341269841269841,
      "grad_norm": 36.18162633880284,
      "learning_rate": 6.048816864752422e-08,
      "logits/chosen": -1.609375,
      "logits/rejected": -1.609375,
      "logps/chosen": -0.421875,
      "logps/rejected": -0.7421875,
      "loss": 1.2978,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.21875,
      "rewards/margins": 3.1875,
      "rewards/rejected": -7.40625,
      "step": 185
    },
    {
      "epoch": 0.753968253968254,
      "grad_norm": 42.731435648770926,
      "learning_rate": 5.2345173145552125e-08,
      "logits/chosen": -1.78125,
      "logits/rejected": -1.78125,
      "logps/chosen": -0.32421875,
      "logps/rejected": -0.55859375,
      "loss": 1.5185,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.234375,
      "rewards/margins": 2.34375,
      "rewards/rejected": -5.5625,
      "step": 190
    },
    {
      "epoch": 0.7738095238095238,
      "grad_norm": 47.99973669165908,
      "learning_rate": 4.467374284464271e-08,
      "logits/chosen": -1.6171875,
      "logits/rejected": -1.6171875,
      "logps/chosen": -0.35546875,
      "logps/rejected": -0.62890625,
      "loss": 1.2557,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.546875,
      "rewards/margins": 2.75,
      "rewards/rejected": -6.28125,
      "step": 195
    },
    {
      "epoch": 0.7936507936507936,
      "grad_norm": 31.799179942413893,
      "learning_rate": 3.751092229946681e-08,
      "logits/chosen": -1.7421875,
      "logits/rejected": -1.7421875,
      "logps/chosen": -0.37890625,
      "logps/rejected": -0.578125,
      "loss": 1.3694,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.78125,
      "rewards/margins": 1.984375,
      "rewards/rejected": -5.78125,
      "step": 200
    },
    {
      "epoch": 0.8134920634920635,
      "grad_norm": 32.210040778412214,
      "learning_rate": 3.0891300040047544e-08,
      "logits/chosen": -1.390625,
      "logits/rejected": -1.390625,
      "logps/chosen": -0.423828125,
      "logps/rejected": -0.68359375,
      "loss": 1.3582,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.25,
      "rewards/margins": 2.578125,
      "rewards/rejected": -6.8125,
      "step": 205
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 41.74711583345937,
      "learning_rate": 2.4846841547275915e-08,
      "logits/chosen": -1.1640625,
      "logits/rejected": -1.1640625,
      "logps/chosen": -0.259765625,
      "logps/rejected": -0.396484375,
      "loss": 1.3447,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.59375,
      "rewards/margins": 1.3671875,
      "rewards/rejected": -3.96875,
      "step": 210
    },
    {
      "epoch": 0.8531746031746031,
      "grad_norm": 46.7470602788351,
      "learning_rate": 1.9406734894862847e-08,
      "logits/chosen": -1.1875,
      "logits/rejected": -1.1875,
      "logps/chosen": -0.412109375,
      "logps/rejected": -0.71484375,
      "loss": 1.2536,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.125,
      "rewards/margins": 3.03125,
      "rewards/rejected": -7.15625,
      "step": 215
    },
    {
      "epoch": 0.873015873015873,
      "grad_norm": 60.766181922921575,
      "learning_rate": 1.459724980310767e-08,
      "logits/chosen": -1.4765625,
      "logits/rejected": -1.484375,
      "logps/chosen": -0.34765625,
      "logps/rejected": -0.625,
      "loss": 1.2171,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.46875,
      "rewards/margins": 2.8125,
      "rewards/rejected": -6.28125,
      "step": 220
    },
    {
      "epoch": 0.8928571428571429,
      "grad_norm": 31.72970074806701,
      "learning_rate": 1.0441610785097471e-08,
      "logits/chosen": -1.3125,
      "logits/rejected": -1.3125,
      "logps/chosen": -0.34765625,
      "logps/rejected": -0.453125,
      "loss": 1.2928,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -3.46875,
      "rewards/margins": 1.0625,
      "rewards/rejected": -4.53125,
      "step": 225
    },
    {
      "epoch": 0.9126984126984127,
      "grad_norm": 35.90988337509379,
      "learning_rate": 6.959884997901705e-09,
      "logits/chosen": -1.203125,
      "logits/rejected": -1.203125,
      "logps/chosen": -0.2451171875,
      "logps/rejected": -0.484375,
      "loss": 1.2009,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.453125,
      "rewards/margins": 2.390625,
      "rewards/rejected": -4.84375,
      "step": 230
    },
    {
      "epoch": 0.9325396825396826,
      "grad_norm": 39.759595151629654,
      "learning_rate": 4.168885340316719e-09,
      "logits/chosen": -1.4296875,
      "logits/rejected": -1.4296875,
      "logps/chosen": -0.4453125,
      "logps/rejected": -0.5390625,
      "loss": 1.4925,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -4.4375,
      "rewards/margins": 0.9296875,
      "rewards/rejected": -5.375,
      "step": 235
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 45.2835359838723,
      "learning_rate": 2.0820892650920686e-09,
      "logits/chosen": -1.40625,
      "logits/rejected": -1.40625,
      "logps/chosen": -0.4296875,
      "logps/rejected": -0.8671875,
      "loss": 1.0995,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.3125,
      "rewards/margins": 4.375,
      "rewards/rejected": -8.6875,
      "step": 240
    },
    {
      "epoch": 0.9722222222222222,
      "grad_norm": 38.0802575489829,
      "learning_rate": 7.095736976853894e-10,
      "logits/chosen": -1.2578125,
      "logits/rejected": -1.2578125,
      "logps/chosen": -0.48046875,
      "logps/rejected": -0.890625,
      "loss": 1.3758,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.78125,
      "rewards/margins": 4.125,
      "rewards/rejected": -8.875,
      "step": 245
    },
    {
      "epoch": 0.9920634920634921,
      "grad_norm": 54.22110017894445,
      "learning_rate": 5.796637581689245e-11,
      "logits/chosen": -1.484375,
      "logits/rejected": -1.484375,
      "logps/chosen": -0.44921875,
      "logps/rejected": -0.6015625,
      "loss": 1.466,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.5,
      "rewards/margins": 1.515625,
      "rewards/rejected": -6.0,
      "step": 250
    },
    {
      "epoch": 1.0,
      "step": 252,
      "total_flos": 0.0,
      "train_loss": 1.8338732454511855,
      "train_runtime": 6099.2148,
      "train_samples_per_second": 0.33,
      "train_steps_per_second": 0.041
    }
  ],
  "logging_steps": 5,
  "max_steps": 252,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}