{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9964513839602555,
"eval_steps": 500,
"global_step": 162,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"eta": 0.0010000000474974513,
"grad_norm": 22.254964893351787,
"learning_rate": 2.941176470588235e-08,
"logits/chosen": -2.4821925163269043,
"logits/rejected": -2.235710382461548,
"logps/chosen": -280.2124328613281,
"logps/pi_response": -436.6075744628906,
"logps/ref_response": -436.6075744628906,
"logps/rejected": -273.4680480957031,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.06,
"eta": 0.0010000000474974513,
"grad_norm": 23.328818767881323,
"learning_rate": 2.941176470588235e-07,
"logits/chosen": -2.3237369060516357,
"logits/rejected": -2.2622690200805664,
"logps/chosen": -239.0484619140625,
"logps/pi_response": -371.8123474121094,
"logps/ref_response": -370.81756591796875,
"logps/rejected": -253.18133544921875,
"loss": 0.6927,
"rewards/accuracies": 0.4444444477558136,
"rewards/chosen": -0.009806615300476551,
"rewards/margins": 0.001764079206623137,
"rewards/rejected": -0.011570693925023079,
"step": 10
},
{
"epoch": 0.12,
"eta": 0.0010000000474974513,
"grad_norm": 31.176954929712068,
"learning_rate": 4.99472085783721e-07,
"logits/chosen": -2.121244430541992,
"logits/rejected": -2.1339948177337646,
"logps/chosen": -252.5571746826172,
"logps/pi_response": -376.398193359375,
"logps/ref_response": -357.7356872558594,
"logps/rejected": -262.6872863769531,
"loss": 0.6923,
"rewards/accuracies": 0.5076923370361328,
"rewards/chosen": -0.21289017796516418,
"rewards/margins": -0.0055158319883048534,
"rewards/rejected": -0.20737436413764954,
"step": 20
},
{
"epoch": 0.18,
"eta": 0.0010000000474974513,
"grad_norm": 29.374574206663485,
"learning_rate": 4.901488388458247e-07,
"logits/chosen": -2.0692079067230225,
"logits/rejected": -2.033362865447998,
"logps/chosen": -240.89402770996094,
"logps/pi_response": -404.3094787597656,
"logps/ref_response": -375.89013671875,
"logps/rejected": -253.24954223632812,
"loss": 0.6895,
"rewards/accuracies": 0.5038461685180664,
"rewards/chosen": -0.07824164628982544,
"rewards/margins": 0.01707359030842781,
"rewards/rejected": -0.09531523287296295,
"step": 30
},
{
"epoch": 0.25,
"eta": 0.0010000000474974513,
"grad_norm": 30.44936672759969,
"learning_rate": 4.695964991097616e-07,
"logits/chosen": -1.8107295036315918,
"logits/rejected": -2.005711555480957,
"logps/chosen": -303.3603515625,
"logps/pi_response": -455.744873046875,
"logps/ref_response": -391.4946594238281,
"logps/rejected": -326.20098876953125,
"loss": 0.6898,
"rewards/accuracies": 0.5307692289352417,
"rewards/chosen": -0.6260919570922852,
"rewards/margins": 0.034338854253292084,
"rewards/rejected": -0.6604308485984802,
"step": 40
},
{
"epoch": 0.31,
"eta": 0.0010000000474974513,
"grad_norm": 23.599398572955327,
"learning_rate": 4.3877607113930516e-07,
"logits/chosen": -1.907410979270935,
"logits/rejected": -1.855884075164795,
"logps/chosen": -248.3097686767578,
"logps/pi_response": -406.1575012207031,
"logps/ref_response": -367.650390625,
"logps/rejected": -256.7185974121094,
"loss": 0.7017,
"rewards/accuracies": 0.48076921701431274,
"rewards/chosen": -0.12874366343021393,
"rewards/margins": -0.0029275938868522644,
"rewards/rejected": -0.12581607699394226,
"step": 50
},
{
"epoch": 0.37,
"eta": 0.0010000000474974513,
"grad_norm": 23.113382825281022,
"learning_rate": 3.991286838919086e-07,
"logits/chosen": -1.7167693376541138,
"logits/rejected": -1.6973148584365845,
"logps/chosen": -320.3218688964844,
"logps/pi_response": -487.9947204589844,
"logps/ref_response": -405.68280029296875,
"logps/rejected": -322.4259948730469,
"loss": 0.6945,
"rewards/accuracies": 0.5615384578704834,
"rewards/chosen": -0.5926837921142578,
"rewards/margins": 0.038080714643001556,
"rewards/rejected": -0.6307645440101624,
"step": 60
},
{
"epoch": 0.43,
"eta": 0.0010000000474974513,
"grad_norm": 31.784636012816986,
"learning_rate": 3.52508205130354e-07,
"logits/chosen": -1.5467545986175537,
"logits/rejected": -1.5477180480957031,
"logps/chosen": -306.8106994628906,
"logps/pi_response": -480.5911865234375,
"logps/ref_response": -377.5798034667969,
"logps/rejected": -334.5196533203125,
"loss": 0.6801,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": -0.787551760673523,
"rewards/margins": 0.06417767703533173,
"rewards/rejected": -0.8517293930053711,
"step": 70
},
{
"epoch": 0.49,
"eta": 0.0010000000474974513,
"grad_norm": 35.2255241905511,
"learning_rate": 3.010945566265912e-07,
"logits/chosen": -1.584012508392334,
"logits/rejected": -1.7565481662750244,
"logps/chosen": -292.3354797363281,
"logps/pi_response": -463.38507080078125,
"logps/ref_response": -351.9272766113281,
"logps/rejected": -315.16680908203125,
"loss": 0.6865,
"rewards/accuracies": 0.5961538553237915,
"rewards/chosen": -0.6284902691841125,
"rewards/margins": 0.07952240109443665,
"rewards/rejected": -0.7080127596855164,
"step": 80
},
{
"epoch": 0.55,
"eta": 0.0010000000474974513,
"grad_norm": 24.755608967697253,
"learning_rate": 2.4729178344249006e-07,
"logits/chosen": -1.6239264011383057,
"logits/rejected": -1.7038928270339966,
"logps/chosen": -333.8011474609375,
"logps/pi_response": -518.841064453125,
"logps/ref_response": -389.2355651855469,
"logps/rejected": -362.0622253417969,
"loss": 0.6905,
"rewards/accuracies": 0.5538461804389954,
"rewards/chosen": -0.9721028804779053,
"rewards/margins": 0.06480876356363297,
"rewards/rejected": -1.0369116067886353,
"step": 90
},
{
"epoch": 0.62,
"eta": 0.0010000000474974513,
"grad_norm": 28.32503077830092,
"learning_rate": 1.9361564345465145e-07,
"logits/chosen": -1.6863784790039062,
"logits/rejected": -1.576540470123291,
"logps/chosen": -324.2234802246094,
"logps/pi_response": -477.94110107421875,
"logps/ref_response": -352.850830078125,
"logps/rejected": -337.5308532714844,
"loss": 0.6817,
"rewards/accuracies": 0.5692307949066162,
"rewards/chosen": -0.95728600025177,
"rewards/margins": 0.05953861400485039,
"rewards/rejected": -1.0168246030807495,
"step": 100
},
{
"epoch": 0.68,
"eta": 0.0010000000474974513,
"grad_norm": 24.75749478420196,
"learning_rate": 1.4257597331216208e-07,
"logits/chosen": -1.5470343828201294,
"logits/rejected": -1.6905183792114258,
"logps/chosen": -339.9206848144531,
"logps/pi_response": -517.6537475585938,
"logps/ref_response": -383.1593322753906,
"logps/rejected": -354.73150634765625,
"loss": 0.6791,
"rewards/accuracies": 0.4923076927661896,
"rewards/chosen": -1.0359457731246948,
"rewards/margins": 0.02877073362469673,
"rewards/rejected": -1.0647164583206177,
"step": 110
},
{
"epoch": 0.74,
"eta": 0.0010000000474974513,
"grad_norm": 27.374335177920894,
"learning_rate": 9.655933126436563e-08,
"logits/chosen": -1.7334721088409424,
"logits/rejected": -1.6775918006896973,
"logps/chosen": -323.307861328125,
"logps/pi_response": -500.0389709472656,
"logps/ref_response": -373.8336181640625,
"logps/rejected": -337.35687255859375,
"loss": 0.6743,
"rewards/accuracies": 0.5730769038200378,
"rewards/chosen": -0.884036660194397,
"rewards/margins": 0.05840235576033592,
"rewards/rejected": -0.9424390196800232,
"step": 120
},
{
"epoch": 0.8,
"eta": 0.0010000000474974513,
"grad_norm": 29.06627362451846,
"learning_rate": 5.771740434959277e-08,
"logits/chosen": -1.7559294700622559,
"logits/rejected": -1.656353235244751,
"logps/chosen": -322.9090270996094,
"logps/pi_response": -490.60504150390625,
"logps/ref_response": -358.2728271484375,
"logps/rejected": -327.773681640625,
"loss": 0.677,
"rewards/accuracies": 0.5884615182876587,
"rewards/chosen": -0.8856447339057922,
"rewards/margins": 0.060253795236349106,
"rewards/rejected": -0.9458985328674316,
"step": 130
},
{
"epoch": 0.86,
"eta": 0.0010000000474974513,
"grad_norm": 29.276601449078793,
"learning_rate": 2.7866397900677185e-08,
"logits/chosen": -1.5905346870422363,
"logits/rejected": -1.5064502954483032,
"logps/chosen": -327.3086242675781,
"logps/pi_response": -507.3574523925781,
"logps/ref_response": -365.5458984375,
"logps/rejected": -337.4426574707031,
"loss": 0.6696,
"rewards/accuracies": 0.6307692527770996,
"rewards/chosen": -0.9561350345611572,
"rewards/margins": 0.08353108912706375,
"rewards/rejected": -1.0396660566329956,
"step": 140
},
{
"epoch": 0.92,
"eta": 0.0010000000474974513,
"grad_norm": 30.496204004580633,
"learning_rate": 8.402111802159412e-09,
"logits/chosen": -1.74466872215271,
"logits/rejected": -1.6440544128417969,
"logps/chosen": -315.1639709472656,
"logps/pi_response": -511.9558410644531,
"logps/ref_response": -375.8587951660156,
"logps/rejected": -326.924560546875,
"loss": 0.6791,
"rewards/accuracies": 0.5846154093742371,
"rewards/chosen": -0.8547829389572144,
"rewards/margins": 0.06981422752141953,
"rewards/rejected": -0.9245970845222473,
"step": 150
},
{
"epoch": 0.98,
"eta": 0.0010000000474974513,
"grad_norm": 29.5797991936398,
"learning_rate": 2.3467443900582197e-10,
"logits/chosen": -1.836174726486206,
"logits/rejected": -1.7468369007110596,
"logps/chosen": -307.39251708984375,
"logps/pi_response": -489.5311279296875,
"logps/ref_response": -352.3800354003906,
"logps/rejected": -338.456787109375,
"loss": 0.6738,
"rewards/accuracies": 0.5846154093742371,
"rewards/chosen": -0.8461953401565552,
"rewards/margins": 0.09575623273849487,
"rewards/rejected": -0.9419515132904053,
"step": 160
},
{
"epoch": 1.0,
"step": 162,
"total_flos": 0.0,
"train_loss": 0.6842105340810469,
"train_runtime": 43334.2513,
"train_samples_per_second": 0.488,
"train_steps_per_second": 0.004
}
],
"logging_steps": 10,
"max_steps": 162,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}