{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 6.896551724137931,
"eval_steps": 500,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.3448275862068966,
"grad_norm": 3.9107234477996826,
"learning_rate": 8.620689655172415e-07,
"logits/chosen": -2.363854169845581,
"logits/rejected": -2.338671922683716,
"logps/chosen": -76.8819351196289,
"logps/rejected": -75.86869049072266,
"loss": 0.6942,
"rewards/accuracies": 0.375,
"rewards/chosen": -0.003996999468654394,
"rewards/margins": -0.004145228303968906,
"rewards/rejected": 0.00014822949015069753,
"step": 10
},
{
"epoch": 0.6896551724137931,
"grad_norm": 4.328212738037109,
"learning_rate": 1.724137931034483e-06,
"logits/chosen": -2.3617663383483887,
"logits/rejected": -2.3477540016174316,
"logps/chosen": -93.76543426513672,
"logps/rejected": -75.16656494140625,
"loss": 0.6931,
"rewards/accuracies": 0.48750001192092896,
"rewards/chosen": 0.002653368515893817,
"rewards/margins": 0.006352785974740982,
"rewards/rejected": -0.003699416993185878,
"step": 20
},
{
"epoch": 1.0344827586206897,
"grad_norm": 3.473827838897705,
"learning_rate": 2.5862068965517246e-06,
"logits/chosen": -2.3354058265686035,
"logits/rejected": -2.3330740928649902,
"logps/chosen": -138.9960479736328,
"logps/rejected": -67.00438690185547,
"loss": 0.6927,
"rewards/accuracies": 0.44999998807907104,
"rewards/chosen": 0.01745142787694931,
"rewards/margins": 0.013760591857135296,
"rewards/rejected": 0.0036908381152898073,
"step": 30
},
{
"epoch": 1.3793103448275863,
"grad_norm": 3.9458820819854736,
"learning_rate": 3.448275862068966e-06,
"logits/chosen": -2.3478751182556152,
"logits/rejected": -2.3700289726257324,
"logps/chosen": -75.59983825683594,
"logps/rejected": -67.20357513427734,
"loss": 0.69,
"rewards/accuracies": 0.5,
"rewards/chosen": 0.00024204826331697404,
"rewards/margins": 0.005866119172424078,
"rewards/rejected": -0.005624071694910526,
"step": 40
},
{
"epoch": 1.7241379310344827,
"grad_norm": 4.299890041351318,
"learning_rate": 4.310344827586207e-06,
"logits/chosen": -2.338815212249756,
"logits/rejected": -2.3532769680023193,
"logps/chosen": -72.74342346191406,
"logps/rejected": -87.63409423828125,
"loss": 0.6899,
"rewards/accuracies": 0.574999988079071,
"rewards/chosen": -0.003249581903219223,
"rewards/margins": 0.006888723932206631,
"rewards/rejected": -0.010138307698071003,
"step": 50
},
{
"epoch": 2.0689655172413794,
"grad_norm": 4.957555294036865,
"learning_rate": 4.999818897894192e-06,
"logits/chosen": -2.344572067260742,
"logits/rejected": -2.345888614654541,
"logps/chosen": -80.16046142578125,
"logps/rejected": -73.05986022949219,
"loss": 0.6802,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -0.013288321904838085,
"rewards/margins": 0.020968889817595482,
"rewards/rejected": -0.03425721079111099,
"step": 60
},
{
"epoch": 2.413793103448276,
"grad_norm": 4.212078094482422,
"learning_rate": 4.9934830787948756e-06,
"logits/chosen": -2.3330090045928955,
"logits/rejected": -2.328575372695923,
"logps/chosen": -73.63365173339844,
"logps/rejected": -74.45356750488281,
"loss": 0.6693,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -0.009874681942164898,
"rewards/margins": 0.04785541445016861,
"rewards/rejected": -0.05773010104894638,
"step": 70
},
{
"epoch": 2.7586206896551726,
"grad_norm": 4.446742534637451,
"learning_rate": 4.978118375700895e-06,
"logits/chosen": -2.3472495079040527,
"logits/rejected": -2.3697409629821777,
"logps/chosen": -73.44490814208984,
"logps/rejected": -89.69837951660156,
"loss": 0.6553,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -0.022817375138401985,
"rewards/margins": 0.09045850485563278,
"rewards/rejected": -0.11327588558197021,
"step": 80
},
{
"epoch": 3.103448275862069,
"grad_norm": 4.435715198516846,
"learning_rate": 4.953780424089803e-06,
"logits/chosen": -2.346717596054077,
"logits/rejected": -2.3626608848571777,
"logps/chosen": -85.64796447753906,
"logps/rejected": -80.06268310546875,
"loss": 0.6432,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.0543200746178627,
"rewards/margins": 0.1085677295923233,
"rewards/rejected": -0.1628878116607666,
"step": 90
},
{
"epoch": 3.4482758620689653,
"grad_norm": 4.5200910568237305,
"learning_rate": 4.920557351506409e-06,
"logits/chosen": -2.358900547027588,
"logits/rejected": -2.3591980934143066,
"logps/chosen": -82.83473205566406,
"logps/rejected": -80.66204071044922,
"loss": 0.5996,
"rewards/accuracies": 0.875,
"rewards/chosen": -0.046993009746074677,
"rewards/margins": 0.20401433110237122,
"rewards/rejected": -0.2510073184967041,
"step": 100
},
{
"epoch": 3.793103448275862,
"grad_norm": 4.106562614440918,
"learning_rate": 4.878569458453592e-06,
"logits/chosen": -2.347762107849121,
"logits/rejected": -2.33540415763855,
"logps/chosen": -72.23506164550781,
"logps/rejected": -85.77088928222656,
"loss": 0.6003,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -0.051100969314575195,
"rewards/margins": 0.24443332850933075,
"rewards/rejected": -0.29553431272506714,
"step": 110
},
{
"epoch": 4.137931034482759,
"grad_norm": 4.67632532119751,
"learning_rate": 4.827968782785062e-06,
"logits/chosen": -2.353370428085327,
"logits/rejected": -2.378964900970459,
"logps/chosen": -67.91919708251953,
"logps/rejected": -86.26390838623047,
"loss": 0.5716,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": -0.11952020972967148,
"rewards/margins": 0.30119410157203674,
"rewards/rejected": -0.42071428894996643,
"step": 120
},
{
"epoch": 4.482758620689655,
"grad_norm": 4.5131025314331055,
"learning_rate": 4.7689385491773934e-06,
"logits/chosen": -2.3820648193359375,
"logits/rejected": -2.403965473175049,
"logps/chosen": -71.66841125488281,
"logps/rejected": -77.90869140625,
"loss": 0.5362,
"rewards/accuracies": 0.875,
"rewards/chosen": -0.0694696456193924,
"rewards/margins": 0.47986412048339844,
"rewards/rejected": -0.5493337512016296,
"step": 130
},
{
"epoch": 4.827586206896552,
"grad_norm": 4.446605205535889,
"learning_rate": 4.70169250567482e-06,
"logits/chosen": -2.372788906097412,
"logits/rejected": -2.379612684249878,
"logps/chosen": -69.44779968261719,
"logps/rejected": -80.1120376586914,
"loss": 0.5186,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": -0.12896183133125305,
"rewards/margins": 0.5137210488319397,
"rewards/rejected": -0.6426829099655151,
"step": 140
},
{
"epoch": 5.172413793103448,
"grad_norm": 4.605374813079834,
"learning_rate": 4.626474149709127e-06,
"logits/chosen": -2.3140041828155518,
"logits/rejected": -2.336975574493408,
"logps/chosen": -87.05641174316406,
"logps/rejected": -94.53668212890625,
"loss": 0.4794,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": -0.15513856709003448,
"rewards/margins": 0.6417296528816223,
"rewards/rejected": -0.7968682050704956,
"step": 150
},
{
"epoch": 5.517241379310345,
"grad_norm": 6.027809143066406,
"learning_rate": 4.54355584639723e-06,
"logits/chosen": -2.365192413330078,
"logits/rejected": -2.3872745037078857,
"logps/chosen": -73.7781753540039,
"logps/rejected": -82.1873550415039,
"loss": 0.4599,
"rewards/accuracies": 0.8125,
"rewards/chosen": -0.25834327936172485,
"rewards/margins": 0.5800828337669373,
"rewards/rejected": -0.8384261131286621,
"step": 160
},
{
"epoch": 5.862068965517241,
"grad_norm": 5.196141242980957,
"learning_rate": 4.45323784230908e-06,
"logits/chosen": -2.391653299331665,
"logits/rejected": -2.394854784011841,
"logps/chosen": -72.33963775634766,
"logps/rejected": -86.77699279785156,
"loss": 0.4467,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -0.2543284595012665,
"rewards/margins": 0.7806032299995422,
"rewards/rejected": -1.0349315404891968,
"step": 170
},
{
"epoch": 6.206896551724138,
"grad_norm": 5.285089015960693,
"learning_rate": 4.355847178277025e-06,
"logits/chosen": -2.3918795585632324,
"logits/rejected": -2.4050662517547607,
"logps/chosen": -77.20832061767578,
"logps/rejected": -98.5592041015625,
"loss": 0.4052,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": -0.35020238161087036,
"rewards/margins": 1.0169063806533813,
"rewards/rejected": -1.367108702659607,
"step": 180
},
{
"epoch": 6.551724137931035,
"grad_norm": 5.308840751647949,
"learning_rate": 4.2517365051833564e-06,
"logits/chosen": -2.3778843879699707,
"logits/rejected": -2.3865675926208496,
"logps/chosen": -69.90904235839844,
"logps/rejected": -86.66047668457031,
"loss": 0.3809,
"rewards/accuracies": 0.9375,
"rewards/chosen": -0.42647725343704224,
"rewards/margins": 0.9949015378952026,
"rewards/rejected": -1.4213788509368896,
"step": 190
},
{
"epoch": 6.896551724137931,
"grad_norm": 6.095545768737793,
"learning_rate": 4.141282807014034e-06,
"logits/chosen": -2.407437801361084,
"logits/rejected": -2.4296727180480957,
"logps/chosen": -82.95701599121094,
"logps/rejected": -83.4615478515625,
"loss": 0.3742,
"rewards/accuracies": 0.875,
"rewards/chosen": -0.6070786714553833,
"rewards/margins": 0.968428909778595,
"rewards/rejected": -1.5755075216293335,
"step": 200
}
],
"logging_steps": 10,
"max_steps": 580,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7.2868315071198e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}