{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.6766169154228856,
"eval_steps": 500,
"global_step": 1700,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03980099502487562,
"grad_norm": 9.524229049682617,
"learning_rate": 4.980474401576887e-07,
"logits/chosen": -0.1925463080406189,
"logits/rejected": -0.15873177349567413,
"logps/chosen": -239.6322021484375,
"logps/rejected": -242.57594299316406,
"loss": 0.8819,
"rewards/accuracies": 0.49281251430511475,
"rewards/chosen": -1.3072433471679688,
"rewards/margins": 0.008615786209702492,
"rewards/rejected": -1.315859079360962,
"step": 100
},
{
"epoch": 0.07960199004975124,
"grad_norm": 7.333236217498779,
"learning_rate": 4.922202605502572e-07,
"logits/chosen": 0.046361636370420456,
"logits/rejected": 0.08236894011497498,
"logps/chosen": -231.10765075683594,
"logps/rejected": -233.24984741210938,
"loss": 0.8361,
"rewards/accuracies": 0.48875001072883606,
"rewards/chosen": -0.9621695876121521,
"rewards/margins": -0.021255964413285255,
"rewards/rejected": -0.9409136772155762,
"step": 200
},
{
"epoch": 0.11940298507462686,
"grad_norm": 5.164106369018555,
"learning_rate": 4.82609484512869e-07,
"logits/chosen": 0.0250965878367424,
"logits/rejected": 0.054141998291015625,
"logps/chosen": -229.79965209960938,
"logps/rejected": -231.60562133789062,
"loss": 0.8114,
"rewards/accuracies": 0.48750001192092896,
"rewards/chosen": -0.8564431667327881,
"rewards/margins": -0.01034404058009386,
"rewards/rejected": -0.8460990786552429,
"step": 300
},
{
"epoch": 0.15920398009950248,
"grad_norm": 5.328795909881592,
"learning_rate": 4.6936523696827614e-07,
"logits/chosen": 0.039505548775196075,
"logits/rejected": 0.05482972040772438,
"logps/chosen": -233.69537353515625,
"logps/rejected": -236.25210571289062,
"loss": 0.7907,
"rewards/accuracies": 0.49406251311302185,
"rewards/chosen": -0.8005841970443726,
"rewards/margins": 0.00799934659153223,
"rewards/rejected": -0.8085834980010986,
"step": 400
},
{
"epoch": 0.19900497512437812,
"grad_norm": 8.521681785583496,
"learning_rate": 4.5269439940365644e-07,
"logits/chosen": 0.11151163280010223,
"logits/rejected": 0.1460810899734497,
"logps/chosen": -227.23020935058594,
"logps/rejected": -230.19451904296875,
"loss": 0.7849,
"rewards/accuracies": 0.4946874976158142,
"rewards/chosen": -0.7644023299217224,
"rewards/margins": -0.004693666007369757,
"rewards/rejected": -0.7597086429595947,
"step": 500
},
{
"epoch": 0.23880597014925373,
"grad_norm": 7.143113136291504,
"learning_rate": 4.328573782827409e-07,
"logits/chosen": 0.09673156589269638,
"logits/rejected": 0.14390873908996582,
"logps/chosen": -225.45919799804688,
"logps/rejected": -225.44052124023438,
"loss": 0.77,
"rewards/accuracies": 0.5045312643051147,
"rewards/chosen": -0.7256423830986023,
"rewards/margins": 0.004257932770997286,
"rewards/rejected": -0.7299003005027771,
"step": 600
},
{
"epoch": 0.27860696517412936,
"grad_norm": 6.525391101837158,
"learning_rate": 4.1016403737218373e-07,
"logits/chosen": 0.05486857891082764,
"logits/rejected": 0.09620587527751923,
"logps/chosen": -228.3573760986328,
"logps/rejected": -223.32395935058594,
"loss": 0.7696,
"rewards/accuracies": 0.5034375190734863,
"rewards/chosen": -0.7243590354919434,
"rewards/margins": 0.0008349200943484902,
"rewards/rejected": -0.7251940369606018,
"step": 700
},
{
"epoch": 0.31840796019900497,
"grad_norm": 8.071100234985352,
"learning_rate": 3.849688575211836e-07,
"logits/chosen": 0.004134657327085733,
"logits/rejected": 0.02180260606110096,
"logps/chosen": -234.44007873535156,
"logps/rejected": -232.92913818359375,
"loss": 0.7682,
"rewards/accuracies": 0.5073437690734863,
"rewards/chosen": -0.7022367119789124,
"rewards/margins": 0.0076367598958313465,
"rewards/rejected": -0.7098734974861145,
"step": 800
},
{
"epoch": 0.3582089552238806,
"grad_norm": 9.765380859375,
"learning_rate": 3.576653995009154e-07,
"logits/chosen": 0.018308693543076515,
"logits/rejected": 0.04190211370587349,
"logps/chosen": -224.4905242919922,
"logps/rejected": -228.26222229003906,
"loss": 0.7467,
"rewards/accuracies": 0.5228124856948853,
"rewards/chosen": -0.7309367656707764,
"rewards/margins": 0.04116936773061752,
"rewards/rejected": -0.7721061110496521,
"step": 900
},
{
"epoch": 0.39800995024875624,
"grad_norm": 6.442622184753418,
"learning_rate": 3.286801563968721e-07,
"logits/chosen": -0.018274417147040367,
"logits/rejected": -0.015018883161246777,
"logps/chosen": -230.45724487304688,
"logps/rejected": -229.37107849121094,
"loss": 0.7517,
"rewards/accuracies": 0.5123437643051147,
"rewards/chosen": -0.738405704498291,
"rewards/margins": 0.026958582922816277,
"rewards/rejected": -0.765364408493042,
"step": 1000
},
{
"epoch": 0.43781094527363185,
"grad_norm": 3.437434434890747,
"learning_rate": 2.9846589158269034e-07,
"logits/chosen": -0.03212662786245346,
"logits/rejected": -0.016695672646164894,
"logps/chosen": -233.62451171875,
"logps/rejected": -230.5555419921875,
"loss": 0.7495,
"rewards/accuracies": 0.5182812213897705,
"rewards/chosen": -0.6797711253166199,
"rewards/margins": 0.029431035742163658,
"rewards/rejected": -0.7092021107673645,
"step": 1100
},
{
"epoch": 0.47761194029850745,
"grad_norm": 9.21783447265625,
"learning_rate": 2.674945663394993e-07,
"logits/chosen": -0.022503716871142387,
"logits/rejected": -0.0002497506211511791,
"logps/chosen": -224.26544189453125,
"logps/rejected": -220.6869354248047,
"loss": 0.7452,
"rewards/accuracies": 0.5198437571525574,
"rewards/chosen": -0.6790177822113037,
"rewards/margins": 0.027901820838451385,
"rewards/rejected": -0.7069195508956909,
"step": 1200
},
{
"epoch": 0.5174129353233831,
"grad_norm": 5.184940814971924,
"learning_rate": 2.3624996759476285e-07,
"logits/chosen": -0.02832232229411602,
"logits/rejected": 0.001360323396511376,
"logps/chosen": -225.9873504638672,
"logps/rejected": -224.61253356933594,
"loss": 0.7448,
"rewards/accuracies": 0.5210937261581421,
"rewards/chosen": -0.6495236158370972,
"rewards/margins": 0.03301383554935455,
"rewards/rejected": -0.6825373768806458,
"step": 1300
},
{
"epoch": 0.5572139303482587,
"grad_norm": 4.051429271697998,
"learning_rate": 2.0522015093886614e-07,
"logits/chosen": -0.07805901765823364,
"logits/rejected": -0.054497309029102325,
"logps/chosen": -227.91299438476562,
"logps/rejected": -231.43540954589844,
"loss": 0.7352,
"rewards/accuracies": 0.5206249952316284,
"rewards/chosen": -0.6832866072654724,
"rewards/margins": 0.05871019884943962,
"rewards/rejected": -0.7419967651367188,
"step": 1400
},
{
"epoch": 0.5970149253731343,
"grad_norm": 4.9932379722595215,
"learning_rate": 1.7488981696314154e-07,
"logits/chosen": -0.07948292791843414,
"logits/rejected": -0.04982204735279083,
"logps/chosen": -226.94752502441406,
"logps/rejected": -225.03204345703125,
"loss": 0.7349,
"rewards/accuracies": 0.5214062333106995,
"rewards/chosen": -0.6902438998222351,
"rewards/margins": 0.03936518728733063,
"rewards/rejected": -0.7296090722084045,
"step": 1500
},
{
"epoch": 0.6368159203980099,
"grad_norm": 8.183513641357422,
"learning_rate": 1.4573274000458839e-07,
"logits/chosen": -0.08282151073217392,
"logits/rejected": -0.06661933660507202,
"logps/chosen": -227.28172302246094,
"logps/rejected": -227.94952392578125,
"loss": 0.7374,
"rewards/accuracies": 0.5310937762260437,
"rewards/chosen": -0.6844578385353088,
"rewards/margins": 0.03974698856472969,
"rewards/rejected": -0.7242047190666199,
"step": 1600
},
{
"epoch": 0.6766169154228856,
"grad_norm": 5.1135573387146,
"learning_rate": 1.1820436756391414e-07,
"logits/chosen": -0.1615159809589386,
"logits/rejected": -0.13768981397151947,
"logps/chosen": -230.94302368164062,
"logps/rejected": -229.08021545410156,
"loss": 0.7359,
"rewards/accuracies": 0.5257812738418579,
"rewards/chosen": -0.654849648475647,
"rewards/margins": 0.04666764661669731,
"rewards/rejected": -0.7015172839164734,
"step": 1700
}
],
"logging_steps": 100,
"max_steps": 2512,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
|