{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.405405405405405,
"eval_steps": 500,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.36036036036036034,
"grad_norm": 0.5300200581550598,
"learning_rate": 9.259259259259259e-07,
"logits/chosen": 1.7405741214752197,
"logits/rejected": 1.663368582725525,
"logps/chosen": -97.57881164550781,
"logps/rejected": -70.59793853759766,
"loss": 0.6938,
"rewards/accuracies": 0.4375,
"rewards/chosen": 0.0018835498485714197,
"rewards/margins": -0.0008800366776995361,
"rewards/rejected": 0.0027635858859866858,
"step": 10
},
{
"epoch": 0.7207207207207207,
"grad_norm": 0.4883837401866913,
"learning_rate": 1.8518518518518519e-06,
"logits/chosen": 1.8280715942382812,
"logits/rejected": 1.8215343952178955,
"logps/chosen": -90.60624694824219,
"logps/rejected": -79.04981994628906,
"loss": 0.6945,
"rewards/accuracies": 0.4749999940395355,
"rewards/chosen": 0.0014454321935772896,
"rewards/margins": -0.004174981266260147,
"rewards/rejected": 0.005620413459837437,
"step": 20
},
{
"epoch": 1.0810810810810811,
"grad_norm": 0.5554109215736389,
"learning_rate": 2.7777777777777783e-06,
"logits/chosen": 1.7980501651763916,
"logits/rejected": 1.841104507446289,
"logps/chosen": -80.78228759765625,
"logps/rejected": -85.08882141113281,
"loss": 0.693,
"rewards/accuracies": 0.4749999940395355,
"rewards/chosen": 0.0042568682692945,
"rewards/margins": -0.0038191028870642185,
"rewards/rejected": 0.008075973019003868,
"step": 30
},
{
"epoch": 1.4414414414414414,
"grad_norm": 0.537497341632843,
"learning_rate": 3.7037037037037037e-06,
"logits/chosen": 1.7320470809936523,
"logits/rejected": 1.7411377429962158,
"logps/chosen": -82.24813842773438,
"logps/rejected": -80.90709686279297,
"loss": 0.6952,
"rewards/accuracies": 0.42500001192092896,
"rewards/chosen": -0.00350201572291553,
"rewards/margins": -0.012091752141714096,
"rewards/rejected": 0.008589735254645348,
"step": 40
},
{
"epoch": 1.8018018018018018,
"grad_norm": 0.5023094415664673,
"learning_rate": 4.62962962962963e-06,
"logits/chosen": 1.8292573690414429,
"logits/rejected": 1.8632844686508179,
"logps/chosen": -85.98481750488281,
"logps/rejected": -86.14008331298828,
"loss": 0.6928,
"rewards/accuracies": 0.4124999940395355,
"rewards/chosen": 0.00168000184930861,
"rewards/margins": -0.011136507615447044,
"rewards/rejected": 0.01281650923192501,
"step": 50
},
{
"epoch": 2.1621621621621623,
"grad_norm": 0.4981901943683624,
"learning_rate": 4.998119881260576e-06,
"logits/chosen": 1.8536121845245361,
"logits/rejected": 1.7948782444000244,
"logps/chosen": -90.0439682006836,
"logps/rejected": -79.8309555053711,
"loss": 0.6914,
"rewards/accuracies": 0.48750001192092896,
"rewards/chosen": 0.0025153260212391615,
"rewards/margins": -0.0021209525875747204,
"rewards/rejected": 0.004636278375983238,
"step": 60
},
{
"epoch": 2.5225225225225225,
"grad_norm": 0.6208468079566956,
"learning_rate": 4.9866405060165044e-06,
"logits/chosen": 1.7845121622085571,
"logits/rejected": 1.8721017837524414,
"logps/chosen": -74.6461181640625,
"logps/rejected": -98.91078186035156,
"loss": 0.694,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": 0.00022823773906566203,
"rewards/margins": 0.0024594543501734734,
"rewards/rejected": -0.00223121652379632,
"step": 70
},
{
"epoch": 2.8828828828828827,
"grad_norm": 0.5074446797370911,
"learning_rate": 4.964774158361991e-06,
"logits/chosen": 1.8522154092788696,
"logits/rejected": 1.8161392211914062,
"logps/chosen": -89.16864013671875,
"logps/rejected": -81.97349548339844,
"loss": 0.6911,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": 0.006215038243681192,
"rewards/margins": 0.005521883722394705,
"rewards/rejected": 0.0006931538810022175,
"step": 80
},
{
"epoch": 3.2432432432432434,
"grad_norm": 0.6148263812065125,
"learning_rate": 4.93261217644956e-06,
"logits/chosen": 1.7915083169937134,
"logits/rejected": 1.7579513788223267,
"logps/chosen": -88.66618347167969,
"logps/rejected": -80.2238998413086,
"loss": 0.6918,
"rewards/accuracies": 0.574999988079071,
"rewards/chosen": 0.005774274934083223,
"rewards/margins": 0.016623441129922867,
"rewards/rejected": -0.010849165730178356,
"step": 90
},
{
"epoch": 3.6036036036036037,
"grad_norm": 0.6143240332603455,
"learning_rate": 4.8902889044347e-06,
"logits/chosen": 1.6886920928955078,
"logits/rejected": 1.8121178150177002,
"logps/chosen": -72.908203125,
"logps/rejected": -92.44017028808594,
"loss": 0.6911,
"rewards/accuracies": 0.48750001192092896,
"rewards/chosen": -0.0015121791511774063,
"rewards/margins": 0.0036420777905732393,
"rewards/rejected": -0.005154256708920002,
"step": 100
},
{
"epoch": 3.963963963963964,
"grad_norm": 0.7140340209007263,
"learning_rate": 4.837981131305475e-06,
"logits/chosen": 1.7481105327606201,
"logits/rejected": 1.723141074180603,
"logps/chosen": -80.63452911376953,
"logps/rejected": -72.64492797851562,
"loss": 0.6894,
"rewards/accuracies": 0.48750001192092896,
"rewards/chosen": -0.006246576085686684,
"rewards/margins": -0.0035218377597630024,
"rewards/rejected": -0.0027247383259236813,
"step": 110
},
{
"epoch": 4.324324324324325,
"grad_norm": 1.1645854711532593,
"learning_rate": 4.775907352415367e-06,
"logits/chosen": 1.7416290044784546,
"logits/rejected": 1.8237574100494385,
"logps/chosen": -89.01248931884766,
"logps/rejected": -92.31901550292969,
"loss": 0.6869,
"rewards/accuracies": 0.612500011920929,
"rewards/chosen": -0.008855113759636879,
"rewards/margins": 0.013151508755981922,
"rewards/rejected": -0.022006623446941376,
"step": 120
},
{
"epoch": 4.684684684684685,
"grad_norm": 0.6638664603233337,
"learning_rate": 4.70432685680402e-06,
"logits/chosen": 1.7124770879745483,
"logits/rejected": 1.7777938842773438,
"logps/chosen": -84.87271881103516,
"logps/rejected": -92.4839096069336,
"loss": 0.6855,
"rewards/accuracies": 0.574999988079071,
"rewards/chosen": -0.004437069408595562,
"rewards/margins": 0.019811339676380157,
"rewards/rejected": -0.024248410016298294,
"step": 130
},
{
"epoch": 5.045045045045045,
"grad_norm": 0.785169780254364,
"learning_rate": 4.623538644118244e-06,
"logits/chosen": 1.7838690280914307,
"logits/rejected": 1.8052574396133423,
"logps/chosen": -94.57842254638672,
"logps/rejected": -80.6390609741211,
"loss": 0.6836,
"rewards/accuracies": 0.48750001192092896,
"rewards/chosen": -0.026147600263357162,
"rewards/margins": 0.010936126112937927,
"rewards/rejected": -0.03708372637629509,
"step": 140
},
{
"epoch": 5.405405405405405,
"grad_norm": 0.745952844619751,
"learning_rate": 4.533880175657419e-06,
"logits/chosen": 1.7925735712051392,
"logits/rejected": 1.7987396717071533,
"logps/chosen": -82.4854507446289,
"logps/rejected": -77.50787353515625,
"loss": 0.6767,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.013469865545630455,
"rewards/margins": 0.04023212194442749,
"rewards/rejected": -0.053701985627412796,
"step": 150
}
],
"logging_steps": 10,
"max_steps": 540,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.08406132209877e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}