{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.896551724137931,
  "eval_steps": 500,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.3448275862068966,
      "grad_norm": 0.5030671954154968,
      "learning_rate": 8.620689655172415e-07,
      "logits/chosen": 1.8564815521240234,
      "logits/rejected": 1.8255866765975952,
      "logps/chosen": -95.54290008544922,
      "logps/rejected": -79.79582214355469,
      "loss": 0.693,
      "rewards/accuracies": 0.4000000059604645,
      "rewards/chosen": -0.0028573228046298027,
      "rewards/margins": 0.0005112116923555732,
      "rewards/rejected": -0.0033685355447232723,
      "step": 10
    },
    {
      "epoch": 0.6896551724137931,
      "grad_norm": 0.45081979036331177,
      "learning_rate": 1.724137931034483e-06,
      "logits/chosen": 1.781947374343872,
      "logits/rejected": 1.6911979913711548,
      "logps/chosen": -104.20463562011719,
      "logps/rejected": -78.91150665283203,
      "loss": 0.6929,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.00018558502779342234,
      "rewards/margins": -0.002152198925614357,
      "rewards/rejected": 0.0023377849720418453,
      "step": 20
    },
    {
      "epoch": 1.0344827586206897,
      "grad_norm": 0.41456305980682373,
      "learning_rate": 2.5862068965517246e-06,
      "logits/chosen": 1.7937275171279907,
      "logits/rejected": 1.729128122329712,
      "logps/chosen": -90.21867370605469,
      "logps/rejected": -71.69265747070312,
      "loss": 0.6935,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.003012509550899267,
      "rewards/margins": 0.009945740923285484,
      "rewards/rejected": -0.006933231838047504,
      "step": 30
    },
    {
      "epoch": 1.3793103448275863,
      "grad_norm": 0.45128729939460754,
      "learning_rate": 3.448275862068966e-06,
      "logits/chosen": 1.864363670349121,
      "logits/rejected": 1.9002879858016968,
      "logps/chosen": -87.1126480102539,
      "logps/rejected": -77.18392181396484,
      "loss": 0.6923,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.0013037443859502673,
      "rewards/margins": 0.006342612206935883,
      "rewards/rejected": -0.005038867238909006,
      "step": 40
    },
    {
      "epoch": 1.7241379310344827,
      "grad_norm": 0.4960842728614807,
      "learning_rate": 4.310344827586207e-06,
      "logits/chosen": 1.7825076580047607,
      "logits/rejected": 1.829602837562561,
      "logps/chosen": -78.63069915771484,
      "logps/rejected": -91.36685180664062,
      "loss": 0.6954,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.009792634285986423,
      "rewards/margins": 0.014146638102829456,
      "rewards/rejected": -0.0043540047481656075,
      "step": 50
    },
    {
      "epoch": 2.0689655172413794,
      "grad_norm": 0.5404626727104187,
      "learning_rate": 4.999818897894192e-06,
      "logits/chosen": 1.8061244487762451,
      "logits/rejected": 1.7855304479599,
      "logps/chosen": -90.69072723388672,
      "logps/rejected": -72.0459213256836,
      "loss": 0.6938,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.002143201883882284,
      "rewards/margins": 0.005239076912403107,
      "rewards/rejected": -0.0030958750285208225,
      "step": 60
    },
    {
      "epoch": 2.413793103448276,
      "grad_norm": 0.5091063380241394,
      "learning_rate": 4.9934830787948756e-06,
      "logits/chosen": 1.6158870458602905,
      "logits/rejected": 1.696754813194275,
      "logps/chosen": -76.28319549560547,
      "logps/rejected": -77.81846618652344,
      "loss": 0.6905,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 1.5049334251671098e-05,
      "rewards/margins": 0.007967600598931313,
      "rewards/rejected": -0.00795255322009325,
      "step": 70
    },
    {
      "epoch": 2.7586206896551726,
      "grad_norm": 0.6066665053367615,
      "learning_rate": 4.978118375700895e-06,
      "logits/chosen": 1.6109062433242798,
      "logits/rejected": 1.7185981273651123,
      "logps/chosen": -84.615966796875,
      "logps/rejected": -96.0793228149414,
      "loss": 0.6942,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.000627221364993602,
      "rewards/margins": 0.0009560534963384271,
      "rewards/rejected": -0.0015832759672775865,
      "step": 80
    },
    {
      "epoch": 3.103448275862069,
      "grad_norm": 0.5341666340827942,
      "learning_rate": 4.953780424089803e-06,
      "logits/chosen": 1.8657306432724,
      "logits/rejected": 1.8894052505493164,
      "logps/chosen": -87.67496490478516,
      "logps/rejected": -77.41777038574219,
      "loss": 0.6919,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.004135184455662966,
      "rewards/margins": 0.011894735507667065,
      "rewards/rejected": -0.016029920428991318,
      "step": 90
    },
    {
      "epoch": 3.4482758620689653,
      "grad_norm": 0.5891664028167725,
      "learning_rate": 4.920557351506409e-06,
      "logits/chosen": 1.8143476247787476,
      "logits/rejected": 1.8618648052215576,
      "logps/chosen": -82.7096176147461,
      "logps/rejected": -80.64532470703125,
      "loss": 0.6905,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.014110831543803215,
      "rewards/margins": -0.007867741398513317,
      "rewards/rejected": -0.0062430910766124725,
      "step": 100
    },
    {
      "epoch": 3.793103448275862,
      "grad_norm": 0.5773605704307556,
      "learning_rate": 4.878569458453592e-06,
      "logits/chosen": 1.7850589752197266,
      "logits/rejected": 1.76922607421875,
      "logps/chosen": -87.4909896850586,
      "logps/rejected": -90.67459869384766,
      "loss": 0.6899,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.0026252653915435076,
      "rewards/margins": 0.014214910566806793,
      "rewards/rejected": -0.011589646339416504,
      "step": 110
    },
    {
      "epoch": 4.137931034482759,
      "grad_norm": 0.6582341194152832,
      "learning_rate": 4.827968782785062e-06,
      "logits/chosen": 1.7467533349990845,
      "logits/rejected": 1.8858740329742432,
      "logps/chosen": -69.12274169921875,
      "logps/rejected": -94.86824035644531,
      "loss": 0.6878,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.009279675781726837,
      "rewards/margins": 0.0035783485509455204,
      "rewards/rejected": -0.012858022935688496,
      "step": 120
    },
    {
      "epoch": 4.482758620689655,
      "grad_norm": 0.7790700793266296,
      "learning_rate": 4.7689385491773934e-06,
      "logits/chosen": 1.8058643341064453,
      "logits/rejected": 1.866713523864746,
      "logps/chosen": -79.86953735351562,
      "logps/rejected": -76.4103012084961,
      "loss": 0.6857,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.00016659722314216197,
      "rewards/margins": 0.023889193311333656,
      "rewards/rejected": -0.02372259460389614,
      "step": 130
    },
    {
      "epoch": 4.827586206896552,
      "grad_norm": 0.7933465242385864,
      "learning_rate": 4.70169250567482e-06,
      "logits/chosen": 1.7705074548721313,
      "logits/rejected": 1.7728191614151,
      "logps/chosen": -86.46583557128906,
      "logps/rejected": -75.83828735351562,
      "loss": 0.6804,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.0001745700865285471,
      "rewards/margins": 0.029326915740966797,
      "rewards/rejected": -0.02950148656964302,
      "step": 140
    },
    {
      "epoch": 5.172413793103448,
      "grad_norm": 0.7435988187789917,
      "learning_rate": 4.626474149709127e-06,
      "logits/chosen": 1.740312933921814,
      "logits/rejected": 1.7526963949203491,
      "logps/chosen": -100.93019104003906,
      "logps/rejected": -90.39695739746094,
      "loss": 0.6803,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.013870477676391602,
      "rewards/margins": 0.03614342585206032,
      "rewards/rejected": -0.05001390725374222,
      "step": 150
    },
    {
      "epoch": 5.517241379310345,
      "grad_norm": 0.8416279554367065,
      "learning_rate": 4.54355584639723e-06,
      "logits/chosen": 1.7618926763534546,
      "logits/rejected": 1.8320789337158203,
      "logps/chosen": -71.05604553222656,
      "logps/rejected": -82.77362060546875,
      "loss": 0.6748,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.028952527791261673,
      "rewards/margins": 0.03283882141113281,
      "rewards/rejected": -0.061791349202394485,
      "step": 160
    },
    {
      "epoch": 5.862068965517241,
      "grad_norm": 0.9731557965278625,
      "learning_rate": 4.45323784230908e-06,
      "logits/chosen": 1.7541310787200928,
      "logits/rejected": 1.845503807067871,
      "logps/chosen": -79.18022155761719,
      "logps/rejected": -89.36749267578125,
      "loss": 0.6706,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.02990702912211418,
      "rewards/margins": 0.05189533159136772,
      "rewards/rejected": -0.0818023532629013,
      "step": 170
    },
    {
      "epoch": 6.206896551724138,
      "grad_norm": 0.8338156938552856,
      "learning_rate": 4.355847178277025e-06,
      "logits/chosen": 1.7501287460327148,
      "logits/rejected": 1.7370710372924805,
      "logps/chosen": -91.2562484741211,
      "logps/rejected": -80.88179779052734,
      "loss": 0.6661,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.03045990690588951,
      "rewards/margins": 0.08234542608261108,
      "rewards/rejected": -0.1128053292632103,
      "step": 180
    },
    {
      "epoch": 6.551724137931035,
      "grad_norm": 0.8057475686073303,
      "learning_rate": 4.2517365051833564e-06,
      "logits/chosen": 1.8594491481781006,
      "logits/rejected": 1.9256786108016968,
      "logps/chosen": -74.18313598632812,
      "logps/rejected": -84.37249755859375,
      "loss": 0.6573,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.05836126208305359,
      "rewards/margins": 0.06953532248735428,
      "rewards/rejected": -0.12789657711982727,
      "step": 190
    },
    {
      "epoch": 6.896551724137931,
      "grad_norm": 0.957699716091156,
      "learning_rate": 4.141282807014034e-06,
      "logits/chosen": 1.696286916732788,
      "logits/rejected": 1.782080888748169,
      "logps/chosen": -83.79094696044922,
      "logps/rejected": -79.17488861083984,
      "loss": 0.6595,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.08588370680809021,
      "rewards/margins": 0.062126852571964264,
      "rewards/rejected": -0.14801056683063507,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 580,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.756175928193188e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}