{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.7330960854092528,
  "eval_steps": 500,
  "global_step": 12,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2277580071174377,
      "grad_norm": 91.0121833928011,
      "learning_rate": 1e-08,
      "logits/chosen": -0.16277597844600677,
      "logits/rejected": -0.17357584834098816,
      "logps/chosen": -97.06861114501953,
      "logps/rejected": -61.69409942626953,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.4555160142348754,
      "grad_norm": 92.40889807622672,
      "learning_rate": 2.1590760247862313e-06,
      "logits/chosen": -0.16356882452964783,
      "logits/rejected": -0.17447908222675323,
      "logps/chosen": -101.86387634277344,
      "logps/rejected": -61.07148361206055,
      "loss": 0.685,
      "rewards/accuracies": 0.8046875,
      "rewards/chosen": 0.010509281419217587,
      "rewards/margins": 0.01663581281900406,
      "rewards/rejected": -0.00612653186544776,
      "step": 2
    },
    {
      "epoch": 0.6832740213523132,
      "grad_norm": 91.93831171277715,
      "learning_rate": 3.416204910485067e-06,
      "logits/chosen": -0.16362115740776062,
      "logits/rejected": -0.17490728199481964,
      "logps/chosen": -107.25846862792969,
      "logps/rejected": -65.02969360351562,
      "loss": 0.6758,
      "rewards/accuracies": 0.810546875,
      "rewards/chosen": 0.022437484934926033,
      "rewards/margins": 0.036239851266145706,
      "rewards/rejected": -0.013802366331219673,
      "step": 3
    },
    {
      "epoch": 0.9110320284697508,
      "grad_norm": 66.11318002285611,
      "learning_rate": 4.308152049572463e-06,
      "logits/chosen": -0.13364359736442566,
      "logits/rejected": -0.14053839445114136,
      "logps/chosen": -99.9971923828125,
      "logps/rejected": -81.93231201171875,
      "loss": 0.5446,
      "rewards/accuracies": 0.73828125,
      "rewards/chosen": 0.727262020111084,
      "rewards/margins": 3.223222255706787,
      "rewards/rejected": -2.495960235595703,
      "step": 4
    },
    {
      "epoch": 0.9110320284697508,
      "eval_logits/chosen": -0.11921684443950653,
      "eval_logits/rejected": -0.12497611343860626,
      "eval_logps/chosen": -79.31426239013672,
      "eval_logps/rejected": -84.31745910644531,
      "eval_loss": 0.4257468581199646,
      "eval_rewards/accuracies": 0.7913534045219421,
      "eval_rewards/chosen": 1.9340568780899048,
      "eval_rewards/margins": 4.300276279449463,
      "eval_rewards/rejected": -2.3662197589874268,
      "eval_runtime": 113.096,
      "eval_samples_per_second": 4.704,
      "eval_steps_per_second": 1.176,
      "step": 4
    },
    {
      "epoch": 1.1387900355871885,
      "grad_norm": 34.50854460328227,
      "learning_rate": 5e-06,
      "logits/chosen": -0.11707393825054169,
      "logits/rejected": -0.12238450348377228,
      "logps/chosen": -73.72509765625,
      "logps/rejected": -87.21283721923828,
      "loss": 0.2882,
      "rewards/accuracies": 0.85546875,
      "rewards/chosen": 2.148752212524414,
      "rewards/margins": 5.1668314933776855,
      "rewards/rejected": -3.0180792808532715,
      "step": 5
    },
    {
      "epoch": 1.3665480427046264,
      "grad_norm": 13.417292887403837,
      "learning_rate": 5e-06,
      "logits/chosen": -0.10912273824214935,
      "logits/rejected": -0.11423137038946152,
      "logps/chosen": -76.66941833496094,
      "logps/rejected": -106.3405532836914,
      "loss": 0.1626,
      "rewards/accuracies": 0.931640625,
      "rewards/chosen": 3.0610146522521973,
      "rewards/margins": 7.797012805938721,
      "rewards/rejected": -4.735998153686523,
      "step": 6
    },
    {
      "epoch": 1.594306049822064,
      "grad_norm": 19.9016761528305,
      "learning_rate": 4.287142857142857e-06,
      "logits/chosen": -0.139112189412117,
      "logits/rejected": -0.14223890006542206,
      "logps/chosen": -69.056884765625,
      "logps/rejected": -128.1103057861328,
      "loss": 0.1538,
      "rewards/accuracies": 0.9453125,
      "rewards/chosen": 2.666012763977051,
      "rewards/margins": 8.673298835754395,
      "rewards/rejected": -6.0072855949401855,
      "step": 7
    },
    {
      "epoch": 1.8220640569395017,
      "grad_norm": 9.174237329584994,
      "learning_rate": 3.5742857142857147e-06,
      "logits/chosen": -0.17990067601203918,
      "logits/rejected": -0.18341703712940216,
      "logps/chosen": -70.980224609375,
      "logps/rejected": -110.5055923461914,
      "loss": 0.1066,
      "rewards/accuracies": 0.962890625,
      "rewards/chosen": 3.78892183303833,
      "rewards/margins": 8.644651412963867,
      "rewards/rejected": -4.855730056762695,
      "step": 8
    },
    {
      "epoch": 1.8220640569395017,
      "eval_logits/chosen": -0.17611196637153625,
      "eval_logits/rejected": -0.18176034092903137,
      "eval_logps/chosen": -79.25057983398438,
      "eval_logps/rejected": -99.81920623779297,
      "eval_loss": 0.2954053282737732,
      "eval_rewards/accuracies": 0.8721804618835449,
      "eval_rewards/chosen": 1.9404244422912598,
      "eval_rewards/margins": 5.856820106506348,
      "eval_rewards/rejected": -3.916395664215088,
      "eval_runtime": 114.7166,
      "eval_samples_per_second": 4.638,
      "eval_steps_per_second": 1.159,
      "step": 8
    },
    {
      "epoch": 2.0498220640569396,
      "grad_norm": 9.255031678596845,
      "learning_rate": 2.8614285714285714e-06,
      "logits/chosen": -0.1780690848827362,
      "logits/rejected": -0.18225862085819244,
      "logps/chosen": -68.87483215332031,
      "logps/rejected": -115.84172058105469,
      "loss": 0.1101,
      "rewards/accuracies": 0.962890625,
      "rewards/chosen": 3.048909902572632,
      "rewards/margins": 8.731538772583008,
      "rewards/rejected": -5.682629108428955,
      "step": 9
    },
    {
      "epoch": 2.277580071174377,
      "grad_norm": 2.670834355828655,
      "learning_rate": 2.1485714285714285e-06,
      "logits/chosen": -0.17336754500865936,
      "logits/rejected": -0.17464184761047363,
      "logps/chosen": -69.73114013671875,
      "logps/rejected": -122.48251342773438,
      "loss": 0.0307,
      "rewards/accuracies": 0.99609375,
      "rewards/chosen": 3.650970220565796,
      "rewards/margins": 9.824505805969238,
      "rewards/rejected": -6.173535346984863,
      "step": 10
    },
    {
      "epoch": 2.505338078291815,
      "grad_norm": 2.28657153621763,
      "learning_rate": 1.4357142857142856e-06,
      "logits/chosen": -0.15467900037765503,
      "logits/rejected": -0.15725603699684143,
      "logps/chosen": -72.08135986328125,
      "logps/rejected": -129.1175994873047,
      "loss": 0.0335,
      "rewards/accuracies": 0.994140625,
      "rewards/chosen": 3.849161148071289,
      "rewards/margins": 10.588750839233398,
      "rewards/rejected": -6.739590644836426,
      "step": 11
    },
    {
      "epoch": 2.7330960854092528,
      "grad_norm": 2.014013246005093,
      "learning_rate": 7.228571428571429e-07,
      "logits/chosen": -0.13617806136608124,
      "logits/rejected": -0.13841822743415833,
      "logps/chosen": -65.92801666259766,
      "logps/rejected": -133.12391662597656,
      "loss": 0.0267,
      "rewards/accuracies": 0.99609375,
      "rewards/chosen": 3.2951810359954834,
      "rewards/margins": 10.423425674438477,
      "rewards/rejected": -7.128243446350098,
      "step": 12
    },
    {
      "epoch": 2.7330960854092528,
      "eval_logits/chosen": -0.13148866593837738,
      "eval_logits/rejected": -0.1344377100467682,
      "eval_logps/chosen": -81.27617645263672,
      "eval_logps/rejected": -107.10063171386719,
      "eval_loss": 0.2463793009519577,
      "eval_rewards/accuracies": 0.8778195381164551,
      "eval_rewards/chosen": 1.7378650903701782,
      "eval_rewards/margins": 6.3824028968811035,
      "eval_rewards/rejected": -4.644537925720215,
      "eval_runtime": 114.8291,
      "eval_samples_per_second": 4.633,
      "eval_steps_per_second": 1.158,
      "step": 12
    }
  ],
  "logging_steps": 1,
  "max_steps": 12,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}