Incomplete, committed on
Commit
9547147
·
verified ·
1 Parent(s): ca52646

End of training

Browse files
README.md CHANGED
@@ -3,9 +3,10 @@ library_name: peft
3
  license: llama3.1
4
  base_model: meta-llama/Llama-3.1-8B-Instruct
5
  tags:
 
 
6
  - trl
7
  - dpo
8
- - llama-factory
9
  - generated_from_trainer
10
  model-index:
11
  - name: Llama-3.1-8B-Instruct_resist_10
@@ -17,7 +18,7 @@ should probably proofread and complete it, then remove this comment. -->
17
 
18
  # Llama-3.1-8B-Instruct_resist_10
19
 
20
- This model is a fine-tuned version of [meta-llama/Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) on an unknown dataset.
21
 
22
  ## Model description
23
 
 
3
  license: llama3.1
4
  base_model: meta-llama/Llama-3.1-8B-Instruct
5
  tags:
6
+ - llama-factory
7
+ - lora
8
  - trl
9
  - dpo
 
10
  - generated_from_trainer
11
  model-index:
12
  - name: Llama-3.1-8B-Instruct_resist_10
 
18
 
19
  # Llama-3.1-8B-Instruct_resist_10
20
 
21
+ This model is a fine-tuned version of [meta-llama/Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) on the resist_10 dataset.
22
 
23
  ## Model description
24
 
all_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.999259807549963,
3
+ "total_flos": 3.8465011975952794e+17,
4
+ "train_loss": 0.33879759046766494,
5
+ "train_runtime": 9841.7629,
6
+ "train_samples_per_second": 0.549,
7
+ "train_steps_per_second": 0.069
8
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.999259807549963,
3
+ "total_flos": 3.8465011975952794e+17,
4
+ "train_loss": 0.33879759046766494,
5
+ "train_runtime": 9841.7629,
6
+ "train_samples_per_second": 0.549,
7
+ "train_steps_per_second": 0.069
8
+ }
trainer_state.json ADDED
@@ -0,0 +1,327 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.999259807549963,
5
+ "eval_steps": 500,
6
+ "global_step": 675,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.050333086602516654,
13
+ "grad_norm": 3.643551826477051,
14
+ "learning_rate": 5e-07,
15
+ "logits/chosen": -0.5447720289230347,
16
+ "logits/rejected": -0.4665735065937042,
17
+ "logps/chosen": -70.74549102783203,
18
+ "logps/rejected": -10.341827392578125,
19
+ "loss": 0.6933,
20
+ "rewards/accuracies": 0.4852941334247589,
21
+ "rewards/chosen": 0.0009019060526043177,
22
+ "rewards/margins": 2.6691925086197443e-05,
23
+ "rewards/rejected": 0.0008752142894081771,
24
+ "step": 34
25
+ },
26
+ {
27
+ "epoch": 0.10066617320503331,
28
+ "grad_norm": 8.25416374206543,
29
+ "learning_rate": 1e-06,
30
+ "logits/chosen": -0.5530118346214294,
31
+ "logits/rejected": -0.46221238374710083,
32
+ "logps/chosen": -64.53274536132812,
33
+ "logps/rejected": -8.40461254119873,
34
+ "loss": 0.6902,
35
+ "rewards/accuracies": 0.5625,
36
+ "rewards/chosen": 0.004430013243108988,
37
+ "rewards/margins": 0.006295821163803339,
38
+ "rewards/rejected": -0.0018658083863556385,
39
+ "step": 68
40
+ },
41
+ {
42
+ "epoch": 0.15099925980754997,
43
+ "grad_norm": 5.755356788635254,
44
+ "learning_rate": 9.43986820428336e-07,
45
+ "logits/chosen": -0.531723141670227,
46
+ "logits/rejected": -0.4240272343158722,
47
+ "logps/chosen": -74.01490783691406,
48
+ "logps/rejected": -9.048775672912598,
49
+ "loss": 0.6686,
50
+ "rewards/accuracies": 0.8713235259056091,
51
+ "rewards/chosen": 0.0434609092772007,
52
+ "rewards/margins": 0.05061572045087814,
53
+ "rewards/rejected": -0.007154808845371008,
54
+ "step": 102
55
+ },
56
+ {
57
+ "epoch": 0.20133234641006661,
58
+ "grad_norm": 6.1793928146362305,
59
+ "learning_rate": 8.879736408566722e-07,
60
+ "logits/chosen": -0.5414551496505737,
61
+ "logits/rejected": -0.44891485571861267,
62
+ "logps/chosen": -74.40818786621094,
63
+ "logps/rejected": -11.396381378173828,
64
+ "loss": 0.6245,
65
+ "rewards/accuracies": 0.9779411554336548,
66
+ "rewards/chosen": 0.12970797717571259,
67
+ "rewards/margins": 0.14640042185783386,
68
+ "rewards/rejected": -0.016692442819476128,
69
+ "step": 136
70
+ },
71
+ {
72
+ "epoch": 0.25166543301258326,
73
+ "grad_norm": 4.775036811828613,
74
+ "learning_rate": 8.319604612850081e-07,
75
+ "logits/chosen": -0.5510014891624451,
76
+ "logits/rejected": -0.4638059437274933,
77
+ "logps/chosen": -65.10930633544922,
78
+ "logps/rejected": -9.851871490478516,
79
+ "loss": 0.573,
80
+ "rewards/accuracies": 0.9852941036224365,
81
+ "rewards/chosen": 0.23002856969833374,
82
+ "rewards/margins": 0.26800811290740967,
83
+ "rewards/rejected": -0.037979576736688614,
84
+ "step": 170
85
+ },
86
+ {
87
+ "epoch": 0.30199851961509994,
88
+ "grad_norm": 4.691457271575928,
89
+ "learning_rate": 7.759472817133442e-07,
90
+ "logits/chosen": -0.5684951543807983,
91
+ "logits/rejected": -0.48908984661102295,
92
+ "logps/chosen": -63.056541442871094,
93
+ "logps/rejected": -9.765806198120117,
94
+ "loss": 0.4989,
95
+ "rewards/accuracies": 0.9742646813392639,
96
+ "rewards/chosen": 0.39861220121383667,
97
+ "rewards/margins": 0.48014047741889954,
98
+ "rewards/rejected": -0.08152831345796585,
99
+ "step": 204
100
+ },
101
+ {
102
+ "epoch": 0.35233160621761656,
103
+ "grad_norm": 3.0710372924804688,
104
+ "learning_rate": 7.199341021416804e-07,
105
+ "logits/chosen": -0.5603327751159668,
106
+ "logits/rejected": -0.474366694688797,
107
+ "logps/chosen": -68.22887420654297,
108
+ "logps/rejected": -11.145505905151367,
109
+ "loss": 0.4096,
110
+ "rewards/accuracies": 0.9742646813392639,
111
+ "rewards/chosen": 0.6239223480224609,
112
+ "rewards/margins": 0.7788197994232178,
113
+ "rewards/rejected": -0.15489746630191803,
114
+ "step": 238
115
+ },
116
+ {
117
+ "epoch": 0.40266469282013323,
118
+ "grad_norm": 3.50852108001709,
119
+ "learning_rate": 6.639209225700164e-07,
120
+ "logits/chosen": -0.554121732711792,
121
+ "logits/rejected": -0.47616252303123474,
122
+ "logps/chosen": -59.60889434814453,
123
+ "logps/rejected": -12.72043514251709,
124
+ "loss": 0.3624,
125
+ "rewards/accuracies": 0.9779411554336548,
126
+ "rewards/chosen": 0.769890308380127,
127
+ "rewards/margins": 1.017148494720459,
128
+ "rewards/rejected": -0.24725815653800964,
129
+ "step": 272
130
+ },
131
+ {
132
+ "epoch": 0.4529977794226499,
133
+ "grad_norm": 2.061458110809326,
134
+ "learning_rate": 6.079077429983526e-07,
135
+ "logits/chosen": -0.5355279445648193,
136
+ "logits/rejected": -0.46640804409980774,
137
+ "logps/chosen": -64.37019348144531,
138
+ "logps/rejected": -15.296799659729004,
139
+ "loss": 0.2901,
140
+ "rewards/accuracies": 0.9779411554336548,
141
+ "rewards/chosen": 0.9912306666374207,
142
+ "rewards/margins": 1.3743596076965332,
143
+ "rewards/rejected": -0.3831288516521454,
144
+ "step": 306
145
+ },
146
+ {
147
+ "epoch": 0.5033308660251665,
148
+ "grad_norm": 2.341501474380493,
149
+ "learning_rate": 5.518945634266886e-07,
150
+ "logits/chosen": -0.5389317870140076,
151
+ "logits/rejected": -0.4714396893978119,
152
+ "logps/chosen": -62.30534744262695,
153
+ "logps/rejected": -16.425796508789062,
154
+ "loss": 0.2661,
155
+ "rewards/accuracies": 0.9816176295280457,
156
+ "rewards/chosen": 1.0894008874893188,
157
+ "rewards/margins": 1.5544438362121582,
158
+ "rewards/rejected": -0.46504291892051697,
159
+ "step": 340
160
+ },
161
+ {
162
+ "epoch": 0.5536639526276832,
163
+ "grad_norm": 1.6648075580596924,
164
+ "learning_rate": 4.958813838550247e-07,
165
+ "logits/chosen": -0.5351024270057678,
166
+ "logits/rejected": -0.48344936966896057,
167
+ "logps/chosen": -53.63265609741211,
168
+ "logps/rejected": -17.541196823120117,
169
+ "loss": 0.2411,
170
+ "rewards/accuracies": 0.9705882668495178,
171
+ "rewards/chosen": 1.1532927751541138,
172
+ "rewards/margins": 1.7396329641342163,
173
+ "rewards/rejected": -0.5863401889801025,
174
+ "step": 374
175
+ },
176
+ {
177
+ "epoch": 0.6039970392301999,
178
+ "grad_norm": 2.055748701095581,
179
+ "learning_rate": 4.3986820428336077e-07,
180
+ "logits/chosen": -0.538494884967804,
181
+ "logits/rejected": -0.4838006794452667,
182
+ "logps/chosen": -57.38086700439453,
183
+ "logps/rejected": -18.22422981262207,
184
+ "loss": 0.2084,
185
+ "rewards/accuracies": 0.9705882668495178,
186
+ "rewards/chosen": 1.300463318824768,
187
+ "rewards/margins": 1.98318350315094,
188
+ "rewards/rejected": -0.6827201843261719,
189
+ "step": 408
190
+ },
191
+ {
192
+ "epoch": 0.6543301258327165,
193
+ "grad_norm": 3.2196602821350098,
194
+ "learning_rate": 3.8385502471169683e-07,
195
+ "logits/chosen": -0.5572548508644104,
196
+ "logits/rejected": -0.5117458701133728,
197
+ "logps/chosen": -54.00746154785156,
198
+ "logps/rejected": -17.898473739624023,
199
+ "loss": 0.1869,
200
+ "rewards/accuracies": 0.9889705777168274,
201
+ "rewards/chosen": 1.3422279357910156,
202
+ "rewards/margins": 2.1278655529022217,
203
+ "rewards/rejected": -0.7856374382972717,
204
+ "step": 442
205
+ },
206
+ {
207
+ "epoch": 0.7046632124352331,
208
+ "grad_norm": 1.63054358959198,
209
+ "learning_rate": 3.2784184514003294e-07,
210
+ "logits/chosen": -0.5344655513763428,
211
+ "logits/rejected": -0.47442930936813354,
212
+ "logps/chosen": -56.39248275756836,
213
+ "logps/rejected": -17.505578994750977,
214
+ "loss": 0.1707,
215
+ "rewards/accuracies": 0.9889705777168274,
216
+ "rewards/chosen": 1.3500103950500488,
217
+ "rewards/margins": 2.1952497959136963,
218
+ "rewards/rejected": -0.8452394008636475,
219
+ "step": 476
220
+ },
221
+ {
222
+ "epoch": 0.7549962990377498,
223
+ "grad_norm": 1.9224135875701904,
224
+ "learning_rate": 2.7182866556836905e-07,
225
+ "logits/chosen": -0.5313306450843811,
226
+ "logits/rejected": -0.48571935296058655,
227
+ "logps/chosen": -50.30500411987305,
228
+ "logps/rejected": -18.38837242126465,
229
+ "loss": 0.1665,
230
+ "rewards/accuracies": 0.9816176295280457,
231
+ "rewards/chosen": 1.3861879110336304,
232
+ "rewards/margins": 2.343000888824463,
233
+ "rewards/rejected": -0.9568131566047668,
234
+ "step": 510
235
+ },
236
+ {
237
+ "epoch": 0.8053293856402665,
238
+ "grad_norm": 2.294525146484375,
239
+ "learning_rate": 2.158154859967051e-07,
240
+ "logits/chosen": -0.5495839715003967,
241
+ "logits/rejected": -0.4935465455055237,
242
+ "logps/chosen": -54.87118911743164,
243
+ "logps/rejected": -20.08263397216797,
244
+ "loss": 0.1558,
245
+ "rewards/accuracies": 0.9889705777168274,
246
+ "rewards/chosen": 1.3922532796859741,
247
+ "rewards/margins": 2.4232118129730225,
248
+ "rewards/rejected": -1.0309585332870483,
249
+ "step": 544
250
+ },
251
+ {
252
+ "epoch": 0.8556624722427831,
253
+ "grad_norm": 1.459617257118225,
254
+ "learning_rate": 1.5980230642504117e-07,
255
+ "logits/chosen": -0.5833328366279602,
256
+ "logits/rejected": -0.517787516117096,
257
+ "logps/chosen": -62.456932067871094,
258
+ "logps/rejected": -20.363330841064453,
259
+ "loss": 0.1342,
260
+ "rewards/accuracies": 0.9926470518112183,
261
+ "rewards/chosen": 1.5634468793869019,
262
+ "rewards/margins": 2.619420289993286,
263
+ "rewards/rejected": -1.0559734106063843,
264
+ "step": 578
265
+ },
266
+ {
267
+ "epoch": 0.9059955588452998,
268
+ "grad_norm": 1.3194255828857422,
269
+ "learning_rate": 1.0378912685337726e-07,
270
+ "logits/chosen": -0.5344783663749695,
271
+ "logits/rejected": -0.4842905104160309,
272
+ "logps/chosen": -54.44826126098633,
273
+ "logps/rejected": -20.549943923950195,
274
+ "loss": 0.1334,
275
+ "rewards/accuracies": 0.9852941036224365,
276
+ "rewards/chosen": 1.394426703453064,
277
+ "rewards/margins": 2.494863271713257,
278
+ "rewards/rejected": -1.1004364490509033,
279
+ "step": 612
280
+ },
281
+ {
282
+ "epoch": 0.9563286454478165,
283
+ "grad_norm": 1.6856123208999634,
284
+ "learning_rate": 4.777594728171334e-08,
285
+ "logits/chosen": -0.5274606347084045,
286
+ "logits/rejected": -0.48015105724334717,
287
+ "logps/chosen": -56.23059844970703,
288
+ "logps/rejected": -23.505773544311523,
289
+ "loss": 0.1363,
290
+ "rewards/accuracies": 0.9852941036224365,
291
+ "rewards/chosen": 1.4247695207595825,
292
+ "rewards/margins": 2.5551772117614746,
293
+ "rewards/rejected": -1.130407691001892,
294
+ "step": 646
295
+ },
296
+ {
297
+ "epoch": 0.999259807549963,
298
+ "step": 675,
299
+ "total_flos": 3.8465011975952794e+17,
300
+ "train_loss": 0.33879759046766494,
301
+ "train_runtime": 9841.7629,
302
+ "train_samples_per_second": 0.549,
303
+ "train_steps_per_second": 0.069
304
+ }
305
+ ],
306
+ "logging_steps": 34,
307
+ "max_steps": 675,
308
+ "num_input_tokens_seen": 0,
309
+ "num_train_epochs": 1,
310
+ "save_steps": 500,
311
+ "stateful_callbacks": {
312
+ "TrainerControl": {
313
+ "args": {
314
+ "should_epoch_stop": false,
315
+ "should_evaluate": false,
316
+ "should_log": false,
317
+ "should_save": true,
318
+ "should_training_stop": true
319
+ },
320
+ "attributes": {}
321
+ }
322
+ },
323
+ "total_flos": 3.8465011975952794e+17,
324
+ "train_batch_size": 2,
325
+ "trial_name": null,
326
+ "trial_params": null
327
+ }
training_loss.png ADDED
training_rewards_accuracies.png ADDED