P1ayer-1 committed on
Commit
0e1925d
·
1 Parent(s): 5ae72ce

commit files to HF hub

Browse files
Files changed (5) hide show
  1. README.md +21 -4
  2. all_results.json +15 -0
  3. eval_results.json +10 -0
  4. train_results.json +8 -0
  5. trainer_state.json +370 -0
README.md CHANGED
@@ -2,19 +2,32 @@
2
  license: apache-2.0
3
  tags:
4
  - generated_from_trainer
 
 
5
  metrics:
6
  - accuracy
7
  model-index:
8
- - name: expert-pubmed_central
9
- results: []
 
 
 
 
 
 
 
 
 
 
 
10
  ---
11
 
12
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
13
  should probably proofread and complete it, then remove this comment. -->
14
 
15
- # expert-pubmed_central
16
 
17
- This model is a fine-tuned version of [EleutherAI/pythia-1b-deduped](https://huggingface.co/EleutherAI/pythia-1b-deduped) on the None dataset.
18
  It achieves the following results on the evaluation set:
19
  - Loss: 2.0227
20
  - Accuracy: 0.5768
@@ -66,3 +79,7 @@ The following hyperparameters were used during training:
66
  - Pytorch 2.0.0+cu117
67
  - Datasets 2.11.0
68
  - Tokenizers 0.13.3
 
 
 
 
 
2
  license: apache-2.0
3
  tags:
4
  - generated_from_trainer
5
+ datasets:
6
+ - Multi-Domain-Expert-Layers/pubmed_central
7
  metrics:
8
  - accuracy
9
  model-index:
10
+ - name: layer_9,10,11,12,13
11
+ results:
12
+ - task:
13
+ type: text-generation
14
+ name: Causal Language Modeling
15
+ dataset:
16
+ name: Multi-Domain-Expert-Layers/pubmed_central
17
+ type: Multi-Domain-Expert-Layers/pubmed_central
18
+ split: None
19
+ metrics:
20
+ - type: accuracy
21
+ value: 0.5767534246575342
22
+ name: Accuracy
23
  ---
24
 
25
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
26
  should probably proofread and complete it, then remove this comment. -->
27
 
28
+ # layer_9,10,11,12,13
29
 
30
+ This model is a fine-tuned version of [EleutherAI/pythia-1b-deduped](https://huggingface.co/EleutherAI/pythia-1b-deduped) on the Multi-Domain-Expert-Layers/pubmed_central dataset.
31
  It achieves the following results on the evaluation set:
32
  - Loss: 2.0227
33
  - Accuracy: 0.5768
 
79
  - Pytorch 2.0.0+cu117
80
  - Datasets 2.11.0
81
  - Tokenizers 0.13.3
82
+
83
+
84
+ ## Wandb Report
85
+ https://wandb.ai/ontocord/pythia-1b-deduped-layer-test-pubmed_central/runs/yy3pwx0o
all_results.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.02,
3
+ "eval_accuracy": 0.5767534246575342,
4
+ "eval_loss": 2.0226891040802,
5
+ "eval_runtime": 13.0624,
6
+ "eval_samples": 1000,
7
+ "eval_samples_per_second": 76.555,
8
+ "eval_steps_per_second": 1.225,
9
+ "perplexity": 7.558623554318639,
10
+ "train_loss": 2.0490207977294923,
11
+ "train_runtime": 1425.429,
12
+ "train_samples": 3358758,
13
+ "train_samples_per_second": 44.899,
14
+ "train_steps_per_second": 0.702
15
+ }
eval_results.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.02,
3
+ "eval_accuracy": 0.5767534246575342,
4
+ "eval_loss": 2.0226891040802,
5
+ "eval_runtime": 13.0624,
6
+ "eval_samples": 1000,
7
+ "eval_samples_per_second": 76.555,
8
+ "eval_steps_per_second": 1.225,
9
+ "perplexity": 7.558623554318639
10
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.02,
3
+ "train_loss": 2.0490207977294923,
4
+ "train_runtime": 1425.429,
5
+ "train_samples": 3358758,
6
+ "train_samples_per_second": 44.899,
7
+ "train_steps_per_second": 0.702
8
+ }
trainer_state.json ADDED
@@ -0,0 +1,370 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.019054651121247126,
5
+ "global_step": 1000,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.0,
12
+ "learning_rate": 9.8e-05,
13
+ "loss": 2.0665,
14
+ "step": 20
15
+ },
16
+ {
17
+ "epoch": 0.0,
18
+ "learning_rate": 9.6e-05,
19
+ "loss": 2.038,
20
+ "step": 40
21
+ },
22
+ {
23
+ "epoch": 0.0,
24
+ "learning_rate": 9.4e-05,
25
+ "loss": 2.0553,
26
+ "step": 60
27
+ },
28
+ {
29
+ "epoch": 0.0,
30
+ "learning_rate": 9.200000000000001e-05,
31
+ "loss": 2.063,
32
+ "step": 80
33
+ },
34
+ {
35
+ "epoch": 0.0,
36
+ "learning_rate": 9e-05,
37
+ "loss": 2.0761,
38
+ "step": 100
39
+ },
40
+ {
41
+ "epoch": 0.0,
42
+ "learning_rate": 8.800000000000001e-05,
43
+ "loss": 2.0427,
44
+ "step": 120
45
+ },
46
+ {
47
+ "epoch": 0.0,
48
+ "learning_rate": 8.6e-05,
49
+ "loss": 2.0504,
50
+ "step": 140
51
+ },
52
+ {
53
+ "epoch": 0.0,
54
+ "learning_rate": 8.4e-05,
55
+ "loss": 2.072,
56
+ "step": 160
57
+ },
58
+ {
59
+ "epoch": 0.0,
60
+ "learning_rate": 8.2e-05,
61
+ "loss": 2.0897,
62
+ "step": 180
63
+ },
64
+ {
65
+ "epoch": 0.0,
66
+ "learning_rate": 8e-05,
67
+ "loss": 2.0567,
68
+ "step": 200
69
+ },
70
+ {
71
+ "epoch": 0.0,
72
+ "eval_accuracy": 0.57173385518591,
73
+ "eval_loss": 2.053316354751587,
74
+ "eval_runtime": 13.3251,
75
+ "eval_samples_per_second": 75.046,
76
+ "eval_steps_per_second": 1.201,
77
+ "step": 200
78
+ },
79
+ {
80
+ "epoch": 0.0,
81
+ "learning_rate": 7.800000000000001e-05,
82
+ "loss": 2.0786,
83
+ "step": 220
84
+ },
85
+ {
86
+ "epoch": 0.0,
87
+ "learning_rate": 7.6e-05,
88
+ "loss": 2.0559,
89
+ "step": 240
90
+ },
91
+ {
92
+ "epoch": 0.0,
93
+ "learning_rate": 7.4e-05,
94
+ "loss": 2.0664,
95
+ "step": 260
96
+ },
97
+ {
98
+ "epoch": 0.01,
99
+ "learning_rate": 7.2e-05,
100
+ "loss": 2.0799,
101
+ "step": 280
102
+ },
103
+ {
104
+ "epoch": 0.01,
105
+ "learning_rate": 7e-05,
106
+ "loss": 2.0744,
107
+ "step": 300
108
+ },
109
+ {
110
+ "epoch": 0.01,
111
+ "learning_rate": 6.800000000000001e-05,
112
+ "loss": 2.0509,
113
+ "step": 320
114
+ },
115
+ {
116
+ "epoch": 0.01,
117
+ "learning_rate": 6.6e-05,
118
+ "loss": 2.094,
119
+ "step": 340
120
+ },
121
+ {
122
+ "epoch": 0.01,
123
+ "learning_rate": 6.400000000000001e-05,
124
+ "loss": 2.0419,
125
+ "step": 360
126
+ },
127
+ {
128
+ "epoch": 0.01,
129
+ "learning_rate": 6.2e-05,
130
+ "loss": 2.055,
131
+ "step": 380
132
+ },
133
+ {
134
+ "epoch": 0.01,
135
+ "learning_rate": 6e-05,
136
+ "loss": 2.041,
137
+ "step": 400
138
+ },
139
+ {
140
+ "epoch": 0.01,
141
+ "eval_accuracy": 0.5733287671232876,
142
+ "eval_loss": 2.043827772140503,
143
+ "eval_runtime": 13.0102,
144
+ "eval_samples_per_second": 76.863,
145
+ "eval_steps_per_second": 1.23,
146
+ "step": 400
147
+ },
148
+ {
149
+ "epoch": 0.01,
150
+ "learning_rate": 5.8e-05,
151
+ "loss": 2.0314,
152
+ "step": 420
153
+ },
154
+ {
155
+ "epoch": 0.01,
156
+ "learning_rate": 5.6000000000000006e-05,
157
+ "loss": 2.0513,
158
+ "step": 440
159
+ },
160
+ {
161
+ "epoch": 0.01,
162
+ "learning_rate": 5.4000000000000005e-05,
163
+ "loss": 2.0545,
164
+ "step": 460
165
+ },
166
+ {
167
+ "epoch": 0.01,
168
+ "learning_rate": 5.2000000000000004e-05,
169
+ "loss": 2.0954,
170
+ "step": 480
171
+ },
172
+ {
173
+ "epoch": 0.01,
174
+ "learning_rate": 5e-05,
175
+ "loss": 2.0493,
176
+ "step": 500
177
+ },
178
+ {
179
+ "epoch": 0.01,
180
+ "learning_rate": 4.8e-05,
181
+ "loss": 2.0714,
182
+ "step": 520
183
+ },
184
+ {
185
+ "epoch": 0.01,
186
+ "learning_rate": 4.600000000000001e-05,
187
+ "loss": 2.077,
188
+ "step": 540
189
+ },
190
+ {
191
+ "epoch": 0.01,
192
+ "learning_rate": 4.4000000000000006e-05,
193
+ "loss": 2.07,
194
+ "step": 560
195
+ },
196
+ {
197
+ "epoch": 0.01,
198
+ "learning_rate": 4.2e-05,
199
+ "loss": 2.0233,
200
+ "step": 580
201
+ },
202
+ {
203
+ "epoch": 0.01,
204
+ "learning_rate": 4e-05,
205
+ "loss": 2.0496,
206
+ "step": 600
207
+ },
208
+ {
209
+ "epoch": 0.01,
210
+ "eval_accuracy": 0.5749158512720156,
211
+ "eval_loss": 2.036094903945923,
212
+ "eval_runtime": 12.9978,
213
+ "eval_samples_per_second": 76.936,
214
+ "eval_steps_per_second": 1.231,
215
+ "step": 600
216
+ },
217
+ {
218
+ "epoch": 0.01,
219
+ "learning_rate": 3.8e-05,
220
+ "loss": 2.063,
221
+ "step": 620
222
+ },
223
+ {
224
+ "epoch": 0.01,
225
+ "learning_rate": 3.6e-05,
226
+ "loss": 2.0683,
227
+ "step": 640
228
+ },
229
+ {
230
+ "epoch": 0.01,
231
+ "learning_rate": 3.4000000000000007e-05,
232
+ "loss": 2.0344,
233
+ "step": 660
234
+ },
235
+ {
236
+ "epoch": 0.01,
237
+ "learning_rate": 3.2000000000000005e-05,
238
+ "loss": 2.0414,
239
+ "step": 680
240
+ },
241
+ {
242
+ "epoch": 0.01,
243
+ "learning_rate": 3e-05,
244
+ "loss": 2.0146,
245
+ "step": 700
246
+ },
247
+ {
248
+ "epoch": 0.01,
249
+ "learning_rate": 2.8000000000000003e-05,
250
+ "loss": 2.0369,
251
+ "step": 720
252
+ },
253
+ {
254
+ "epoch": 0.01,
255
+ "learning_rate": 2.6000000000000002e-05,
256
+ "loss": 2.0333,
257
+ "step": 740
258
+ },
259
+ {
260
+ "epoch": 0.01,
261
+ "learning_rate": 2.4e-05,
262
+ "loss": 2.039,
263
+ "step": 760
264
+ },
265
+ {
266
+ "epoch": 0.01,
267
+ "learning_rate": 2.2000000000000003e-05,
268
+ "loss": 2.0402,
269
+ "step": 780
270
+ },
271
+ {
272
+ "epoch": 0.02,
273
+ "learning_rate": 2e-05,
274
+ "loss": 2.0194,
275
+ "step": 800
276
+ },
277
+ {
278
+ "epoch": 0.02,
279
+ "eval_accuracy": 0.5760665362035226,
280
+ "eval_loss": 2.0276312828063965,
281
+ "eval_runtime": 11.977,
282
+ "eval_samples_per_second": 83.493,
283
+ "eval_steps_per_second": 1.336,
284
+ "step": 800
285
+ },
286
+ {
287
+ "epoch": 0.02,
288
+ "learning_rate": 1.8e-05,
289
+ "loss": 2.003,
290
+ "step": 820
291
+ },
292
+ {
293
+ "epoch": 0.02,
294
+ "learning_rate": 1.6000000000000003e-05,
295
+ "loss": 2.0207,
296
+ "step": 840
297
+ },
298
+ {
299
+ "epoch": 0.02,
300
+ "learning_rate": 1.4000000000000001e-05,
301
+ "loss": 2.0228,
302
+ "step": 860
303
+ },
304
+ {
305
+ "epoch": 0.02,
306
+ "learning_rate": 1.2e-05,
307
+ "loss": 2.0349,
308
+ "step": 880
309
+ },
310
+ {
311
+ "epoch": 0.02,
312
+ "learning_rate": 1e-05,
313
+ "loss": 2.0158,
314
+ "step": 900
315
+ },
316
+ {
317
+ "epoch": 0.02,
318
+ "learning_rate": 8.000000000000001e-06,
319
+ "loss": 2.019,
320
+ "step": 920
321
+ },
322
+ {
323
+ "epoch": 0.02,
324
+ "learning_rate": 6e-06,
325
+ "loss": 2.0196,
326
+ "step": 940
327
+ },
328
+ {
329
+ "epoch": 0.02,
330
+ "learning_rate": 4.000000000000001e-06,
331
+ "loss": 2.0414,
332
+ "step": 960
333
+ },
334
+ {
335
+ "epoch": 0.02,
336
+ "learning_rate": 2.0000000000000003e-06,
337
+ "loss": 2.0279,
338
+ "step": 980
339
+ },
340
+ {
341
+ "epoch": 0.02,
342
+ "learning_rate": 0.0,
343
+ "loss": 2.0338,
344
+ "step": 1000
345
+ },
346
+ {
347
+ "epoch": 0.02,
348
+ "eval_accuracy": 0.5767534246575342,
349
+ "eval_loss": 2.0226891040802,
350
+ "eval_runtime": 13.0341,
351
+ "eval_samples_per_second": 76.722,
352
+ "eval_steps_per_second": 1.228,
353
+ "step": 1000
354
+ },
355
+ {
356
+ "epoch": 0.02,
357
+ "step": 1000,
358
+ "total_flos": 1.7866929733632e+17,
359
+ "train_loss": 2.0490207977294923,
360
+ "train_runtime": 1425.429,
361
+ "train_samples_per_second": 44.899,
362
+ "train_steps_per_second": 0.702
363
+ }
364
+ ],
365
+ "max_steps": 1000,
366
+ "num_train_epochs": 1,
367
+ "total_flos": 1.7866929733632e+17,
368
+ "trial_name": null,
369
+ "trial_params": null
370
+ }