TinyPixel committed
Commit cd48d59 · 1 Parent(s): f0e650a

Upload folder using huggingface_hub

README.md CHANGED
@@ -12,21 +12,9 @@ The following `bitsandbytes` quantization config was used during training:
  - llm_int8_enable_fp32_cpu_offload: False
  - llm_int8_has_fp16_weight: False
  - bnb_4bit_quant_type: nf4
- - bnb_4bit_use_double_quant: False
- - bnb_4bit_compute_dtype: float16
-
- The following `bitsandbytes` quantization config was used during training:
- - load_in_8bit: False
- - load_in_4bit: True
- - llm_int8_threshold: 6.0
- - llm_int8_skip_modules: None
- - llm_int8_enable_fp32_cpu_offload: False
- - llm_int8_has_fp16_weight: False
- - bnb_4bit_quant_type: nf4
- - bnb_4bit_use_double_quant: False
- - bnb_4bit_compute_dtype: float16
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: float32
  ### Framework versions
 
- - PEFT 0.5.0.dev0
 
- - PEFT 0.5.0.dev0
+ - PEFT 0.4.0
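
For reference, the updated settings map onto a `transformers` `BitsAndBytesConfig` roughly as follows. This is a sketch only: the training script is not part of this commit, and `load_in_4bit=True` is assumed from the README lines above the visible hunk.

```python
# Minimal sketch of the quantization config described in the updated README.
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # assumed; listed above the visible hunk
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,        # changed from False in this commit
    bnb_4bit_compute_dtype=torch.float32,  # changed from float16 in this commit
)
```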
adapter_config.json CHANGED
@@ -1,26 +1,23 @@
  {
    "auto_mapping": null,
-   "base_model_name_or_path": "TinyPixel/Llama-2-7B-bf16-sharded",
+   "base_model_name_or_path": "EleutherAI/pythia-1.4b-deduped",
    "bias": "none",
    "fan_in_fan_out": false,
    "inference_mode": true,
    "init_lora_weights": true,
    "layers_pattern": null,
    "layers_to_transform": null,
-   "lora_alpha": 16,
-   "lora_dropout": 0.1,
+   "lora_alpha": 16.0,
+   "lora_dropout": 0.05,
    "modules_to_save": null,
    "peft_type": "LORA",
    "r": 64,
    "revision": null,
    "target_modules": [
-     "q_proj",
-     "up_proj",
-     "o_proj",
-     "k_proj",
-     "down_proj",
-     "gate_proj",
-     "v_proj"
+     "query_key_value",
+     "dense",
+     "dense_4h_to_h",
+     "dense_h_to_4h"
    ],
    "task_type": "CAUSAL_LM"
  }
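
The updated adapter configuration corresponds to a `peft` `LoraConfig` along these lines (a sketch reconstructed from the JSON above, not the author's original training code):

```python
# Minimal sketch: LoraConfig equivalent to the updated adapter_config.json.
from peft import LoraConfig

lora_config = LoraConfig(
    r=64,
    lora_alpha=16.0,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "query_key_value",  # fused QKV projection in GPT-NeoX attention
        "dense",            # attention output projection
        "dense_4h_to_h",    # MLP down projection (4h -> h)
        "dense_h_to_4h",    # MLP up projection (h -> 4h)
    ],
)
```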
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:69e59b1e6ca86aabe4a134fb6cda6e31c43c152c84e9396f92ab05dcdce8d222
- size 639792909
+ oid sha256:c131d7ef0152ba05901fcc4d00eb8d3989c4ab25884d85967b687a68fe837221
+ size 201396877
adapter_model/adapter_config.json CHANGED
@@ -1,6 +1,6 @@
  {
    "auto_mapping": null,
-   "base_model_name_or_path": "TinyPixel/Llama-2-7B-bf16-sharded",
+   "base_model_name_or_path": "EleutherAI/pythia-1.4b-deduped",
    "bias": "none",
    "fan_in_fan_out": false,
    "inference_mode": true,
@@ -14,13 +14,10 @@
    "r": 64,
    "revision": null,
    "target_modules": [
-     "q_proj",
-     "up_proj",
-     "o_proj",
-     "k_proj",
-     "down_proj",
-     "gate_proj",
-     "v_proj"
+     "query_key_value",
+     "dense",
+     "dense_4h_to_h",
+     "dense_h_to_4h"
    ],
    "task_type": "CAUSAL_LM"
  }
adapter_model/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4590eccec48a795c79c444a5e95a4897a5e1fd17be423ccb30c2452c03c56f0f
- size 639792909
+ oid sha256:c131d7ef0152ba05901fcc4d00eb8d3989c4ab25884d85967b687a68fe837221
+ size 201396877
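
Both `adapter_model.bin` entries are Git LFS pointers to the new LoRA weights (about 201 MB, down from about 640 MB with the Llama-2 base). Loading them would look roughly like the sketch below; `TinyPixel/<this-repo>` is a placeholder for this repository's id, which the commit does not spell out.

```python
# Minimal sketch, assuming a placeholder repo id for this adapter repository.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("EleutherAI/pythia-1.4b-deduped")
model = PeftModel.from_pretrained(base, "TinyPixel/<this-repo>")  # hypothetical id
```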
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:63c22dea1561a4ddccff7a19420a0c4db147310ac0f72710d4ef54be986579f1
- size 1279539525
+ oid sha256:0451a20c28f34735ef7dfa777a68b2a3a923bed7fac096edb2ac3b648829c015
+ size 2050282373
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:21da822cb611592092c4a65da42e66edb941b19dd533aad1e0a40e0228ea3a5e
- size 14575
+ oid sha256:d266ddecfade7ae12d89d853bf0985517cf37a6a52c274d08548cfff0d913c51
+ size 14511
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1c2c0790105388e71413fa1315b498e3e67fda20e8db9c016f7c880ef11ee42e
+ oid sha256:f4880f3f40e7ab04d8efce4fc909327351c41d3ccafff780222b4189f527a9a0
  size 627
special_tokens_map.json CHANGED
@@ -1,24 +1,6 @@
  {
-   "bos_token": {
-     "content": "<s>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": false
-   },
-   "eos_token": {
-     "content": "</s>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": false
-   },
-   "pad_token": "</s>",
-   "unk_token": {
-     "content": "<unk>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": false
-   }
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "[PAD]",
+   "unk_token": "<|endoftext|>"
  }
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,31 +1,10 @@
  {
-   "bos_token": {
-     "__type": "AddedToken",
-     "content": "<s>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": false
-   },
-   "clean_up_tokenization_spaces": false,
-   "eos_token": {
-     "__type": "AddedToken",
-     "content": "</s>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": false
-   },
+   "add_prefix_space": false,
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
    "model_max_length": 1000000000000000019884624838656,
-   "pad_token": null,
-   "sp_model_kwargs": {},
-   "tokenizer_class": "LlamaTokenizer",
-   "unk_token": {
-     "__type": "AddedToken",
-     "content": "<unk>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": false
-   }
+   "padding_side": "right",
+   "tokenizer_class": "GPTNeoXTokenizer",
+   "unk_token": "<|endoftext|>"
  }
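
The new tokenizer files describe a GPT-NeoX tokenizer with `<|endoftext|>` as BOS/EOS/UNK and a separate `[PAD]` token. A minimal sketch of reproducing that state (how the pad token was originally added is not shown in this commit):

```python
# Minimal sketch, assuming the pad token was added via add_special_tokens.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-1.4b-deduped")
tokenizer.add_special_tokens({"pad_token": "[PAD]"})  # "[PAD]" per special_tokens_map.json
tokenizer.padding_side = "right"                      # per tokenizer_config.json
```

If `[PAD]` is genuinely a new vocabulary entry, the base model's embedding matrix would also need resizing, e.g. `model.resize_token_embeddings(len(tokenizer))`.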
trainer_state.json CHANGED
@@ -1,616 +1,136 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 0.024906018695080284,
5
  "global_step": 200,
6
  "is_hyper_param_search": false,
7
  "is_local_process_zero": true,
8
  "is_world_process_zero": true,
9
  "log_history": [
10
  {
11
- "epoch": 0.0,
12
- "learning_rate": 2e-05,
13
- "loss": 0.9985,
14
- "step": 2
15
- },
16
- {
17
- "epoch": 0.0,
18
- "learning_rate": 2e-05,
19
- "loss": 0.886,
20
- "step": 4
21
- },
22
- {
23
- "epoch": 0.0,
24
- "learning_rate": 2e-05,
25
- "loss": 1.1051,
26
- "step": 6
27
- },
28
- {
29
- "epoch": 0.0,
30
- "learning_rate": 2e-05,
31
- "loss": 1.1122,
32
- "step": 8
33
- },
34
- {
35
- "epoch": 0.0,
36
  "learning_rate": 2e-05,
37
- "loss": 1.1735,
38
  "step": 10
39
  },
40
  {
41
- "epoch": 0.0,
42
- "learning_rate": 2e-05,
43
- "loss": 1.1521,
44
- "step": 12
45
- },
46
- {
47
- "epoch": 0.0,
48
- "learning_rate": 2e-05,
49
- "loss": 1.1679,
50
- "step": 14
51
- },
52
- {
53
- "epoch": 0.0,
54
  "learning_rate": 2e-05,
55
- "loss": 1.1777,
56
- "step": 16
57
- },
58
- {
59
- "epoch": 0.0,
60
- "learning_rate": 2e-05,
61
- "loss": 1.1099,
62
- "step": 18
63
- },
64
- {
65
- "epoch": 0.0,
66
- "learning_rate": 2e-05,
67
- "loss": 1.1247,
68
  "step": 20
69
  },
70
  {
71
- "epoch": 0.0,
72
- "learning_rate": 2e-05,
73
- "loss": 1.2112,
74
- "step": 22
75
- },
76
- {
77
- "epoch": 0.0,
78
  "learning_rate": 2e-05,
79
- "loss": 1.1689,
80
- "step": 24
81
- },
82
- {
83
- "epoch": 0.0,
84
- "learning_rate": 2e-05,
85
- "loss": 1.1411,
86
- "step": 26
87
- },
88
- {
89
- "epoch": 0.0,
90
- "learning_rate": 2e-05,
91
- "loss": 1.3501,
92
- "step": 28
93
- },
94
- {
95
- "epoch": 0.0,
96
- "learning_rate": 2e-05,
97
- "loss": 1.1767,
98
  "step": 30
99
  },
100
  {
101
- "epoch": 0.0,
102
- "learning_rate": 2e-05,
103
- "loss": 1.2272,
104
- "step": 32
105
- },
106
- {
107
- "epoch": 0.0,
108
- "learning_rate": 2e-05,
109
- "loss": 1.2486,
110
- "step": 34
111
- },
112
- {
113
- "epoch": 0.0,
114
- "learning_rate": 2e-05,
115
- "loss": 1.2727,
116
- "step": 36
117
- },
118
- {
119
- "epoch": 0.0,
120
  "learning_rate": 2e-05,
121
- "loss": 1.2661,
122
- "step": 38
123
- },
124
- {
125
- "epoch": 0.0,
126
- "learning_rate": 2e-05,
127
- "loss": 1.333,
128
  "step": 40
129
  },
130
  {
131
- "epoch": 0.01,
132
  "learning_rate": 2e-05,
133
- "loss": 1.3674,
134
- "step": 42
135
- },
136
- {
137
- "epoch": 0.01,
138
- "learning_rate": 2e-05,
139
- "loss": 1.3425,
140
- "step": 44
141
- },
142
- {
143
- "epoch": 0.01,
144
- "learning_rate": 2e-05,
145
- "loss": 1.3959,
146
- "step": 46
147
- },
148
- {
149
- "epoch": 0.01,
150
- "learning_rate": 2e-05,
151
- "loss": 1.3078,
152
- "step": 48
153
- },
154
- {
155
- "epoch": 0.01,
156
- "learning_rate": 2e-05,
157
- "loss": 2.4367,
158
  "step": 50
159
  },
160
  {
161
- "epoch": 0.01,
162
  "learning_rate": 2e-05,
163
- "loss": 0.9375,
164
- "step": 52
165
- },
166
- {
167
- "epoch": 0.01,
168
- "learning_rate": 2e-05,
169
- "loss": 0.7958,
170
- "step": 54
171
- },
172
- {
173
- "epoch": 0.01,
174
- "learning_rate": 2e-05,
175
- "loss": 0.9266,
176
- "step": 56
177
- },
178
- {
179
- "epoch": 0.01,
180
- "learning_rate": 2e-05,
181
- "loss": 0.9467,
182
- "step": 58
183
- },
184
- {
185
- "epoch": 0.01,
186
- "learning_rate": 2e-05,
187
- "loss": 0.9492,
188
  "step": 60
189
  },
190
  {
191
- "epoch": 0.01,
192
- "learning_rate": 2e-05,
193
- "loss": 1.0022,
194
- "step": 62
195
- },
196
- {
197
- "epoch": 0.01,
198
- "learning_rate": 2e-05,
199
- "loss": 1.0071,
200
- "step": 64
201
- },
202
- {
203
- "epoch": 0.01,
204
- "learning_rate": 2e-05,
205
- "loss": 0.8606,
206
- "step": 66
207
- },
208
- {
209
- "epoch": 0.01,
210
- "learning_rate": 2e-05,
211
- "loss": 0.9849,
212
- "step": 68
213
- },
214
- {
215
- "epoch": 0.01,
216
  "learning_rate": 2e-05,
217
- "loss": 1.0246,
218
  "step": 70
219
  },
220
  {
221
- "epoch": 0.01,
222
  "learning_rate": 2e-05,
223
- "loss": 0.9328,
224
- "step": 72
225
- },
226
- {
227
- "epoch": 0.01,
228
- "learning_rate": 2e-05,
229
- "loss": 0.9118,
230
- "step": 74
231
- },
232
- {
233
- "epoch": 0.01,
234
- "learning_rate": 2e-05,
235
- "loss": 1.0096,
236
- "step": 76
237
- },
238
- {
239
- "epoch": 0.01,
240
- "learning_rate": 2e-05,
241
- "loss": 0.9651,
242
- "step": 78
243
- },
244
- {
245
- "epoch": 0.01,
246
- "learning_rate": 2e-05,
247
- "loss": 1.0071,
248
  "step": 80
249
  },
250
  {
251
- "epoch": 0.01,
252
- "learning_rate": 2e-05,
253
- "loss": 0.9272,
254
- "step": 82
255
- },
256
- {
257
- "epoch": 0.01,
258
- "learning_rate": 2e-05,
259
- "loss": 0.9486,
260
- "step": 84
261
- },
262
- {
263
- "epoch": 0.01,
264
- "learning_rate": 2e-05,
265
- "loss": 1.0818,
266
- "step": 86
267
- },
268
- {
269
- "epoch": 0.01,
270
  "learning_rate": 2e-05,
271
- "loss": 0.9924,
272
- "step": 88
273
- },
274
- {
275
- "epoch": 0.01,
276
- "learning_rate": 2e-05,
277
- "loss": 1.0475,
278
  "step": 90
279
  },
280
  {
281
- "epoch": 0.01,
282
- "learning_rate": 2e-05,
283
- "loss": 1.1217,
284
- "step": 92
285
- },
286
- {
287
- "epoch": 0.01,
288
- "learning_rate": 2e-05,
289
- "loss": 1.0328,
290
- "step": 94
291
- },
292
- {
293
- "epoch": 0.01,
294
  "learning_rate": 2e-05,
295
- "loss": 1.0489,
296
- "step": 96
297
- },
298
- {
299
- "epoch": 0.01,
300
- "learning_rate": 2e-05,
301
- "loss": 0.9313,
302
- "step": 98
303
- },
304
- {
305
- "epoch": 0.01,
306
- "learning_rate": 2e-05,
307
- "loss": 1.9119,
308
  "step": 100
309
  },
310
  {
311
- "epoch": 0.01,
312
- "learning_rate": 2e-05,
313
- "loss": 0.7919,
314
- "step": 102
315
- },
316
- {
317
- "epoch": 0.01,
318
  "learning_rate": 2e-05,
319
- "loss": 0.7663,
320
- "step": 104
321
- },
322
- {
323
- "epoch": 0.01,
324
- "learning_rate": 2e-05,
325
- "loss": 0.8743,
326
- "step": 106
327
- },
328
- {
329
- "epoch": 0.01,
330
- "learning_rate": 2e-05,
331
- "loss": 0.8572,
332
- "step": 108
333
- },
334
- {
335
- "epoch": 0.01,
336
- "learning_rate": 2e-05,
337
- "loss": 0.8919,
338
  "step": 110
339
  },
340
  {
341
- "epoch": 0.01,
342
  "learning_rate": 2e-05,
343
- "loss": 0.8456,
344
- "step": 112
345
- },
346
- {
347
- "epoch": 0.01,
348
- "learning_rate": 2e-05,
349
- "loss": 0.8218,
350
- "step": 114
351
- },
352
- {
353
- "epoch": 0.01,
354
- "learning_rate": 2e-05,
355
- "loss": 0.8647,
356
- "step": 116
357
- },
358
- {
359
- "epoch": 0.01,
360
- "learning_rate": 2e-05,
361
- "loss": 0.8727,
362
- "step": 118
363
- },
364
- {
365
- "epoch": 0.01,
366
- "learning_rate": 2e-05,
367
- "loss": 0.8096,
368
  "step": 120
369
  },
370
  {
371
- "epoch": 0.02,
372
- "learning_rate": 2e-05,
373
- "loss": 0.9242,
374
- "step": 122
375
- },
376
- {
377
- "epoch": 0.02,
378
- "learning_rate": 2e-05,
379
- "loss": 0.7764,
380
- "step": 124
381
- },
382
- {
383
- "epoch": 0.02,
384
  "learning_rate": 2e-05,
385
- "loss": 0.8185,
386
- "step": 126
387
- },
388
- {
389
- "epoch": 0.02,
390
- "learning_rate": 2e-05,
391
- "loss": 0.7131,
392
- "step": 128
393
- },
394
- {
395
- "epoch": 0.02,
396
- "learning_rate": 2e-05,
397
- "loss": 0.8053,
398
  "step": 130
399
  },
400
  {
401
- "epoch": 0.02,
402
- "learning_rate": 2e-05,
403
- "loss": 0.8017,
404
- "step": 132
405
- },
406
- {
407
- "epoch": 0.02,
408
- "learning_rate": 2e-05,
409
- "loss": 0.8523,
410
- "step": 134
411
- },
412
- {
413
- "epoch": 0.02,
414
- "learning_rate": 2e-05,
415
- "loss": 0.8351,
416
- "step": 136
417
- },
418
- {
419
- "epoch": 0.02,
420
- "learning_rate": 2e-05,
421
- "loss": 0.7854,
422
- "step": 138
423
- },
424
- {
425
- "epoch": 0.02,
426
  "learning_rate": 2e-05,
427
- "loss": 0.8708,
428
  "step": 140
429
  },
430
  {
431
- "epoch": 0.02,
432
- "learning_rate": 2e-05,
433
- "loss": 0.8467,
434
- "step": 142
435
- },
436
- {
437
- "epoch": 0.02,
438
- "learning_rate": 2e-05,
439
- "loss": 0.8095,
440
- "step": 144
441
- },
442
- {
443
- "epoch": 0.02,
444
  "learning_rate": 2e-05,
445
- "loss": 0.6581,
446
- "step": 146
447
- },
448
- {
449
- "epoch": 0.02,
450
- "learning_rate": 2e-05,
451
- "loss": 0.9329,
452
- "step": 148
453
- },
454
- {
455
- "epoch": 0.02,
456
- "learning_rate": 2e-05,
457
- "loss": 1.4044,
458
  "step": 150
459
  },
460
  {
461
- "epoch": 0.02,
462
- "learning_rate": 2e-05,
463
- "loss": 0.6767,
464
- "step": 152
465
- },
466
- {
467
- "epoch": 0.02,
468
- "learning_rate": 2e-05,
469
- "loss": 0.761,
470
- "step": 154
471
- },
472
- {
473
- "epoch": 0.02,
474
- "learning_rate": 2e-05,
475
- "loss": 0.792,
476
- "step": 156
477
- },
478
- {
479
- "epoch": 0.02,
480
- "learning_rate": 2e-05,
481
- "loss": 0.786,
482
- "step": 158
483
- },
484
- {
485
- "epoch": 0.02,
486
  "learning_rate": 2e-05,
487
- "loss": 0.8145,
488
  "step": 160
489
  },
490
  {
491
- "epoch": 0.02,
492
- "learning_rate": 2e-05,
493
- "loss": 0.8232,
494
- "step": 162
495
- },
496
- {
497
- "epoch": 0.02,
498
- "learning_rate": 2e-05,
499
- "loss": 0.755,
500
- "step": 164
501
- },
502
- {
503
- "epoch": 0.02,
504
  "learning_rate": 2e-05,
505
- "loss": 0.8011,
506
- "step": 166
507
- },
508
- {
509
- "epoch": 0.02,
510
- "learning_rate": 2e-05,
511
- "loss": 0.8464,
512
- "step": 168
513
- },
514
- {
515
- "epoch": 0.02,
516
- "learning_rate": 2e-05,
517
- "loss": 0.8536,
518
  "step": 170
519
  },
520
  {
521
- "epoch": 0.02,
522
- "learning_rate": 2e-05,
523
- "loss": 0.7742,
524
- "step": 172
525
- },
526
- {
527
- "epoch": 0.02,
528
- "learning_rate": 2e-05,
529
- "loss": 0.804,
530
- "step": 174
531
- },
532
- {
533
- "epoch": 0.02,
534
- "learning_rate": 2e-05,
535
- "loss": 0.8144,
536
- "step": 176
537
- },
538
- {
539
- "epoch": 0.02,
540
- "learning_rate": 2e-05,
541
- "loss": 0.729,
542
- "step": 178
543
- },
544
- {
545
- "epoch": 0.02,
546
  "learning_rate": 2e-05,
547
- "loss": 0.7371,
548
  "step": 180
549
  },
550
  {
551
- "epoch": 0.02,
552
- "learning_rate": 2e-05,
553
- "loss": 0.7174,
554
- "step": 182
555
- },
556
- {
557
- "epoch": 0.02,
558
- "learning_rate": 2e-05,
559
- "loss": 0.7451,
560
- "step": 184
561
- },
562
- {
563
- "epoch": 0.02,
564
- "learning_rate": 2e-05,
565
- "loss": 0.8926,
566
- "step": 186
567
- },
568
- {
569
- "epoch": 0.02,
570
- "learning_rate": 2e-05,
571
- "loss": 0.8575,
572
- "step": 188
573
- },
574
- {
575
- "epoch": 0.02,
576
  "learning_rate": 2e-05,
577
- "loss": 0.9674,
578
  "step": 190
579
  },
580
  {
581
- "epoch": 0.02,
582
- "learning_rate": 2e-05,
583
- "loss": 0.7664,
584
- "step": 192
585
- },
586
- {
587
- "epoch": 0.02,
588
- "learning_rate": 2e-05,
589
- "loss": 0.8667,
590
- "step": 194
591
- },
592
- {
593
- "epoch": 0.02,
594
- "learning_rate": 2e-05,
595
- "loss": 0.8064,
596
- "step": 196
597
- },
598
- {
599
- "epoch": 0.02,
600
- "learning_rate": 2e-05,
601
- "loss": 0.8116,
602
- "step": 198
603
- },
604
- {
605
- "epoch": 0.02,
606
  "learning_rate": 2e-05,
607
- "loss": 1.1953,
608
  "step": 200
609
  }
610
  ],
611
- "max_steps": 1000,
612
- "num_train_epochs": 1,
613
- "total_flos": 2.748430712070144e+16,
614
  "trial_name": null,
615
  "trial_params": null
616
  }
 
  {
  "best_metric": null,
  "best_model_checkpoint": null,
+ "epoch": 0.36272953978689637,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
  {
+ "epoch": 0.02,
  "learning_rate": 2e-05,
+ "loss": 1.9524,
  "step": 10
  },
  {
+ "epoch": 0.04,
  "learning_rate": 2e-05,
+ "loss": 1.9249,
  "step": 20
  },
  {
+ "epoch": 0.05,
  "learning_rate": 2e-05,
+ "loss": 1.9685,
  "step": 30
  },
  {
+ "epoch": 0.07,
  "learning_rate": 2e-05,
+ "loss": 2.1624,
  "step": 40
  },
  {
+ "epoch": 0.09,
  "learning_rate": 2e-05,
+ "loss": 2.3455,
  "step": 50
  },
  {
+ "epoch": 0.11,
  "learning_rate": 2e-05,
+ "loss": 1.7288,
  "step": 60
  },
  {
+ "epoch": 0.13,
  "learning_rate": 2e-05,
+ "loss": 1.712,
  "step": 70
  },
  {
+ "epoch": 0.15,
  "learning_rate": 2e-05,
+ "loss": 1.8443,
  "step": 80
  },
  {
+ "epoch": 0.16,
  "learning_rate": 2e-05,
+ "loss": 1.9731,
  "step": 90
  },
  {
+ "epoch": 0.18,
  "learning_rate": 2e-05,
+ "loss": 2.1967,
  "step": 100
  },
  {
+ "epoch": 0.2,
  "learning_rate": 2e-05,
+ "loss": 1.6612,
  "step": 110
  },
  {
+ "epoch": 0.22,
  "learning_rate": 2e-05,
+ "loss": 1.6775,
  "step": 120
  },
  {
+ "epoch": 0.24,
  "learning_rate": 2e-05,
+ "loss": 1.7389,
  "step": 130
  },
  {
+ "epoch": 0.25,
  "learning_rate": 2e-05,
+ "loss": 1.9169,
  "step": 140
  },
  {
+ "epoch": 0.27,
  "learning_rate": 2e-05,
+ "loss": 2.2075,
  "step": 150
  },
  {
+ "epoch": 0.29,
  "learning_rate": 2e-05,
+ "loss": 1.6245,
  "step": 160
  },
  {
+ "epoch": 0.31,
  "learning_rate": 2e-05,
+ "loss": 1.6433,
  "step": 170
  },
  {
+ "epoch": 0.33,
  "learning_rate": 2e-05,
+ "loss": 1.7877,
  "step": 180
  },
  {
+ "epoch": 0.34,
  "learning_rate": 2e-05,
+ "loss": 1.9041,
  "step": 190
  },
  {
+ "epoch": 0.36,
  "learning_rate": 2e-05,
+ "loss": 2.1649,
  "step": 200
  }
  ],
+ "max_steps": 1875,
+ "num_train_epochs": 4,
+ "total_flos": 4967090265784320.0,
  "trial_name": null,
  "trial_params": null
  }
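
The updated `trainer_state.json` keeps one log entry every 10 steps. A minimal sketch for pulling the loss curve back out of it:

```python
# Minimal sketch: read step/loss pairs from the trainer state shown above.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    print(entry["step"], entry["loss"])  # e.g. 10 1.9524 ... 200 2.1649
```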
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a46f9ccb8a40684c8a4cca7430124490c6b150e680d2a31cb618fee1006ba524
- size 3963
+ oid sha256:643cd8215538899410f2288daee01465ee05e777a4971bf0fec462ea7d279181
+ size 5691
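
`training_args.bin` is the pickled `TrainingArguments` object saved by the `Trainer`. A sketch for inspecting it (the attribute names are standard `TrainingArguments` fields, not values confirmed by this commit):

```python
# Minimal sketch: load and inspect the saved TrainingArguments.
import torch

# weights_only=False is needed on recent PyTorch, since this is a pickled object.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.max_steps)
```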