rachidnichan committed on
Commit
c5aae74
·
verified ·
1 Parent(s): 8863475

Upgrade to v1.1: Better Tokenizer and 54k Augmented Data

Browse files
config.json CHANGED
@@ -5,19 +5,19 @@
5
  "GPT2LMHeadModel"
6
  ],
7
  "attn_pdrop": 0.1,
8
- "bos_token_id": 50256,
9
  "dtype": "float32",
10
  "embd_pdrop": 0.1,
11
- "eos_token_id": 50256,
12
  "initializer_range": 0.02,
13
  "layer_norm_epsilon": 1e-05,
14
  "model_type": "gpt2",
15
- "n_ctx": 1024,
16
  "n_embd": 768,
17
  "n_head": 12,
18
  "n_inner": null,
19
  "n_layer": 12,
20
- "n_positions": 1024,
21
  "pad_token_id": null,
22
  "reorder_and_upcast_attn": false,
23
  "resid_pdrop": 0.1,
@@ -28,14 +28,8 @@
28
  "summary_proj_to_labels": true,
29
  "summary_type": "cls_index",
30
  "summary_use_proj": true,
31
- "task_specific_params": {
32
- "text-generation": {
33
- "do_sample": true,
34
- "max_length": 50
35
- }
36
- },
37
  "tie_word_embeddings": true,
38
  "transformers_version": "5.0.0",
39
  "use_cache": false,
40
- "vocab_size": 5000
41
  }
 
5
  "GPT2LMHeadModel"
6
  ],
7
  "attn_pdrop": 0.1,
8
+ "bos_token_id": 52000,
9
  "dtype": "float32",
10
  "embd_pdrop": 0.1,
11
+ "eos_token_id": 52000,
12
  "initializer_range": 0.02,
13
  "layer_norm_epsilon": 1e-05,
14
  "model_type": "gpt2",
15
+ "n_ctx": 128,
16
  "n_embd": 768,
17
  "n_head": 12,
18
  "n_inner": null,
19
  "n_layer": 12,
20
+ "n_positions": 128,
21
  "pad_token_id": null,
22
  "reorder_and_upcast_attn": false,
23
  "resid_pdrop": 0.1,
 
28
  "summary_proj_to_labels": true,
29
  "summary_type": "cls_index",
30
  "summary_use_proj": true,
 
 
 
 
 
 
31
  "tie_word_embeddings": true,
32
  "transformers_version": "5.0.0",
33
  "use_cache": false,
34
+ "vocab_size": 52001
35
  }
generation_config.json CHANGED
@@ -1,6 +1,9 @@
1
  {
2
  "_from_model_config": true,
3
- "bos_token_id": 50256,
4
- "eos_token_id": 50256,
5
- "transformers_version": "5.0.0"
 
 
 
6
  }
 
1
  {
2
  "_from_model_config": true,
3
+ "bos_token_id": 52000,
4
+ "eos_token_id": 52000,
5
+ "output_attentions": false,
6
+ "output_hidden_states": false,
7
+ "transformers_version": "5.0.0",
8
+ "use_cache": true
9
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:5e6768300ec7cd41b760bb054abf2cf77de92cde8ad35479fbe4a115c11b76f4
3
- size 358744704
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:07c8380535cd585257172f2535b27b517ff8dfb45783184732301b9d0beefd41
3
+ size 500379264
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -4,8 +4,8 @@
4
  "bos_token": "<|endoftext|>",
5
  "eos_token": "<|endoftext|>",
6
  "errors": "replace",
7
- "is_local": false,
8
- "model_max_length": 1024,
9
  "pad_token": "<|endoftext|>",
10
  "tokenizer_class": "GPT2Tokenizer",
11
  "unk_token": "<|endoftext|>"
 
4
  "bos_token": "<|endoftext|>",
5
  "eos_token": "<|endoftext|>",
6
  "errors": "replace",
7
+ "is_local": true,
8
+ "model_max_length": 1000000000000000019884624838656,
9
  "pad_token": "<|endoftext|>",
10
  "tokenizer_class": "GPT2Tokenizer",
11
  "unk_token": "<|endoftext|>"
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:65fc6222c3494c4a3f1bd9da9a8325ad126393761745ec56660fedfb9ef33704
3
  size 5137
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d6a12a3feb2c9f1e766a942747742670b79d3a779e8e786865fd0b191bf77f1e
3
  size 5137