BhavyaMuni committed
Commit 57dbdbd · 1 Parent(s): 80e5569

End of training

added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+ "[PAD]": 50257
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:96e6942f5e8166b067278365ba3ee056966b61b47b9dfacc1695973930810fe2
+ oid sha256:d6805b278b46f0f1cf983948b252e3303a54cbd5a1ac44a323418ae1a3d6b7f4
  size 333970169
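The pytorch_model.bin entry is a Git LFS pointer, so only the object hash (oid) changed while the recorded size stayed the same. As a minimal sketch (the local file path is an assumption; the expected digest is taken from the new pointer above), a downloaded copy of the weights can be checked against the pointer like this:

```python
import hashlib

# Expected digest from the updated LFS pointer in this commit.
EXPECTED = "d6805b278b46f0f1cf983948b252e3303a54cbd5a1ac44a323418ae1a3d6b7f4"

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash a file in chunks so large checkpoints don't need to fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Path assumed: wherever the checkpoint was downloaded to.
print(sha256_of("pytorch_model.bin") == EXPECTED)
```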
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "bos_token": "<|endoftext|>",
+ "eos_token": "<|endoftext|>",
+ "pad_token": "[PAD]",
+ "unk_token": "<|endoftext|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+ "add_prefix_space": false,
+ "bos_token": "<|endoftext|>",
+ "eos_token": "<|endoftext|>",
+ "model_max_length": 1024,
+ "name_or_path": "distilgpt2",
+ "pad_token": "[PAD]",
+ "special_tokens_map_file": null,
+ "tokenizer_class": "GPT2Tokenizer",
+ "unk_token": "<|endoftext|>"
+ }
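Taken together, added_tokens.json, special_tokens_map.json, and tokenizer_config.json describe a distilgpt2 GPT2Tokenizer whose 50,257-token base vocabulary is extended with a [PAD] token at id 50257. A minimal, hedged sketch of how such a setup is typically produced with the transformers library (the save directory name is an assumption, not part of this commit):

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

# GPT-2 style tokenizers have 50,257 tokens (ids 0-50256) and no pad token by default.
tokenizer = AutoTokenizer.from_pretrained("distilgpt2")

# Registering "[PAD]" appends it after the existing vocabulary,
# which places it at id 50257 as recorded in added_tokens.json.
tokenizer.add_special_tokens({"pad_token": "[PAD]"})

# The embedding matrix must grow to cover the new token id.
model = AutoModelForCausalLM.from_pretrained("distilgpt2")
model.resize_token_embeddings(len(tokenizer))

# Saving writes added_tokens.json, special_tokens_map.json, tokenizer_config.json,
# tokenizer.json, vocab.json, and merges.txt -- the files added in this commit.
tokenizer.save_pretrained("output_dir")  # directory name assumed
model.save_pretrained("output_dir")
```

The resize step matters because the new pad token id (50257) lies outside the original embedding matrix; without it, the saved checkpoint would be inconsistent with the enlarged tokenizer.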
vocab.json ADDED
The diff for this file is too large to render. See raw diff