strumber committed on
Commit f6141de · verified · 1 Parent(s): 3cdc4e3

Upload folder using huggingface_hub

Files changed (4):
  1. model.pt +3 -0
  2. special_tokens_map.json +30 -0
  3. tokenizer.json +195 -0
  4. tokenizer_config.json +44 -0
model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59fa3abd17a7e8ee27f3802143a9130d7f4694699d4de554efbe4740abb83356
+ size 356235922
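
model.pt itself is only a Git LFS pointer; the real ~356 MB checkpoint is fetched on an LFS clone or via huggingface_hub. A minimal inspection sketch, assuming PyTorch; the pointer records only the blob's sha256 and size, so treating the file as a state dict is an assumption:

import torch

# Load on CPU; model.pt may hold a full module or a bare state_dict --
# the LFS pointer does not say which, so inspect before wiring it up.
ckpt = torch.load("model.pt", map_location="cpu")
if isinstance(ckpt, dict):
    for name, value in list(ckpt.items())[:5]:
        shape = getattr(value, "shape", None)
        print(name, tuple(shape) if shape is not None else type(value))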
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|pad|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|unknown|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
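
Each entry follows the transformers AddedToken schema (content plus the lstrip/normalized/rstrip/single_word flags). A quick sanity-check sketch, assuming the file sits in the current directory:

import json

with open("special_tokens_map.json") as f:
    smap = json.load(f)

# Expect the four roles to line up with ids 0-3 in tokenizer.json's vocab.
for role, tok in smap.items():
    print(f"{role}: {tok['content']}")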
tokenizer.json ADDED
@@ -0,0 +1,195 @@
+ {
+   "version": "1.0",
+   "truncation": {
+     "direction": "Right",
+     "max_length": 64,
+     "strategy": "LongestFirst",
+     "stride": 0
+   },
+   "padding": {
+     "strategy": {
+       "Fixed": 64
+     },
+     "direction": "Right",
+     "pad_to_multiple_of": null,
+     "pad_id": 0,
+     "pad_type_id": 0,
+     "pad_token": "<|pad|>"
+   },
+   "added_tokens": [
+     {
+       "id": 0,
+       "content": "<|pad|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 1,
+       "content": "<|startoftext|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 2,
+       "content": "<|endoftext|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 3,
+       "content": "<|unknown|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": null,
+   "pre_tokenizer": {
+     "type": "Sequence",
+     "pretokenizers": [
+       {
+         "type": "Whitespace"
+       },
+       {
+         "type": "Split",
+         "pattern": {
+           "Regex": "\\d|[QBRN]"
+         },
+         "behavior": "MergedWithPrevious",
+         "invert": false
+       }
+     ]
+   },
+   "post_processor": {
+     "type": "TemplateProcessing",
+     "single": [
+       {
+         "SpecialToken": {
+           "id": "<|startoftext|>",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       }
+     ],
+     "pair": [
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "B",
+           "type_id": 1
+         }
+       }
+     ],
+     "special_tokens": {
+       "<|startoftext|>": {
+         "id": "<|startoftext|>",
+         "ids": [
+           1
+         ],
+         "tokens": [
+           "<|startoftext|>"
+         ]
+       }
+     }
+   },
+   "decoder": null,
+   "model": {
+     "type": "WordLevel",
+     "vocab": {
+       "<|pad|>": 0,
+       "<|startoftext|>": 1,
+       "<|endoftext|>": 2,
+       "<|unknown|>": 3,
+       "a1": 4,
+       "b1": 5,
+       "c1": 6,
+       "d1": 7,
+       "e1": 8,
+       "f1": 9,
+       "g1": 10,
+       "h1": 11,
+       "a2": 12,
+       "b2": 13,
+       "c2": 14,
+       "d2": 15,
+       "e2": 16,
+       "f2": 17,
+       "g2": 18,
+       "h2": 19,
+       "a3": 20,
+       "b3": 21,
+       "c3": 22,
+       "d3": 23,
+       "e3": 24,
+       "f3": 25,
+       "g3": 26,
+       "h3": 27,
+       "a4": 28,
+       "b4": 29,
+       "c4": 30,
+       "d4": 31,
+       "e4": 32,
+       "f4": 33,
+       "g4": 34,
+       "h4": 35,
+       "a5": 36,
+       "b5": 37,
+       "c5": 38,
+       "d5": 39,
+       "e5": 40,
+       "f5": 41,
+       "g5": 42,
+       "h5": 43,
+       "a6": 44,
+       "b6": 45,
+       "c6": 46,
+       "d6": 47,
+       "e6": 48,
+       "f6": 49,
+       "g6": 50,
+       "h6": 51,
+       "a7": 52,
+       "b7": 53,
+       "c7": 54,
+       "d7": 55,
+       "e7": 56,
+       "f7": 57,
+       "g7": 58,
+       "h7": 59,
+       "a8": 60,
+       "b8": 61,
+       "c8": 62,
+       "d8": 63,
+       "e8": 64,
+       "f8": 65,
+       "g8": 66,
+       "h8": 67,
+       "Q": 68,
+       "R": 69,
+       "B": 70,
+       "N": 71
+     },
+     "unk_token": "<|unknown|>"
+   }
+ }
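
Read together, this is a word-level chess-move tokenizer: the vocabulary is the 64 board squares (a1 through h8) plus the piece letters Q, R, B, N; the pre-tokenizer splits on whitespace and then after every digit or piece letter (MergedWithPrevious keeps each digit attached to its file letter, so a UCI move like e2e4 becomes ["e2", "e4"]); the post-processor prepends <|startoftext|>; and every sequence is truncated and padded to a fixed 64 tokens. A minimal sketch with the 🤗 tokenizers library (the moves are illustrative):

from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")

enc = tok.encode("e2e4 g1f3")
print(enc.tokens[:5])  # ['<|startoftext|>', 'e2', 'e4', 'g1', 'f3']
print(enc.ids[:5])     # [1, 16, 32, 10, 25]
print(len(enc.ids))    # 64 -- padded out with <|pad|> (id 0)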
tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<|pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<|startoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<|unknown|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|startoftext|>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "extra_special_tokens": {},
+   "model_max_length": 1024,
+   "pad_token": "<|pad|>",
+   "tokenizer_class": "PreTrainedTokenizer",
+   "unk_token": "<|unknown|>"
+ }
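
The config names the abstract PreTrainedTokenizer as tokenizer_class, and no custom class ships with the repo, so AutoTokenizer may not resolve it directly. A minimal loading sketch that goes through PreTrainedTokenizerFast instead, wiring the same special tokens by hand:

from transformers import PreTrainedTokenizerFast

tok = PreTrainedTokenizerFast(
    tokenizer_file="tokenizer.json",
    bos_token="<|startoftext|>",
    eos_token="<|endoftext|>",
    pad_token="<|pad|>",
    unk_token="<|unknown|>",
    model_max_length=1024,
)
print(tok("e2e4 e7e5").input_ids)  # expected: [1, 16, 32, 56, 40]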