Tags: Text Generation, Transformers, Safetensors, PyTorch, English, gpt_neox, causal-lm, pythia, safety, unlearning, data-filtering, interpretability, pretraining, eleutherai, gpt-neox, wmdp, cbrn, tamper-resistance, research, model-suite, 6.9b, circuit-breaking, knowledge-filtering, open-weight, biothreat, safety-research, model-diffing, training-dynamics, text-generation-inference
Upload tokenizer
Files changed:
- special_tokens_map.json +23 -1
- tokenizer.json +24 -4
- tokenizer_config.json +9 -2
special_tokens_map.json CHANGED
@@ -1 +1,23 @@
-{
+{
+  "bos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
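After this change all three special tokens resolve to the same <|endoftext|> entry, with no stripping or normalization applied. A minimal sketch of checking that with transformers ("path/to/checkpoint" is a placeholder, not this repo's actual id):

# Sketch: verify the special-token mapping defined above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/checkpoint")

# bos, eos, and unk all map to the same token, so their ids coincide.
print(tok.bos_token, tok.eos_token, tok.unk_token)  # <|endoftext|> three times
assert tok.bos_token_id == tok.eos_token_id == tok.unk_token_id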
tokenizer.json CHANGED
@@ -239,10 +239,30 @@
     "use_regex": true
   },
   "post_processor": {
-    "type": "ByteLevel",
-    "add_prefix_space": true,
-    "trim_offsets": true,
-    "use_regex": true
+    "type": "TemplateProcessing",
+    "single": [
+      {
+        "Sequence": {
+          "id": "A",
+          "type_id": 0
+        }
+      }
+    ],
+    "pair": [
+      {
+        "Sequence": {
+          "id": "A",
+          "type_id": 0
+        }
+      },
+      {
+        "Sequence": {
+          "id": "B",
+          "type_id": 1
+        }
+      }
+    ],
+    "special_tokens": {}
   },
   "decoder": {
     "type": "ByteLevel",
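The new post-processor is the identity template: it prepends and appends nothing, and only assigns type_id 0 to the first sequence and type_id 1 to the second. A sketch of building the equivalent object with the tokenizers library (variable names are illustrative):

# Sketch: the TemplateProcessing added above, expressed via the
# tokenizers API. "$A" / "$B:1" are the template-string spelling of
# the Sequence entries in the JSON; special_tokens=[] matches the
# empty "special_tokens" map.
from tokenizers.processors import TemplateProcessing

post_processor = TemplateProcessing(
    single="$A",        # one sequence, type_id 0
    pair="$A $B:1",     # first sequence type_id 0, second type_id 1
    special_tokens=[],  # no tokens inserted by the template
)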
tokenizer_config.json CHANGED
@@ -1,4 +1,7 @@
 {
+  "add_bos_token": false,
+  "add_eos_token": false,
+  "add_prefix_space": false,
   "added_tokens_decoder": {
     "0": {
       "content": "<|endoftext|>",
@@ -201,8 +204,12 @@
       "special": false
     }
   },
-  "
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|endoftext|>",
   "extra_special_tokens": {},
   "model_max_length": 1000000000000000019884624838656,
-  "
+  "pad_token": null,
+  "tokenizer_class": "GPTNeoXTokenizer",
+  "unk_token": "<|endoftext|>"
 }
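With "add_bos_token" and "add_eos_token" both false, encoding a string adds no special tokens, and "pad_token": null means padding must be configured explicitly before batched use. A short sketch of the resulting behavior (placeholder checkpoint path again):

# Sketch: under the config above, nothing is auto-prepended or appended.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/checkpoint")

ids = tok("hello world").input_ids
assert tok.eos_token_id not in ids  # no <|endoftext|> appended

# pad_token is null in the config; a common choice is to reuse eos:
tok.pad_token = tok.eos_token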