Upload folder using huggingface_hub

- README.md +2 -2
- merges.txt +0 -0
- tokenizer.json +0 -0
- tokenizer_config.json +3 -2
- vocab.json +0 -0
README.md CHANGED

@@ -20,7 +20,7 @@ A **Byte-Level BPE** tokenizer trained on **numeric** data from Fineweb-2-HQ.
 | Algorithm | Byte-Level BPE |
 | Language | `numeric` |
 | Target Vocab Size | 10,007 |
-| Final Vocab Size |
+| Final Vocab Size | 9,430 |
 | Pre-tokenizer | byte_level |
 | Number handling | ltr_4digit |
 | Contraction handling | False |

@@ -46,4 +46,4 @@ tokens = tokenizer.encode("Hello, world!")
 ## Sample Encoding
 | Text | Tokens | Token IDs |
 |------|--------|-----------|
-| `12345009 mod 67` | `
+| `12345009 mod 67` | `1234, 5009, , mod, , 67` | `633, 3614, 6, 4, 6, 53` |
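As context for the README change above: the filled-in sample row shows the `ltr_4digit` number handling splitting `12345009` left to right into 4-digit chunks (`1234`, `5009`). The sketch below is a minimal, assumed usage example for reproducing that row; `path/to/this/repo` is a placeholder for the actual repo id or local checkout, and the expected outputs are copied from the table, not re-verified.

```python
# Minimal sketch (assumes transformers is installed and this repo is available
# locally or on the Hub; "path/to/this/repo" is a placeholder).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")

text = "12345009 mod 67"
tokens = tokenizer.tokenize(text)
ids = tokenizer.encode(text, add_special_tokens=False)

# Per the Sample Encoding table, the number is split left to right into
# 4-digit chunks (1234 | 5009) and the IDs should be [633, 3614, 6, 4, 6, 53].
print(tokens)
print(ids)
```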
merges.txt CHANGED

The diff for this file is too large to render. See raw diff.
tokenizer.json CHANGED

The diff for this file is too large to render. See raw diff.
tokenizer_config.json CHANGED

@@ -69,5 +69,6 @@
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "<pad>",
   "tokenizer_class": "PreTrainedTokenizerFast",
-  "unk_token": "<unk>"
-}
+  "unk_token": "<unk>",
+  "number_handling": "ltr_4digit"
+}
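The `number_handling` key added in this commit appears to be custom metadata stored alongside the standard fast-tokenizer fields rather than a built-in `transformers` option. A minimal sketch of reading it back from the updated file, assuming the file sits in the current directory:

```python
# Minimal sketch: read the custom "number_handling" field added in this commit.
# Plain JSON access; transformers itself does not interpret this key.
import json

with open("tokenizer_config.json") as f:
    config = json.load(f)

print(config["tokenizer_class"])   # "PreTrainedTokenizerFast"
print(config["unk_token"])         # "<unk>"
print(config["number_handling"])   # "ltr_4digit"
```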
vocab.json CHANGED

The diff for this file is too large to render. See raw diff.