gsaltintas committed
Commit a7672fa · verified · 1 Parent(s): 337270f

Upload folder using huggingface_hub

Files changed (4)
  1. README.md +2 -4
  2. tokenizer.json +127 -1
  3. tokenizer_config.json +74 -1
  4. vocab.json +10 -10
README.md CHANGED
@@ -1,14 +1,12 @@
 ---
 license: mit
 language:
-- und
+- und # ISO 639-3 code or "und" if not identifiable
 tags:
 - tokenizer
 - bpe
 - flexitok
 - fineweb2
-datasets:
-- flexitok/mod-arithmetic
 ---
 
 # Byte-Level BPE Tokenizer: numeric (0K)
@@ -48,4 +46,4 @@ tokens = tokenizer.encode("Hello, world!")
 ## Sample Encoding
 | Text | Tokens | Token IDs |
 |------|--------|-----------|
-| `1234500119 mod 67` | `1, 2, 3, 4, 5, 0, 0, 1, 1, 9, , mod, , 6, 7` | `8, 9, 10, 11, 12, 7, 7, 8, 8, 16, 6, 4, 6, 13, 14` |
+| `123500119 mod 67` | `1, 2, 3, 5, 0, 0, 1, 1, 9, , mod, , 6, 7` | `8, 9, 10, 12, 7, 7, 8, 8, 16, 6, 4, 6, 13, 14` |
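
The updated sample-encoding row can be checked against the `tokenizer.json` changed below. A minimal sketch, assuming a local copy of that file and the `tokenizers` package installed:

```python
# Sketch: reproduce the README's sample-encoding row from the tokenizer.json in this commit.
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")  # assumes the file from this repo is in the working directory

enc = tok.encode("123500119 mod 67")
print(enc.tokens)  # per the table: 1, 2, 3, 5, 0, 0, 1, 1, 9, ' ', mod, ' ', 6, 7
print(enc.ids)     # per the table: [8, 9, 10, 12, 7, 7, 8, 8, 16, 6, 4, 6, 13, 14]
```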
tokenizer.json CHANGED
@@ -1 +1,127 @@
-{"<unk>": [0], "~SPECIAL~ALIGNED~BOS~SYMBOL~": [1], "</s>": [2], "<pad>": [3], "mod": [4], "=": [5], " ": [6], "0": [7], "1": [8], "2": [9], "3": [10], "4": [11], "5": [12], "6": [13], "7": [14], "8": [15], "9": [16]}
+{
+  "version": "1.0",
+  "truncation": null,
+  "padding": null,
+  "added_tokens": [
+    {
+      "id": 0,
+      "content": "<unk>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 1,
+      "content": "<s>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 2,
+      "content": "</s>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 3,
+      "content": "<pad>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 4,
+      "content": "mod",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 5,
+      "content": "=",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 6,
+      "content": " ",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    }
+  ],
+  "normalizer": null,
+  "pre_tokenizer": {
+    "type": "Sequence",
+    "pretokenizers": [
+      {
+        "type": "Split",
+        "pattern": {
+          "Regex": "\\p{N}"
+        },
+        "behavior": "Isolated",
+        "invert": false
+      },
+      {
+        "type": "ByteLevel",
+        "add_prefix_space": false,
+        "trim_offsets": true,
+        "use_regex": false
+      }
+    ]
+  },
+  "post_processor": null,
+  "decoder": {
+    "type": "ByteLevel",
+    "add_prefix_space": true,
+    "trim_offsets": true,
+    "use_regex": true
+  },
+  "model": {
+    "type": "BPE",
+    "dropout": null,
+    "unk_token": "<unk>",
+    "continuing_subword_prefix": null,
+    "end_of_word_suffix": null,
+    "fuse_unk": false,
+    "byte_fallback": false,
+    "ignore_merges": false,
+    "vocab": {
+      "<unk>": 0,
+      "<s>": 1,
+      "</s>": 2,
+      "<pad>": 3,
+      "mod": 4,
+      "=": 5,
+      " ": 6,
+      "0": 7,
+      "1": 8,
+      "2": 9,
+      "3": 10,
+      "4": 11,
+      "5": 12,
+      "6": 13,
+      "7": 14,
+      "8": 15,
+      "9": 16
+    },
+    "merges": []
+  }
+}
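
The `pre_tokenizer` above chains a `Split` on `\p{N}` with `Isolated` behavior and a byte-level step, so every digit is cut into its own piece before the (empty) merge list is applied. A minimal sketch of that behavior using the `tokenizers` Python API, for illustration only (not part of this commit):

```python
# Sketch: rebuild the pre_tokenizer sequence from tokenizer.json and inspect how it splits digits.
from tokenizers import Regex, pre_tokenizers

pre = pre_tokenizers.Sequence([
    pre_tokenizers.Split(Regex(r"\p{N}"), behavior="isolated", invert=False),
    pre_tokenizers.ByteLevel(add_prefix_space=False, use_regex=False),
])

print(pre.pre_tokenize_str("12 mod 7"))
# each digit ends up in its own piece ('1', '2', ...), so numbers are never merged by BPE
```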
tokenizer_config.json CHANGED
@@ -1 +1,74 @@
-{"data": {"tokenizer": {"name": "huggingface", "path": "flexitok/mod-tokenizers-individual"}}}
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "4": {
+      "content": "mod",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "5": {
+      "content": "=",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "6": {
+      "content": " ",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [
+    "mod",
+    "=",
+    " "
+  ],
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "extra_special_tokens": {},
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<pad>",
+  "tokenizer_class": "PreTrainedTokenizerFast",
+  "unk_token": "<unk>",
+  "number_handling": "individual"
+}
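
With the old pointer-style config replaced by a full `PreTrainedTokenizerFast` configuration, the folder can be loaded directly through `transformers`. A minimal sketch, assuming a local checkout of this repo (the path is a placeholder, not a real identifier):

```python
# Sketch: load the tokenizer via the new tokenizer_config.json; "path/to/this/repo" is a placeholder.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.pad_token, tokenizer.unk_token)  # <s> </s> <pad> <unk>
print(tokenizer.additional_special_tokens)  # ['mod', '=', ' '] as declared above
```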
vocab.json CHANGED
@@ -1,19 +1,19 @@
 {
+  "=": 5,
+  "2": 9,
+  "</s>": 2,
   "<unk>": 0,
-  "mod": 4,
   "4": 11,
-  "=": 5,
   "1": 8,
   "<pad>": 3,
-  "7": 14,
-  "5": 12,
   "3": 10,
-  "9": 16,
+  " ": 6,
+  "mod": 4,
   "<s>": 1,
-  "2": 9,
-  "0": 7,
-  "8": 15,
+  "9": 16,
+  "5": 12,
+  "7": 14,
   "6": 13,
-  " ": 6,
-  "</s>": 2
+  "8": 15,
+  "0": 7
 }
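
The vocab.json change only reorders keys; the token-to-id mapping itself is unchanged, which a quick check makes explicit (a sketch, assuming the file is local):

```python
# Sketch: key order in vocab.json is irrelevant once parsed; the mapping is the same before and after.
import json

with open("vocab.json") as f:  # assumes the file from this repo is in the working directory
    vocab = json.load(f)

assert vocab["mod"] == 4 and vocab[" "] == 6 and vocab["9"] == 16
print(len(vocab))  # 17 entries: 4 special tokens, 'mod', '=', ' ', and the digits 0-9
```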