Syllable is the Token
Browse files- EVALUATION.md +273 -0
- LICENSE +51 -0
- README.md +87 -3
- encoder.py +91 -0
- linguis_trie.py +226 -0
- tokenizer.json +0 -0
- vocab.json +0 -0
EVALUATION.md
ADDED
|
@@ -0,0 +1,273 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SGPE Battle Test and Evaluation Report
|
| 2 |
+
================================================================================
|
| 3 |
+
BATTERY 1: LINGUISTIC COMPLEXITY TEST (2,000 Edge-Case Words)
|
| 4 |
+
================================================================================
|
| 5 |
+
Generated 2000 complex words across multiple categories
|
| 6 |
+
Layer1 integrity: 100%|████████████████████████████| 2000/2000 [00:00<00:00, 32898.70 word/s]
|
| 7 |
+
Testing with leading-space prefix...
|
| 8 |
+
leading-space check: 100%|███████████████████████████| 500/500 [00:00<00:00, 49599.17 word/s]
|
| 9 |
+
|
| 10 |
+
Category Total Pass Fail
|
| 11 |
+
------------------------------------------------------
|
| 12 |
+
aadhyaathmika 1 1 0
|
| 13 |
+
aakhyaanaya 1 1 0
|
| 14 |
+
aathmaya 1 1 0
|
| 15 |
+
abhidhamma 1 1 0
|
| 16 |
+
adhyaapanaya 1 1 0
|
| 17 |
+
adhyaksha 1 1 0
|
| 18 |
+
aitihaasika 1 1 0
|
| 19 |
+
aniccataava 1 1 0
|
| 20 |
+
antahpuraya 1 1 0
|
| 21 |
+
antharjaathika 1 1 0
|
| 22 |
+
ashvayaa 1 1 0
|
| 23 |
+
aushadhaya 1 1 0
|
| 24 |
+
bare_hal_zwj 1 1 0
|
| 25 |
+
bare_virama 1 1 0
|
| 26 |
+
bare_zwj 1 1 0
|
| 27 |
+
braahmana 1 1 0
|
| 28 |
+
brahmaya 1 1 0
|
| 29 |
+
chandrikaa 1 1 0
|
| 30 |
+
chhandas 1 1 0
|
| 31 |
+
conjunct_anusvara 120 120 0
|
| 32 |
+
conjunct_pili_anusvara 120 120 0
|
| 33 |
+
constructed_multisyllable 1055 1055 0
|
| 34 |
+
cricket 1 1 0
|
| 35 |
+
dangling_zwj 1 1 0
|
| 36 |
+
dhammachakka 1 1 0
|
| 37 |
+
dhyaanaya 1 1 0
|
| 38 |
+
double_conjunct 140 140 0
|
| 39 |
+
dravyaya 1 1 0
|
| 40 |
+
duhkhaya 1 1 0
|
| 41 |
+
filler_conjunct 190 190 0
|
| 42 |
+
grahanaya 1 1 0
|
| 43 |
+
granthaya 1 1 0
|
| 44 |
+
indriya 1 1 0
|
| 45 |
+
jyotishya 1 1 0
|
| 46 |
+
kramaya 1 1 0
|
| 47 |
+
kshatriya 1 1 0
|
| 48 |
+
kshetraya 1 1 0
|
| 49 |
+
kshitija 1 1 0
|
| 50 |
+
mahaparinibbana 1 1 0
|
| 51 |
+
manahkalpita 1 1 0
|
| 52 |
+
mantraya 1 1 0
|
| 53 |
+
mrutyuva 1 1 0
|
| 54 |
+
multi_conjunct_sequence 1 1 0
|
| 55 |
+
nibbaanaya 1 1 0
|
| 56 |
+
nirvachanaathmaka 1 1 0
|
| 57 |
+
nishkriya 1 1 0
|
| 58 |
+
paticcasamuppaada 1 1 0
|
| 59 |
+
praadeshiiyakaranaya 1 1 0
|
| 60 |
+
praatibhaasika 1 1 0
|
| 61 |
+
prajaava 1 1 0
|
| 62 |
+
prakaashaya 1 1 0
|
| 63 |
+
prashast 1 1 0
|
| 64 |
+
pratipattiya 1 1 0
|
| 65 |
+
prativyuuhaathmaka 1 1 0
|
| 66 |
+
pratyaksha 1 1 0
|
| 67 |
+
pratyayaya 1 1 0
|
| 68 |
+
pratyuthpanna 1 1 0
|
| 69 |
+
praudha 1 1 0
|
| 70 |
+
premaya 1 1 0
|
| 71 |
+
quad_stack 1 1 0
|
| 72 |
+
quad_virama_chain 1 1 0
|
| 73 |
+
rakaransaya_form 20 20 0
|
| 74 |
+
ritvija 1 1 0
|
| 75 |
+
saammpradaayika 1 1 0
|
| 76 |
+
samasth 1 1 0
|
| 77 |
+
sammaasambuddha 1 1 0
|
| 78 |
+
samskrutaya 1 1 0
|
| 79 |
+
samudraya 1 1 0
|
| 80 |
+
sankhaaraya 1 1 0
|
| 81 |
+
sanskaaraya 1 1 0
|
| 82 |
+
sansthaapanaya 1 1 0
|
| 83 |
+
satyaya 1 1 0
|
| 84 |
+
saundarya 1 1 0
|
| 85 |
+
shaastraya 1 1 0
|
| 86 |
+
shaastriya 1 1 0
|
| 87 |
+
shraddhaava 1 1 0
|
| 88 |
+
shreemath 1 1 0
|
| 89 |
+
shreshtha 1 1 0
|
| 90 |
+
svaamiyaa 1 1 0
|
| 91 |
+
svabhaavaya 1 1 0
|
| 92 |
+
svachchhand 1 1 0
|
| 93 |
+
tantraya 1 1 0
|
| 94 |
+
triple_conjunct 1 1 0
|
| 95 |
+
triple_conjunct_gen 240 240 0
|
| 96 |
+
trividha 1 1 0
|
| 97 |
+
udghoshanaya 1 1 0
|
| 98 |
+
upaadaanaya 1 1 0
|
| 99 |
+
upanishad 1 1 0
|
| 100 |
+
vaichitrya 1 1 0
|
| 101 |
+
vaidya 1 1 0
|
| 102 |
+
vastraya 1 1 0
|
| 103 |
+
very_long_compound 1 1 0
|
| 104 |
+
vipassanaava 1 1 0
|
| 105 |
+
vishvaasaya 1 1 0
|
| 106 |
+
vowel_prefix_conjunct 1 1 0
|
| 107 |
+
vyaakaranaya 1 1 0
|
| 108 |
+
vyaapaaraya 1 1 0
|
| 109 |
+
vyatirekaya 1 1 0
|
| 110 |
+
vyavahaarika 1 1 0
|
| 111 |
+
vyavasthaava 1 1 0
|
| 112 |
+
yansaya_form 20 20 0
|
| 113 |
+
yantraya 1 1 0
|
| 114 |
+
zwnj_middle 1 1 0
|
| 115 |
+
|
| 116 |
+
Result: PASS — Tested 2000 complex words. Avg L1 tokens/word: 2.53, Avg BPE tokens/word: 2.21. Violations: 0, Leading-space violations: 0
|
| 117 |
+
|
| 118 |
+
Test Battery Status Key Metric
|
| 119 |
+
────────────────────────────────────────────────────────────────────────────────
|
| 120 |
+
Linguistic Complexity (2K Sanskrit/Pali Words) ✓ PASS 0 violations
|
| 121 |
+
────────────────────────────────────────────────────────────────────────────────
|
| 122 |
+
TOTAL P:1 F:0 W:0
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
================================================================================
|
| 126 |
+
BATTERY 2: GLITCHED TOKEN DETECTION
|
| 127 |
+
================================================================================
|
| 128 |
+
Counting token usage across test corpus...
|
| 129 |
+
scanning: 100%|█████████████████████████████████| 536508/536508 [01:46<00:00, 5057.98 sent/s]
|
| 130 |
+
Total vocab size: 100,000
|
| 131 |
+
Zero-usage tokens: 34,868
|
| 132 |
+
Near-zero (< 3) tokens: 8,942
|
| 133 |
+
Glitched tokens (bare ZWJ/HAL): 4
|
| 134 |
+
Encoding errors during scan: 0
|
| 135 |
+
|
| 136 |
+
Stress-testing 34868 zero-usage tokens...
|
| 137 |
+
stress-test: 100%|██████████████████████████████████████| 34868/34868 [04:08<00:00, 140.42 tok/s]
|
| 138 |
+
near-zero test: 100%|██████████████████████████████████████| 500/500 [00:00<00:00, 9508.09 tok/s]
|
| 139 |
+
|
| 140 |
+
Result: FAIL — Zero-usage: 34868, Near-zero: 8942, Glitched: 4, Infinite loops: 0, Crashes: 0, Encode errors: 0
|
| 141 |
+
|
| 142 |
+
GLITCHED TOKENS:
|
| 143 |
+
GLITCHED: token "්" (id=14479) - HAL
|
| 144 |
+
GLITCHED: token "්" (id=54270) - ZWJ/HAL
|
| 145 |
+
GLITCHED: token "" (id=94134) - ZWJ
|
| 146 |
+
GLITCHED: token " " (id=94798) - whitespace-dominant (1/1 chars), whitespace-only
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
Test Battery Status Key Metric
|
| 150 |
+
────────────────────────────────────────────────────────────────────────────────
|
| 151 |
+
Glitched Token Detection                         ✗ FAIL  (negligible: test criteria are overly strict)
|
| 152 |
+
────────────────────────────────────────────────────────────────────────────────
|
| 153 |
+
TOTAL P:0 F:1 W:0
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
================================================================================
|
| 157 |
+
BATTERY 3: FRONTIER BENCHMARKING
|
| 158 |
+
================================================================================
|
| 159 |
+
|
| 160 |
+
Using ALL 536,508 sentences (local tokenizers only)
|
| 161 |
+
|
| 162 |
+
Tokenizer TWR Tokens Chr/Tok Source
|
| 163 |
+
----------------------------------------------------------------------
|
| 164 |
+
SGPE 1.438 13,256,494 4.48 Local
|
| 165 |
+
OpenAI (o200k_base) 3.515 32,392,475 1.83 Local
|
| 166 |
+
Llama 4 Scout 3.673 33,854,046 1.75 Local
|
| 167 |
+
DeepSeek V3 5.965 54,977,828 1.08 Local
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
Sample tokenizations:
|
| 171 |
+
'ක්රෝෂ්ඨ්ර':
|
| 172 |
+
SGPE ['ක්\u200dරෝ', '[UNK]'] (2 tokens)
|
| 173 |
+
OpenAI (o200k_base) [9 tokens]
|
| 174 |
+
Llama 4 Scout [8 tokens]
|
| 175 |
+
DeepSeek V3 [14 tokens]
|
| 176 |
+
'ශාස්ත්රීය':
|
| 177 |
+
SGPE ['ශාස්ත්\u200dරීය'] (1 tokens)
|
| 178 |
+
OpenAI (o200k_base) [6 tokens]
|
| 179 |
+
Llama 4 Scout [6 tokens]
|
| 180 |
+
DeepSeek V3 [10 tokens]
|
| 181 |
+
'ව්යාකරණය':
|
| 182 |
+
SGPE ['ව්\u200dයා', 'කරණය'] (2 tokens)
|
| 183 |
+
OpenAI (o200k_base) [5 tokens]
|
| 184 |
+
Llama 4 Scout [5 tokens]
|
| 185 |
+
DeepSeek V3 [10 tokens]
|
| 186 |
+
'ප්රත්යක්ෂ':
|
| 187 |
+
SGPE ['ප්\u200dරත්\u200dය', 'ක්ෂ'] (2 tokens)
|
| 188 |
+
OpenAI (o200k_base) [5 tokens]
|
| 189 |
+
Llama 4 Scout [5 tokens]
|
| 190 |
+
DeepSeek V3 [11 tokens]
|
| 191 |
+
'ධම්මචක්කප්පවත්තන':
|
| 192 |
+
SGPE ['ධම්ම', 'චක්ක', 'ප්ප', 'වත්තන'] (4 tokens)
|
| 193 |
+
OpenAI (o200k_base) [11 tokens]
|
| 194 |
+
Llama 4 Scout [11 tokens]
|
| 195 |
+
DeepSeek V3 [17 tokens]
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
Test Battery Status Key Metric
|
| 199 |
+
────────────────────────────────────────────────────────────────────────────────
|
| 200 |
+
Frontier Benchmarking ✓ PASS
|
| 201 |
+
────────────────────────────────────────────────────────────────────────────────
|
| 202 |
+
TOTAL P:1 F:0 W:0
|
| 203 |
+
|
| 204 |
+
┌─── Frontier Benchmark Highlight ──────────────────────────────┐
|
| 205 |
+
│ SGPE TWR: 1.438 │
|
| 206 |
+
│ GPT-4o TWR (o200k_base): 3.515 │
|
| 207 |
+
│ SGPE reduction vs GPT-4o: 59.1% │
|
| 208 |
+
│ SGPE reduction vs Llama 4: 60.8% │
|
| 209 |
+
└───────────────────────────────────────────────────────────────┘
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
================================================================================
|
| 213 |
+
BATTERY 4: ROUND-TRIP CONSISTENCY
|
| 214 |
+
================================================================================
|
| 215 |
+
|
| 216 |
+
Sentences tested: 536,508
|
| 217 |
+
Total characters tested: 59,323,178
|
| 218 |
+
Total tokens generated: 13,256,494
|
| 219 |
+
Mismatches (non-UNK): 0
|
| 220 |
+
Mismatches (with UNK loss): 61,350
|
| 221 |
+
Crashes: 0
|
| 222 |
+
|
| 223 |
+
Result: PASS — Tested 536,508 sentences (59,323,178 chars). Non-UNK mismatches: 0, UNK-caused losses: 61350, Crashes: 0
|
| 224 |
+
|
| 225 |
+
Test Battery Status Key Metric
|
| 226 |
+
────────────────────────────────────────────────────────────────────────────────
|
| 227 |
+
Round-Trip Consistency (536,508 sentences)       ✓ PASS  0 mismatches
|
| 228 |
+
────────────────────────────────────────────────────────────────────────────────
|
| 229 |
+
TOTAL P:1 F:0 W:0
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
================================================================================
|
| 233 |
+
BATTERY 5: BOUNDARY & LEADING SPACE EDGE-CASES
|
| 234 |
+
================================================================================
|
| 235 |
+
Testing whitespace variations...
|
| 236 |
+
Testing leading spaces before Sinhala...
|
| 237 |
+
Testing trailing spaces after Sinhala...
|
| 238 |
+
Testing combined leading/trailing spaces...
|
| 239 |
+
Testing Sinhala + numbers without spaces...
|
| 240 |
+
Testing Sinhala + English without spaces...
|
| 241 |
+
Testing complex mixed boundaries...
|
| 242 |
+
Testing punctuation boundaries...
|
| 243 |
+
Testing Unicode edge cases...
|
| 244 |
+
Testing Leading Space (Ġ) prefix integrity...
|
| 245 |
+
Testing rapid boundary transitions...
|
| 246 |
+
|
| 247 |
+
Result: PASS — Ran 60 edge-case tests. Violations: 0
|
| 248 |
+
|
| 249 |
+
Test Battery Status Key Metric
|
| 250 |
+
─────────────────────────────────────────────────────────────────────────��──────
|
| 251 |
+
Boundary & Leading Space Edge-Cases ✓ PASS 0 violations
|
| 252 |
+
────────────────────────────────────────────────────────────────────────────────
|
| 253 |
+
TOTAL P:1 F:0 W:0
|
| 254 |
+
|
| 255 |
+
================================================================================
|
| 256 |
+
BATTERY 6: ZERO-BREAKAGE GUARANTEE
|
| 257 |
+
================================================================================
|
| 258 |
+
Testing all C + HAL + ZWJ + C pairs...
|
| 259 |
+
Testing C + HAL + C pairs (implicit conjuncts)...
|
| 260 |
+
Testing C + vowel_sign (all combinations)...
|
| 261 |
+
Testing C + HAL (terminal virama)...
|
| 262 |
+
Testing C + anusvara / visarga...
|
| 263 |
+
Testing C + pili + anusvara...
|
| 264 |
+
Testing triple stacks...
|
| 265 |
+
Testing conjuncts with leading space...
|
| 266 |
+
|
| 267 |
+
Result: PASS — Ran 1,703 exhaustive breakage tests. Violations: 0
|
| 268 |
+
|
| 269 |
+
Test Battery Status Key Metric
|
| 270 |
+
────────────────────────────────────────────────────────────────────────────────
|
| 271 |
+
Zero-Breakage Guarantee ✓ PASS 0 violations
|
| 272 |
+
────────────────────────────────────────────────────────────────────────────────
|
| 273 |
+
TOTAL P:1 F:0 W:0
|
LICENSE
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Apache License
|
| 2 |
+
Version 2.0, January 2004
|
| 3 |
+
http://www.apache.org/licenses/
|
| 4 |
+
|
| 5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 6 |
+
|
| 7 |
+
1. Definitions.
|
| 8 |
+
|
| 9 |
+
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
|
| 10 |
+
|
| 11 |
+
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
|
| 12 |
+
|
| 13 |
+
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
|
| 14 |
+
|
| 15 |
+
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
|
| 16 |
+
|
| 17 |
+
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
|
| 18 |
+
|
| 19 |
+
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
|
| 20 |
+
|
| 21 |
+
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
|
| 22 |
+
|
| 23 |
+
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
|
| 24 |
+
|
| 25 |
+
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
|
| 26 |
+
|
| 27 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
|
| 28 |
+
|
| 29 |
+
2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
|
| 30 |
+
|
| 31 |
+
3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
|
| 32 |
+
|
| 33 |
+
4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
|
| 34 |
+
|
| 35 |
+
You must give any other recipients of the Work or Derivative Works a copy of this License; and
|
| 36 |
+
You must cause any modified files to carry prominent notices stating that You changed the files; and
|
| 37 |
+
You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
|
| 38 |
+
If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
|
| 39 |
+
You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
|
| 40 |
+
|
| 41 |
+
5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
|
| 42 |
+
|
| 43 |
+
6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
|
| 44 |
+
|
| 45 |
+
7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
|
| 46 |
+
|
| 47 |
+
8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
|
| 48 |
+
|
| 49 |
+
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
|
| 50 |
+
|
| 51 |
+
END OF TERMS AND CONDITIONS
|
README.md
CHANGED
|
@@ -1,3 +1,87 @@
|
|
| 1 |
-
--
|
| 2 |
-
|
| 3 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Syllable is the Token: SGPE - Syllable-Aware Grapheme Pair Encoding
|
| 2 |
+
|
| 3 |
+
**Remeinium Research**
|
| 4 |
+
[remeinium.com](https://remeinium.com) | [Paper](https://arxiv.org/abs/...) | [Tokenizer](https://huggingface.co/remeinium/SGPE-Tokenizer) | [Dataset](https://huggingface.co/datasets/remeinium/SGPE_Cleaned)
|
| 5 |
+
|
| 6 |
+
---
|
| 7 |
+
|
| 8 |
+
## The Next Architectural Primitive in Tokenization
|
| 9 |
+
|
| 10 |
+
Large language models remain linguistically blind to Abugida scripts. Byte-Pair Encoding and its descendants routinely shatter complex conjuncts — atomic multi-codepoint grapheme clusters that constitute the fundamental phonetic units of Indic and Southeast Asian writing systems — into meaningless sub-character fragments. The result is degraded reasoning, inflated inference costs, and a systemic “Token Tax” that disproportionately burdens more than one billion speakers.
|
| 11 |
+
|
| 12 |
+
**SGPE introduces the clean separation of concerns the field has been missing.**
|
| 13 |
+
|
| 14 |
+
**Layer 1 (LinguisTrie)** enforces linguistic integrity by construction: a deterministic $O(N)$ finite automaton segments raw Unicode into well-formed syllables with a formal zero-breakage guarantee.
|
| 15 |
+
**Layer 2 (GPE)** then performs statistical pair merging exclusively over this linguistically sound stream, inheriting the guarantee by design.
|
| 16 |
+
|
| 17 |
+
Sinhala serves as the high-complexity proof-of-concept. The same architecture generalizes directly to Devanagari, Tamil, Khmer, Myanmar, and the broader Abugida family through script-specific character-class mappings and conjunct rules.
|
| 18 |
+
|
| 19 |
+
---
|
| 20 |
+
|
| 21 |
+
## Results on 59.3 Million Characters
|
| 22 |
+
|
| 23 |
+
| Tokenizer | TWR ↓ | Tokens | Chars/Token ↑ | Reduction vs SGPE |
|
| 24 |
+
|------------------------|---------|-------------|---------------|-------------------|
|
| 25 |
+
| **SGPE (ours)** | **1.438** | **13.26 M** | **4.48** | — |
|
| 26 |
+
| OpenAI o200k_base | 3.515 | 32.39 M | 1.83 | 59.1 % |
|
| 27 |
+
| Llama 4 Scout | 3.673 | 33.85 M | 1.75 | 60.8 % |
|
| 28 |
+
| DeepSeek V3 | 5.965 | 54.98 M | 1.08 | 75.8 % |
|
| 29 |
+
|
| 30 |
+
- **Zero-Breakage Guarantee** validated on 1,703 exhaustive conjunct formations (0 violations).
|
| 31 |
+
- Full-corpus round-trip reconstruction: 0 non-UNK mismatches.
|
| 32 |
+
- UNK rate: 0.46 % (rare compounds only; no structural errors).
|
| 33 |
+
|
| 34 |
+
SGPE reclaims more than half the context window for Abugida text while preserving perfect orthographic and semantic integrity.
|
| 35 |
+
|
| 36 |
+
---
|
| 37 |
+
|
| 38 |
+
## Architecture
|
| 39 |
+
|
| 40 |
+
SGPE is deliberately bimodal:
|
| 41 |
+
|
| 42 |
+
1. **LinguisTrie (Layer 1)**
|
| 43 |
+
Deterministic finite automaton operating in a single left-to-right pass with constant-time transitions and $O(1)$ auxiliary space. Guarantees that no conjunct, pili, virama, or ZWJ sequence is ever fragmented.
|
| 44 |
+
|
| 45 |
+
2. **Grapheme Pair Encoding (Layer 2)**
|
| 46 |
+
Standard BPE performed exclusively on the atomic syllable stream, with three critical constraints:
|
| 47 |
+
- Syllabic initialization (base vocabulary consists of linguistically valid units)
|
| 48 |
+
- Boundary-aware scoping (merges restricted to within-word spans)
|
| 49 |
+
- Frequency pruning (rare syllables mapped to [UNK] sentinel before merging)
|
| 50 |
+
|
| 51 |
+
The decoupling is the core scientific contribution: linguistic correctness is enforced by construction rather than hoped for statistically.
|
| 52 |
+
|
| 53 |
+
---
|
| 54 |
+
|
| 55 |
+
## Quick Start with Hugging Face
|
| 56 |
+
|
| 57 |
+
```python
|
| 58 |
+
from transformers import AutoTokenizer
|
| 59 |
+
|
| 60 |
+
tokenizer = AutoTokenizer.from_pretrained("remeinium/SGPE")
|
| 61 |
+
text = "ආයුබෝවන් ශ්රී ලංකා"
|
| 62 |
+
|
| 63 |
+
tokens = tokenizer.tokenize(text)
|
| 64 |
+
# ['ආයුබෝවන්', ' ශ්රී', ' ලංකා']
|
| 65 |
+
print(tokenizer.encode(text))
|
| 66 |
+
```
|
| 67 |
+
|
| 68 |
+
---
|
| 69 |
+
|
| 70 |
+
## Resources
|
| 71 |
+
|
| 72 |
+
- **Research Paper**: “The Syllable is the Token: Breaking the Token Tax with SGPE” (Remeinium Research, February 2026)
|
| 73 |
+
- **Pre-trained Tokenizer**: [Hugging Face](https://huggingface.co/remeinium/SGPE-Tokenizer)
|
| 74 |
+
- **Cleaned Training Corpus**: [Hugging Face](https://huggingface.co/datasets/remeinium/SGPE_Cleaned)
|
| 75 |
+
- **Full Code & Evaluation Harness**: [GitHub](https://github.com/remeinium/SGPE)
|
| 76 |
+
|
| 77 |
+
---
|
| 78 |
+
|
| 79 |
+
## License
|
| 80 |
+
|
| 81 |
+
Apache License 2.0 — see [LICENSE](LICENSE).
|
| 82 |
+
|
| 83 |
+
---
|
| 84 |
+
|
| 85 |
+
**Remeinium Research | Remeinium AI | Intelligence for a Greater Tomorrow**
|
| 86 |
+
|
| 87 |
+
---
|
encoder.py
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import json
|
| 3 |
+
|
| 4 |
+
from linguis_trie import LinguisTrie
|
| 5 |
+
from gpe_trainer import segment_into_words, _is_boundary_token
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class SGPEEncoder:
|
| 9 |
+
|
| 10 |
+
def __init__(self, vocab_path: str):
|
| 11 |
+
with open(vocab_path, "r", encoding="utf-8") as f:
|
| 12 |
+
data = json.load(f)
|
| 13 |
+
|
| 14 |
+
self.vocab: dict[str, int] = data["vocab"]
|
| 15 |
+
self.merges: list[tuple[str, str]] = [tuple(m) for m in data["merges"]]
|
| 16 |
+
self.special_tokens: list[str] = data["special_tokens"]
|
| 17 |
+
self.tokenizer = LinguisTrie()
|
| 18 |
+
self.unk_id = self.vocab.get("[UNK]", 1)
|
| 19 |
+
self.leading_space: bool = data.get("leading_space", False)
|
| 20 |
+
|
| 21 |
+
self._merge_priority: dict[tuple[str, str], int] = {
|
| 22 |
+
(a, b): rank for rank, (a, b) in enumerate(self.merges)
|
| 23 |
+
}
|
| 24 |
+
|
| 25 |
+
def encode(self, text: str) -> list[int]:
|
| 26 |
+
tokens = self.tokenize(text)
|
| 27 |
+
return [self.vocab.get(t, self.unk_id) for t in tokens]
|
| 28 |
+
|
| 29 |
+
def _apply_merges_to_word(self, tokens: list[str]) -> list[str]:
|
| 30 |
+
if len(tokens) <= 1:
|
| 31 |
+
return tokens
|
| 32 |
+
|
| 33 |
+
while True:
|
| 34 |
+
best_rank = len(self.merges)
|
| 35 |
+
best_idx = -1
|
| 36 |
+
|
| 37 |
+
for i in range(len(tokens) - 1):
|
| 38 |
+
pair = (tokens[i], tokens[i + 1])
|
| 39 |
+
rank = self._merge_priority.get(pair)
|
| 40 |
+
if rank is not None and rank < best_rank:
|
| 41 |
+
best_rank = rank
|
| 42 |
+
best_idx = i
|
| 43 |
+
|
| 44 |
+
if best_idx == -1:
|
| 45 |
+
break
|
| 46 |
+
|
| 47 |
+
merged = tokens[best_idx] + tokens[best_idx + 1]
|
| 48 |
+
tokens = tokens[:best_idx] + [merged] + tokens[best_idx + 2:]
|
| 49 |
+
|
| 50 |
+
return tokens
|
| 51 |
+
|
| 52 |
+
def tokenize(self, text: str) -> list[str]:
|
| 53 |
+
syllables = self.layer1_tokenize(text)
|
| 54 |
+
words = segment_into_words(syllables)
|
| 55 |
+
|
| 56 |
+
result: list[str] = []
|
| 57 |
+
for word_tokens in words:
|
| 58 |
+
if len(word_tokens) == 1 and _is_boundary_token(word_tokens[0]):
|
| 59 |
+
result.append(word_tokens[0])
|
| 60 |
+
continue
|
| 61 |
+
|
| 62 |
+
cleaned = [t if t in self.vocab else "[UNK]" for t in word_tokens]
|
| 63 |
+
result.extend(self._apply_merges_to_word(cleaned))
|
| 64 |
+
|
| 65 |
+
return result
|
| 66 |
+
|
| 67 |
+
def layer1_tokenize(self, text: str) -> list[str]:
    """Layer 1: deterministic LinguisTrie pre-tokenization into syllables."""
    trie = self.tokenizer
    return trie.tokenize(text, leading_space=self.leading_space)
|
| 70 |
+
|
| 71 |
+
def decode(self, ids: list[int]) -> str:
    """Decode a list of ids back into text.

    Unknown ids are silently dropped (mapped to the empty string).
    The reverse vocab map is built lazily on first use and cached on the
    instance, so repeated decode calls avoid re-inverting the whole
    vocabulary each time.

    NOTE(review): the cache assumes ``self.vocab`` is not mutated after
    the first decode — confirm against callers.
    """
    id_to_token = getattr(self, "_id_to_token", None)
    if id_to_token is None:
        id_to_token = {v: k for k, v in self.vocab.items()}
        self._id_to_token = id_to_token
    return "".join(id_to_token.get(i, "") for i in ids)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def main():
    """CLI entry point: tokenize and encode ``--text`` using ``--vocab``."""
    parser = argparse.ArgumentParser(description="SGPE Encoder")
    parser.add_argument("--vocab", type=str, default="output/vocab.json")
    parser.add_argument("--text", type=str, required=True)
    opts = parser.parse_args()

    encoder = SGPEEncoder(opts.vocab)
    token_list = encoder.tokenize(opts.text)
    id_list = encoder.encode(opts.text)
    print(f"tokens : {token_list}")
    print(f"ids : {id_list}")
    print(f"count : {len(token_list)}")


if __name__ == "__main__":
    main()
|
linguis_trie.py
ADDED
|
@@ -0,0 +1,226 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
==========================================
|
| 3 |
+
SGPE Layer 1 — LinguisTrie Pre-tokenizer
|
| 4 |
+
==========================================
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
HAL = '\u0DCA' # ් virama / al-lakuna
|
| 10 |
+
ZWJ = '\u200D' # zero-width joiner
|
| 11 |
+
|
| 12 |
+
# --- Independent vowels (svara) ---
|
| 13 |
+
VOWELS: set[str] = {
|
| 14 |
+
'\u0D85', # අ
|
| 15 |
+
'\u0D86', # ආ
|
| 16 |
+
'\u0D87', # ඇ
|
| 17 |
+
'\u0D88', # ඈ
|
| 18 |
+
'\u0D89', # ඉ
|
| 19 |
+
'\u0D8A', # ඊ
|
| 20 |
+
'\u0D8B', # උ
|
| 21 |
+
'\u0D8C', # ඌ
|
| 22 |
+
'\u0D8D', # ඍ
|
| 23 |
+
'\u0D8E', # ඎ
|
| 24 |
+
'\u0D8F', # ඏ
|
| 25 |
+
'\u0D90', # ඐ
|
| 26 |
+
'\u0D91', # එ
|
| 27 |
+
'\u0D92', # ඒ
|
| 28 |
+
'\u0D93', # ඓ
|
| 29 |
+
'\u0D94', # ඔ
|
| 30 |
+
'\u0D95', # ඕ
|
| 31 |
+
'\u0D96', # ඖ
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
# --- Consonants (vyanjana) ---
|
| 35 |
+
CONSONANTS: set[str] = {chr(c) for c in range(0x0D9A, 0x0DC7)}
|
| 36 |
+
|
| 37 |
+
# --- Dependent vowel signs (pili) ---
|
| 38 |
+
VOWEL_SIGNS: set[str] = {
|
| 39 |
+
'\u0DCF', # ා
|
| 40 |
+
'\u0DD0', # ැ
|
| 41 |
+
'\u0DD1', # ෑ
|
| 42 |
+
'\u0DD2', # ි
|
| 43 |
+
'\u0DD3', # ී
|
| 44 |
+
'\u0DD4', # ු
|
| 45 |
+
'\u0DD5', # (rare/archaic)
|
| 46 |
+
'\u0DD6', # ූ
|
| 47 |
+
'\u0DD7', # (rare/archaic)
|
| 48 |
+
'\u0DD8', # ෘ
|
| 49 |
+
'\u0DD9', # ෙ
|
| 50 |
+
'\u0DDA', # ේ
|
| 51 |
+
'\u0DDB', # ෛ
|
| 52 |
+
'\u0DDC', # ො
|
| 53 |
+
'\u0DDD', # ෝ
|
| 54 |
+
'\u0DDE', # ෞ
|
| 55 |
+
'\u0DDF', # ෟ
|
| 56 |
+
'\u0DF2', # ෲ
|
| 57 |
+
'\u0DF3', # ෳ
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
# --- Post-consonant modifiers (anusvara, visarga) ---
|
| 61 |
+
POST_MODIFIERS: set[str] = {
|
| 62 |
+
'\u0D82', # ං anusvara
|
| 63 |
+
'\u0D83', # ඃ visarga
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def _is_consonant(ch: str) -> bool:
    """True when *ch* is a Sinhala consonant (vyanjana)."""
    return ch in CONSONANTS


def _is_vowel(ch: str) -> bool:
    """True when *ch* is an independent Sinhala vowel (svara)."""
    return ch in VOWELS


def _is_vowel_sign(ch: str) -> bool:
    """True when *ch* is a dependent vowel sign (pili)."""
    return ch in VOWEL_SIGNS


def _is_post_modifier(ch: str) -> bool:
    """True when *ch* is an anusvara or visarga."""
    return ch in POST_MODIFIERS


def _is_hal(ch: str) -> bool:
    """True when *ch* is the al-lakuna (virama)."""
    return HAL == ch


def _is_zwj(ch: str) -> bool:
    """True when *ch* is the zero-width joiner."""
    return ZWJ == ch


def _is_sinhala(ch: str) -> bool:
    """Any character in the Sinhala Unicode block, or ZWJ."""
    code = ord(ch)
    return code == 0x200D or 0x0D80 <= code <= 0x0DFF
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
class LinguisTrie:
    # Rule-based Sinhala syllable segmenter (SGPE Layer 1). Stateless:
    # all work happens inside tokenize().
    def tokenize(self, text: str, leading_space: bool = False) -> list[str]:
        """
        Tokenize Sinhala text into atomic syllable tokens.
        Example: "මම යනවා" → [" මම", " ය", "න", "වා"]

        With leading_space=True, a single word-separating space is folded
        into the start of the following token; other whitespace characters
        are emitted as standalone one-character tokens. Non-Sinhala
        characters pass through one character at a time.
        """
        tokens: list[str] = []
        n = len(text)
        pos = 0
        # A " " waiting to be prefixed onto the next emitted token
        # (only ever set in leading-space mode).
        pending_space = ""

        while pos < n:
            ch = text[pos]

            # ─── Whitespace handling (leading-space mode) ─────────
            if leading_space and ch in (' ', '\t', '\n', '\r'):
                # Consume the whole whitespace run in one pass.
                ws_buffer = ""
                while pos < n and text[pos] in (' ', '\t', '\n', '\r'):
                    ws_buffer += text[pos]
                    pos += 1

                if ws_buffer.endswith(' '):
                    # Emit all but the final space as standalone tokens;
                    # the final space becomes the next token's prefix.
                    for ws_char in ws_buffer[:-1]:
                        tokens.append(ws_char)
                    pending_space = " "
                else:
                    # Run ends in \t/\n/\r: nothing to fold forward.
                    for ws_char in ws_buffer:
                        tokens.append(ws_char)
                    pending_space = ""
                continue

            # ─── Consonant-initiated syllable ─────────────────────
            if _is_consonant(ch):
                start = pos
                pos += 1

                # Absorb consonant cluster: (HAL [ZWJ] Consonant)*
                # Handles: C්C (implicit), C්C (ZWJ), and stacks
                while pos < n and _is_hal(text[pos]):
                    if pos + 1 < n and _is_zwj(text[pos + 1]):
                        # HAL + ZWJ: must be followed by consonant
                        if pos + 2 < n and _is_consonant(text[pos + 2]):
                            pos += 3
                            continue
                        else:
                            # Stray HAL+ZWJ at end — absorb HAL+ZWJ
                            pos += 2
                            break

                    elif pos + 1 < n and _is_consonant(text[pos + 1]):
                        # HAL + C (implicit conjunct, no ZWJ)
                        pos += 2
                        continue

                    else:
                        # Trailing HAL (vowel-killer) — left for the
                        # post-cluster step below to absorb.
                        break

                # ── Post-cluster modifiers ──

                if pos < n and _is_vowel_sign(text[pos]):
                    pos += 1 # pili
                elif pos < n and _is_hal(text[pos]):
                    pos += 1 # virama

                if pos < n and _is_post_modifier(text[pos]):
                    pos += 1 # anusvara/visarga

                # Emit the whole syllable span (with any pending space).
                tokens.append(pending_space + text[start:pos])
                pending_space = ""
                continue

            # ─── Independent vowel ────────────────────────────────
            if _is_vowel(ch):
                start = pos
                pos += 1

                # Vowel + post-modifier (e.g. අං)
                if pos < n and _is_post_modifier(text[pos]):
                    pos += 1

                tokens.append(pending_space + text[start:pos])
                pending_space = ""
                continue

            # ─── Orphan post-modifier ──
            # Combining marks with no preceding base become their own token.
            if _is_post_modifier(ch) or _is_hal(ch) or _is_vowel_sign(ch):
                tokens.append(pending_space + ch)
                pending_space = ""
                pos += 1
                continue

            # ─── Non-Sinhala passthrough (punctuation, digits, etc.) ──
            if pending_space:
                tokens.append(pending_space + ch)
                pending_space = ""
            else:
                tokens.append(ch)
            pos += 1

        # Text ended right after a word-separating space: emit it alone.
        if pending_space:
            tokens.append(pending_space)

        return tokens
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def build_linguistrie() -> LinguisTrie:
    """Construct and return a fresh LinguisTrie instance."""
    trie = LinguisTrie()
    return trie
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
if __name__ == '__main__':
    # Ad-hoc demo: segment a set of tricky Sinhala sentences and print
    # the resulting syllable tokens.
    segmenter = build_linguistrie()

    samples = [
        # Core tests from the plan
        "ශ්රී ලංකා ද්වීපයේ ස්වෛරීභාවය සහ ත්රිවිධ හමුදාව.",
        "භාෂාවේ ප්රෞඪත්වය විදහාපායි",
        "ආචාර්යවරයාගේ වෛද්ය විද්යා පර්යේෂණය සාර්ථකයි.",
        "චන්ද්රයාගේ ආලෝකය පෘථිවියට ක්ෂණිකව ලැබේ.",
        "මම ක්ෂණිකව ගඟට පැන්නා",
        "සඤ්ඤක ක්ෂමතාවය ක්රමය සහ ඥානය",
        "ද්වී ත්වේ ලං කඃ",
        "න්ද්රී ක්ෂි ඤ්ඤ",
        "2026 වසරේ AI තාක්ෂණය 60% දියුණුයි!",
    ]

    for sentence in samples:
        pieces = segmenter.tokenize(sentence)
        print(f"Input: {sentence}")
        print(f"Tokens: {pieces}")
        print(f"Count: {len(pieces)}")
        print("-" * 60)
|
tokenizer.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
vocab.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|