omarkamali committed on
Commit c54dc8e · verified · 1 Parent(s): a8afb8b

Upload all models and assets for bn (latest)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. README.md +334 -161
  3. models/embeddings/aligned/bn_128d.bin +3 -0
  4. models/embeddings/aligned/bn_128d.meta.json +1 -0
  5. models/embeddings/aligned/bn_128d.projection.npy +3 -0
  6. models/embeddings/aligned/bn_128d_metadata.json +8 -0
  7. models/embeddings/aligned/bn_32d.bin +3 -0
  8. models/embeddings/aligned/bn_32d.meta.json +1 -0
  9. models/embeddings/aligned/bn_32d.projection.npy +3 -0
  10. models/embeddings/aligned/bn_32d_metadata.json +8 -0
  11. models/embeddings/aligned/bn_64d.bin +3 -0
  12. models/embeddings/aligned/bn_64d.meta.json +1 -0
  13. models/embeddings/aligned/bn_64d.projection.npy +3 -0
  14. models/embeddings/aligned/bn_64d_metadata.json +8 -0
  15. models/embeddings/monolingual/bn_128d.bin +2 -2
  16. models/embeddings/monolingual/bn_128d_metadata.json +5 -3
  17. models/embeddings/monolingual/bn_32d.bin +2 -2
  18. models/embeddings/monolingual/bn_32d_metadata.json +5 -3
  19. models/embeddings/monolingual/bn_64d.bin +2 -2
  20. models/embeddings/monolingual/bn_64d_metadata.json +5 -3
  21. models/subword_markov/bn_markov_ctx1_subword.parquet +2 -2
  22. models/subword_markov/bn_markov_ctx1_subword_metadata.json +2 -2
  23. models/subword_markov/bn_markov_ctx2_subword.parquet +2 -2
  24. models/subword_markov/bn_markov_ctx2_subword_metadata.json +2 -2
  25. models/subword_markov/bn_markov_ctx3_subword.parquet +2 -2
  26. models/subword_markov/bn_markov_ctx3_subword_metadata.json +2 -2
  27. models/subword_markov/bn_markov_ctx4_subword.parquet +2 -2
  28. models/subword_markov/bn_markov_ctx4_subword_metadata.json +2 -2
  29. models/subword_ngram/bn_2gram_subword.parquet +2 -2
  30. models/subword_ngram/bn_2gram_subword_metadata.json +2 -2
  31. models/subword_ngram/bn_3gram_subword.parquet +2 -2
  32. models/subword_ngram/bn_3gram_subword_metadata.json +2 -2
  33. models/subword_ngram/bn_4gram_subword.parquet +2 -2
  34. models/subword_ngram/bn_4gram_subword_metadata.json +2 -2
  35. models/subword_ngram/bn_5gram_subword.parquet +3 -0
  36. models/subword_ngram/bn_5gram_subword_metadata.json +7 -0
  37. models/tokenizer/bn_tokenizer_16k.model +2 -2
  38. models/tokenizer/bn_tokenizer_16k.vocab +0 -0
  39. models/tokenizer/bn_tokenizer_32k.model +2 -2
  40. models/tokenizer/bn_tokenizer_32k.vocab +0 -0
  41. models/tokenizer/bn_tokenizer_64k.model +2 -2
  42. models/tokenizer/bn_tokenizer_64k.vocab +0 -0
  43. models/tokenizer/bn_tokenizer_8k.model +2 -2
  44. models/tokenizer/bn_tokenizer_8k.vocab +0 -0
  45. models/vocabulary/bn_vocabulary.parquet +2 -2
  46. models/vocabulary/bn_vocabulary_metadata.json +10 -9
  47. models/word_markov/bn_markov_ctx1_word.parquet +2 -2
  48. models/word_markov/bn_markov_ctx1_word_metadata.json +2 -2
  49. models/word_markov/bn_markov_ctx2_word.parquet +2 -2
  50. models/word_markov/bn_markov_ctx2_word_metadata.json +2 -2
.gitattributes CHANGED
@@ -39,3 +39,4 @@ visualizations/position_encoding_comparison.png filter=lfs diff=lfs merge=lfs -t
39
  visualizations/tsne_sentences.png filter=lfs diff=lfs merge=lfs -text
40
  visualizations/tsne_words.png filter=lfs diff=lfs merge=lfs -text
41
  visualizations/zipf_law.png filter=lfs diff=lfs merge=lfs -text
 
 
39
  visualizations/tsne_sentences.png filter=lfs diff=lfs merge=lfs -text
40
  visualizations/tsne_words.png filter=lfs diff=lfs merge=lfs -text
41
  visualizations/zipf_law.png filter=lfs diff=lfs merge=lfs -text
42
+ visualizations/embedding_tsne_multilingual.png filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,6 +1,6 @@
1
  ---
2
  language: bn
3
- language_name: Bengali
4
  language_family: indoaryan_eastern
5
  tags:
6
  - wikilangs
@@ -10,11 +10,21 @@ tags:
10
  - n-gram
11
  - markov
12
  - wikipedia
13
  - monolingual
14
  - family-indoaryan_eastern
15
  license: mit
16
  library_name: wikilangs
17
- pipeline_tag: feature-extraction
18
  datasets:
19
  - omarkamali/wikipedia-monthly
20
  dataset_info:
@@ -23,20 +33,20 @@ dataset_info:
23
  metrics:
24
  - name: best_compression_ratio
25
  type: compression
26
- value: 4.983
27
  - name: best_isotropy
28
  type: isotropy
29
- value: 0.8133
30
  - name: vocabulary_size
31
  type: vocab
32
- value: 250463
33
- generated: 2025-12-28
34
  ---
35
 
36
- # Bengali - Wikilangs Models
37
  ## Comprehensive Research Report & Full Ablation Study
38
 
39
- This repository contains NLP models trained and evaluated by Wikilangs, specifically on **Bengali** Wikipedia data.
40
  We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and word embeddings.
41
 
42
  ## 📋 Repository Contents
@@ -44,12 +54,13 @@ We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and
44
  ### Models & Assets
45
 
46
  - Tokenizers (8k, 16k, 32k, 64k)
47
- - N-gram models (2, 3, 4-gram)
48
- - Markov chains (context of 1, 2, 3 and 4)
49
  - Subword N-gram and Markov chains
50
- - Embeddings in various sizes and dimensions
51
  - Language Vocabulary
52
  - Language Statistics
 
53
  ![Performance Dashboard](visualizations/performance_dashboard.png)
54
 
55
  ### Analysis and Evaluation
@@ -59,7 +70,8 @@ We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and
59
  - [3. Markov Chain Evaluation](#3-markov-chain-evaluation)
60
  - [4. Vocabulary Analysis](#4-vocabulary-analysis)
61
  - [5. Word Embeddings Evaluation](#5-word-embeddings-evaluation)
62
- - [6. Summary & Recommendations](#6-summary--recommendations)
 
63
  - [Metrics Glossary](#appendix-metrics-glossary--interpretation-guide)
64
  - [Visualizations Index](#visualizations-index)
65
 
@@ -68,75 +80,57 @@ We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and
68
 
69
  ![Tokenizer Compression](visualizations/tokenizer_compression.png)
70
 
 
 
 
 
 
 
71
  ### Results
72
 
73
  | Vocab Size | Compression | Avg Token Len | UNK Rate | Total Tokens |
74
  |------------|-------------|---------------|----------|--------------|
75
- | **8k** | 3.731x | 3.70 | 0.0813% | 2,819,192 |
76
- | **16k** | 4.228x | 4.19 | 0.0921% | 2,487,647 |
77
- | **32k** | 4.650x | 4.61 | 0.1013% | 2,262,081 |
78
- | **64k** | 4.983x 🏆 | 4.94 | 0.1085% | 2,111,066 |
79
 
80
  ### Tokenization Examples
81
 
82
  Below are sample sentences tokenized with each vocabulary size:
83
 
84
- **Sample 1:** `ইতিহাস
85
-
86
- ঘটনাবলী
87
-
88
- জন্ম
89
-
90
- মৃত্যু
91
- মিশেল আফলাক
92
-
93
- ছুটি এবং অন্যান্য
94
-
95
- বহিঃসংযো...`
96
 
97
  | Vocab | Tokens | Count |
98
  |-------|--------|-------|
99
- | 8k | `▁ইতিহাস ▁ঘটনাবলী ▁জন্ম ▁মৃত্যু ▁মিশ েল ▁আ ফল াক ▁ছ ... (+16 more)` | 26 |
100
- | 16k | `▁ইতিহাস ▁ঘটনাবলী ▁জন্ম ▁মৃত্যু ▁মিশেল ▁আ ফল াক ▁ছুটি ▁এবং ... (+12 more)` | 22 |
101
- | 32k | `▁ইতিহাস ▁ঘটনাবলী ▁জন্ম ▁মৃত্যু ▁মিশেল ▁আ ফল াক ▁ছুটি ▁এবং ... (+11 more)` | 21 |
102
- | 64k | `▁ইতিহাস ▁ঘটনাবলী ▁জন্ম ▁মৃত্যু ▁মিশেল ▁আ ফল াক ▁ছুটি ▁এবং ... (+11 more)` | 21 |
103
 
104
- **Sample 2:** `চিলমারী ইউনিয়ন নামে বাংলাদেশে মোট ২টি ইউনিয়ন রয়েছে। যথা:
105
- চিলমারী ইউনিয়ন, চি...`
106
 
107
  | Vocab | Tokens | Count |
108
  |-------|--------|-------|
109
- | 8k | `▁চিল ারী ▁ইউনিয়ন ▁নামে ▁বাংলাদেশে ▁মোট ▁২টি ▁ইউনিয়ন ▁রয়েছে ... (+34 more)` | 44 |
110
- | 16k | `▁চিল মারী ▁ইউনিয়ন ▁নামে ▁বাংলাদেশে ▁মোট ▁২টি ▁ইউনিয়ন ▁রয়েছে ... (+27 more)` | 37 |
111
- | 32k | `▁চিল মারী ▁ইউনিয়ন ▁নামে ▁বাংলাদেশে ▁মোট ▁২টি ▁ইউনিয়ন ▁রয়েছে ... (+24 more)` | 34 |
112
- | 64k | `▁চিলমারী ▁ইউনিয়ন ▁নামে ▁বাংলাদেশে ▁মোট ▁২টি ▁ইউনিয়ন ▁রয়েছে ▁যথা ... (+19 more)` | 29 |
113
-
114
- **Sample 3:** `ইতিহাস
115
 
116
- ঘটনাবলী
117
-
118
- জন্ম
119
-
120
- মৃত্যু
121
-
122
- ছুটি এবং অন্যান্য
123
-
124
- বহিঃসংযোগ
125
-
126
- বিষয়শ্র...`
127
 
128
  | Vocab | Tokens | Count |
129
  |-------|--------|-------|
130
- | 8k | `▁ইতিহাস ▁ঘটনাবলী ▁জন্ম ▁মৃত্যু ▁ছ ুটি ▁এবং ▁অন্যান্য ▁বহিঃসংযোগ ▁বিষয়শ্রেণী ... (+12 more)` | 22 |
131
- | 16k | `▁ইতিহাস ▁ঘটনাবলী ▁জন্ম ▁মৃত্যু ▁ছুটি ▁এবং ▁অন্যান্য ▁বহিঃসংযোগ ▁বিষয়শ্রেণী : ... (+9 more)` | 19 |
132
- | 32k | `▁ইতিহাস ▁ঘটনাবলী ▁জন্ম ▁মৃত্যু ▁ছুটি ▁এবং ▁অন্যান্য ▁বহিঃসংযোগ ▁বিষয়শ্রেণী : ... (+9 more)` | 19 |
133
- | 64k | `▁ইতিহাস ▁ঘটনাবলী ▁জন্ম ▁মৃত্যু ▁ছুটি ▁এবং ▁অন্যান্য ▁বহিঃসংযোগ ▁বিষয়শ্রেণী : ... (+7 more)` | 17 |
134
 
135
 
136
  ### Key Findings
137
 
138
- - **Best Compression:** 64k achieves 4.983x compression
139
- - **Lowest UNK Rate:** 8k with 0.0813% unknown tokens
140
  - **Trade-off:** Larger vocabularies improve compression but increase model size
141
  - **Recommendation:** 32k vocabulary provides optimal balance for production use
142
 
@@ -145,57 +139,111 @@ Below are sample sentences tokenized with each vocabulary size:
145
 
146
  ![N-gram Perplexity](visualizations/ngram_perplexity.png)
147
 
 
 
148
  ![N-gram Coverage](visualizations/ngram_coverage.png)
149
 
150
  ### Results
151
 
152
- | N-gram | Perplexity | Entropy | Unique N-grams | Top-100 Coverage | Top-1000 Coverage |
153
- |--------|------------|---------|----------------|------------------|-------------------|
154
- | **2-gram** | 1,729 🏆 | 10.76 | 514,602 | 43.5% | 78.4% |
155
- | **2-gram** | 617 🏆 | 9.27 | 33,180 | 50.6% | 92.9% |
156
- | **3-gram** | 17,027 | 14.06 | 2,087,238 | 16.5% | 45.3% |
157
- | **3-gram** | 5,420 | 12.40 | 328,172 | 19.8% | 55.7% |
158
- | **4-gram** | 101,821 | 16.64 | 6,958,889 | 8.9% | 25.5% |
159
- | **4-gram** | 30,323 | 14.89 | 1,934,479 | 10.4% | 31.7% |
 
 
160
 
161
  ### Top 5 N-grams by Size
162
 
163
- **2-grams:**
 
165
  | Rank | N-gram | Count |
166
  |------|--------|-------|
167
- | 1 | `া র` | 6,292,455 |
168
- | 2 | `ে র` | 5,308,140 |
169
- | 3 | `্ র` | 4,972,400 |
170
- | 4 | `য ়` | 4,738,290 |
171
- | 5 | `্ য` | 4,282,578 |
172
 
173
- **3-grams:**
174
 
175
  | Rank | N-gram | Count |
176
  |------|--------|-------|
177
- | 1 | `্ া` | 2,328,201 |
178
- | 2 | `্ ে` | 1,699,814 |
179
- | 3 | `ি ়` | 1,654,809 |
180
- | 4 | `া ়` | 1,550,712 |
181
- | 5 | `য া` | 1,358,961 |
182
 
183
- **4-grams:**
184
 
185
  | Rank | N-gram | Count |
186
  |------|--------|-------|
187
- | 1 | `শ ে` | 1,127,020 |
188
- | 2 | `ব ি ষয ়` | 1,117,741 |
189
- | 3 | `্ ণ` | 1,092,505 |
190
- | 4 | `র ী` | 1,077,879 |
191
- | 5 | `় র` | 1,049,641 |
192
 
193
 
194
  ### Key Findings
195
 
196
- - **Best Perplexity:** 2-gram with 617
197
  - **Entropy Trend:** Decreases with larger n-grams (more predictable)
198
- - **Coverage:** Top-1000 patterns cover ~32% of corpus
199
  - **Recommendation:** 4-gram or 5-gram for best predictive performance
200
 
201
  ---
@@ -203,55 +251,86 @@ Below are sample sentences tokenized with each vocabulary size:
203
 
204
  ![Markov Entropy](visualizations/markov_entropy.png)
205
 
 
 
206
  ![Markov Branching](visualizations/markov_branching.png)
207
 
208
  ### Results
209
 
210
- | Context | Avg Entropy | Perplexity | Branching Factor | Unique Contexts | Predictability |
211
- |---------|-------------|------------|------------------|-----------------|----------------|
212
- | **1** | 0.4721 | 1.387 | 5.55 | 741,598 | 52.8% |
213
- | **1** | 1.3088 | 2.477 | 9.81 | 11,996 | 0.0% |
214
- | **2** | 0.3508 | 1.275 | 2.80 | 4,117,166 | 64.9% |
215
- | **2** | 0.8170 | 1.762 | 6.18 | 117,615 | 18.3% |
216
- | **3** | 0.2622 | 1.199 | 2.09 | 11,539,749 | 73.8% |
217
- | **3** | 0.8527 | 1.806 | 4.88 | 727,384 | 14.7% |
218
- | **4** | 0.2332 🏆 | 1.175 | 1.80 | 24,106,813 | 76.7% |
219
- | **4** | 0.6914 🏆 | 1.615 | 3.38 | 3,549,554 | 30.9% |
220
 
221
- ### Generated Text Samples
222
 
223
- Below are text samples generated from each Markov chain model:
224
 
225
  **Context Size 1:**
226
 
227
- 1. `া মর ঐত ি নও ধরন ে`
228
- 2. `্ ড০০ তথ ভরশ হহ ি ক া`
229
- 3. `ে বন টগ , দক দ`
230
 
231
  **Context Size 2:**
232
 
233
- 1. `া ি ি ি প`
234
- 2. `ে তর আয ইর া ক ি ন ব`
235
- 3. `্ : আমর ড া`
236
 
237
  **Context Size 3:**
238
 
239
- 1. `্ শব দ ে গ ু ল`
240
- 2. `্ : ১৯৭৯ - রত ি রক ষ ি ত`
241
- 3. `ি , য া ত ি ক ব`
242
 
243
  **Context Size 4:**
244
 
245
- 1. `শ : ১৮০৬ - জন ব ি ষয ় শ ্`
246
- 2. `ব ি ষয : রত া`
247
- 3. `্ : রত ্ থ ১৯৬৬ - স ৌ`
248
 
249
 
250
  ### Key Findings
251
 
252
- - **Best Predictability:** Context-4 with 76.7% predictability
253
  - **Branching Factor:** Decreases with context size (more deterministic)
254
- - **Memory Trade-off:** Larger contexts require more storage (3,549,554 contexts)
255
  - **Recommendation:** Context-3 or Context-4 for text generation
256
 
257
  ---
@@ -267,64 +346,64 @@ Below are text samples generated from each Markov chain model:
267
 
268
  | Metric | Value |
269
  |--------|-------|
270
- | Vocabulary Size | 250,463 |
271
- | Total Tokens | 201,406,640 |
272
- | Mean Frequency | 804.14 |
273
  | Median Frequency | 4 |
274
- | Frequency Std Dev | 71598.48 |
275
 
276
  ### Most Common Words
277
 
278
  | Rank | Word | Frequency |
279
  |------|------|-----------|
280
- | 1 | | 21,510,159 |
281
- | 2 | | 11,295,997 |
282
- | 3 | | 10,867,520 |
283
- | 4 | | 9,714,992 |
284
- | 5 | | 9,151,878 |
285
- | 6 | | 8,792,666 |
286
- | 7 | | 7,892,623 |
287
- | 8 | | 7,373,804 |
288
- | 9 | | 5,927,003 |
289
- | 10 | | 5,815,632 |
290
 
291
  ### Least Common Words (from vocabulary)
292
 
293
  | Rank | Word | Frequency |
294
  |------|------|-----------|
295
- | 1 | kovidara | 2 |
296
- | 2 | ৩৩w | 2 |
297
- | 3 | vuer | 2 |
298
- | 4 | ২০১৯২১ | 2 |
299
- | 5 | dilg | 2 |
300
- | 6 | দ১70px | 2 |
301
- | 7 | ciacrimes | 2 |
302
- | 8 | এএমএইচএস | 2 |
303
- | 9 | yohanna | 2 |
304
- | 10 | katanacho | 2 |
305
 
306
  ### Zipf's Law Analysis
307
 
308
  | Metric | Value |
309
  |--------|-------|
310
- | Zipf Coefficient | 1.5776 |
311
- | R² (Goodness of Fit) | 0.996866 |
312
  | Adherence Quality | **excellent** |
313
 
314
  ### Coverage Analysis
315
 
316
  | Top N Words | Coverage |
317
  |-------------|----------|
318
- | Top 100 | 81.9% |
319
- | Top 1,000 | 95.2% |
320
- | Top 5,000 | 98.2% |
321
- | Top 10,000 | 98.8% |
322
 
323
  ### Key Findings
324
 
325
- - **Zipf Compliance:** R²=0.9969 indicates excellent adherence to Zipf's law
326
- - **High Frequency Dominance:** Top 100 words cover 81.9% of corpus
327
- - **Long Tail:** 240,463 words needed for remaining 1.2% coverage
328
 
329
  ---
330
  ## 5. Word Embeddings Evaluation
@@ -337,24 +416,115 @@ Below are text samples generated from each Markov chain model:
337
 
338
  ![t-SNE Sentences](visualizations/tsne_sentences.png)
339
 
340
- ### Model Comparison
341
 
342
- | Model | Vocab Size | Dimension | Avg Norm | Std Norm | Isotropy |
343
- |-------|------------|-----------|----------|----------|----------|
344
- | **mono_32d** | 502,529 | 32 | 3.036 | 0.774 | 0.8133 🏆 |
345
- | **mono_64d** | 502,529 | 64 | 3.445 | 0.763 | 0.7938 |
346
- | **mono_128d** | 502,529 | 128 | 3.881 | 0.802 | 0.7502 |
347
- | **embeddings_enhanced** | 0 | 0 | 0.000 | 0.000 | 0.0000 |
348
 
349
  ### Key Findings
350
 
351
- - **Best Isotropy:** mono_32d with 0.8133 (more uniform distribution)
352
- - **Dimension Trade-off:** Higher dimensions capture more semantics but reduce isotropy
353
- - **Vocabulary Coverage:** All models cover 502,529 words
354
- - **Recommendation:** 100d for balanced semantic capture and efficiency
355
 
356
  ---
357
- ## 6. Summary & Recommendations
358
 
359
  ![Performance Dashboard](visualizations/performance_dashboard.png)
360
 
@@ -362,11 +532,12 @@ Below are text samples generated from each Markov chain model:
362
 
363
  | Component | Recommended | Rationale |
364
  |-----------|-------------|-----------|
365
- | Tokenizer | **32k BPE** | Best compression (4.98x) with low UNK rate |
366
- | N-gram | **5-gram** | Lowest perplexity (617) |
367
- | Markov | **Context-4** | Highest predictability (76.7%) |
368
  | Embeddings | **100d** | Balanced semantic capture and isotropy |
369
 
 
370
  ---
371
  ## Appendix: Metrics Glossary & Interpretation Guide
372
 
@@ -556,7 +727,8 @@ If you use these models in your research, please cite:
556
  author = {Kamali, Omar},
557
  title = {Wikilangs: Open NLP Models for Wikipedia Languages},
558
  year = {2025},
559
- publisher = {HuggingFace},
 
560
  url = {https://huggingface.co/wikilangs}
561
  institution = {Omneity Labs}
562
  }
@@ -572,7 +744,8 @@ MIT License - Free for academic and commercial use.
572
  - 🤗 Models: [huggingface.co/wikilangs](https://huggingface.co/wikilangs)
573
  - 📊 Data: [wikipedia-monthly](https://huggingface.co/datasets/omarkamali/wikipedia-monthly)
574
  - 👤 Author: [Omar Kamali](https://huggingface.co/omarkamali)
 
575
  ---
576
  *Generated by Wikilangs Models Pipeline*
577
 
578
- *Report Date: 2025-12-28 07:23:06*
 
1
  ---
2
  language: bn
3
+ language_name: Bangla
4
  language_family: indoaryan_eastern
5
  tags:
6
  - wikilangs
 
10
  - n-gram
11
  - markov
12
  - wikipedia
13
+ - feature-extraction
14
+ - sentence-similarity
15
+ - tokenization
16
+ - n-grams
17
+ - markov-chain
18
+ - text-mining
19
+ - fasttext
20
+ - babelvec
21
+ - vocabulous
22
+ - vocabulary
23
  - monolingual
24
  - family-indoaryan_eastern
25
  license: mit
26
  library_name: wikilangs
27
+ pipeline_tag: text-generation
28
  datasets:
29
  - omarkamali/wikipedia-monthly
30
  dataset_info:
 
33
  metrics:
34
  - name: best_compression_ratio
35
  type: compression
36
+ value: 5.044
37
  - name: best_isotropy
38
  type: isotropy
39
+ value: 0.8095
40
  - name: vocabulary_size
41
  type: vocab
42
+ value: 0
43
+ generated: 2026-01-07
44
  ---
45
 
46
+ # Bangla - Wikilangs Models
47
  ## Comprehensive Research Report & Full Ablation Study
48
 
49
+ This repository contains NLP models trained and evaluated by Wikilangs, specifically on **Bangla** Wikipedia data.
50
  We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and word embeddings.
51
 
52
  ## 📋 Repository Contents
 
54
  ### Models & Assets
55
 
56
  - Tokenizers (8k, 16k, 32k, 64k)
57
+ - N-gram models (2, 3, 4, 5-gram)
58
+ - Markov chains (context of 1, 2, 3, 4 and 5)
59
  - Subword N-gram and Markov chains
60
+ - Embeddings in various sizes and dimensions (aligned and unaligned)
61
  - Language Vocabulary
62
  - Language Statistics
63
+
64
  ![Performance Dashboard](visualizations/performance_dashboard.png)
65
 
66
  ### Analysis and Evaluation
 
70
  - [3. Markov Chain Evaluation](#3-markov-chain-evaluation)
71
  - [4. Vocabulary Analysis](#4-vocabulary-analysis)
72
  - [5. Word Embeddings Evaluation](#5-word-embeddings-evaluation)
73
+ - [6. Morphological Analysis (Experimental)](#6--morphological-analysis-experimental)
74
+ - [7. Summary & Recommendations](#7-summary--recommendations)
75
  - [Metrics Glossary](#appendix-metrics-glossary--interpretation-guide)
76
  - [Visualizations Index](#visualizations-index)
77
 
 
80
 
81
  ![Tokenizer Compression](visualizations/tokenizer_compression.png)
82
 
83
+ ![Tokenizer Fertility](visualizations/tokenizer_fertility.png)
84
+
85
+ ![Tokenizer OOV](visualizations/tokenizer_oov.png)
86
+
87
+ ![Total Tokens](visualizations/tokenizer_total_tokens.png)
88
+
89
  ### Results
90
 
91
  | Vocab Size | Compression | Avg Token Len | UNK Rate | Total Tokens |
92
  |------------|-------------|---------------|----------|--------------|
93
+ | **8k** | 3.770x | 3.77 | 0.0982% | 2,627,489 |
94
+ | **16k** | 4.281x | 4.28 | 0.1115% | 2,313,780 |
95
+ | **32k** | 4.713x | 4.71 | 0.1227% | 2,101,756 |
96
+ | **64k** | 5.044x 🏆 | 5.04 | 0.1313% | 1,964,118 |
97
 
98
  ### Tokenization Examples
99
 
100
  Below are sample sentences tokenized with each vocabulary size:
101
 
102
+ **Sample 1:** `ছেউড়িয়া কুষ্টিয়া শহরের পূর্ব দিকে অবস্থিত একটি এলাকা। লালন শাহের মাজার এই ছেউ...`
103
 
104
  | Vocab | Tokens | Count |
105
  |-------|--------|-------|
106
+ | 8k | `▁ছে ড়িয়া ▁কুষ্টিয়া ▁শহরের ▁পূর্ব ▁দিকে ▁অবস্থিত ▁একটি ▁এলাকা ... (+17 more)` | 27 |
107
+ | 16k | `▁ছে ড়িয়া ▁কুষ্টিয়া ▁শহরের ▁পূর্ব ▁দিকে ▁অবস্থিত ▁একটি ▁এলাকা ... (+15 more)` | 25 |
108
+ | 32k | `▁ছে ড়িয়া ▁কুষ্টিয়া ▁শহরের ▁পূর্ব ▁দিকে ▁অবস্থিত ▁একটি ▁এলাকা ... (+15 more)` | 25 |
109
+ | 64k | `▁ছে ড়িয়া ▁কুষ্টিয়া ▁শহরের ▁পূর্ব ▁দিকে ▁অবস্থিত ▁একটি ▁এলাকা ... (+15 more)` | 25 |
110
 
111
+ **Sample 2:** `বনী কেনানাহ () হল জর্ডানের ইরবিড গভর্নরেটের একটি জেলা। তথ্যসূত্র জেলা`
 
112
 
113
  | Vocab | Tokens | Count |
114
  |-------|--------|-------|
115
+ | 8k | `▁ব নী ▁কেন ান াহ ▁() ▁হল ▁জর্ ানের ... (+11 more)` | 21 |
116
+ | 16k | `▁ব নী ▁কেন ান াহ ▁() ▁হল ▁জর্ডানের ▁ইর বি ... (+7 more)` | 17 |
117
+ | 32k | `▁ব নী ▁কেন ান াহ ▁() ▁হল ▁জর্ডানের ▁ইর বি ... (+7 more)` | 17 |
118
+ | 64k | `▁বনী ▁কেন ানাহ ▁() ▁হল ▁জর্ডানের ▁ইর বিড ▁গভর্নরেটের ▁একটি ... (+4 more)` | 14 |
 
 
119
 
120
+ **Sample 3:** `উপভাষাতত্ত্ব () ভাষাবিজ্ঞানের একটি উপশাখা যেখানে ভাষার ভৌগোলিক বৈচিত্র্য নিয়ে গ...`
121
 
122
  | Vocab | Tokens | Count |
123
  |-------|--------|-------|
124
+ | 8k | `▁উপ ভাষ াত ত্ত্ব ▁() ▁ভাষ াবি জ্ঞ ানের ▁একটি ... (+25 more)` | 35 |
125
+ | 16k | `▁উপ ভাষ াত ত্ত্ব ▁() ▁ভাষ াবিজ্ঞ ানের ▁একটি ▁উপ ... (+22 more)` | 32 |
126
+ | 32k | `▁উপভাষ াত ত্ত্ব ▁() ▁ভাষাবিজ্ঞানের ▁একটি ▁উপ শাখা ▁যেখানে ▁ভাষার ... (+17 more)` | 27 |
127
+ | 64k | `▁উপভাষ াতত্ত্ব ▁() ▁ভাষাবিজ্ঞানের ▁একটি ▁উপশাখা ▁যেখানে ▁ভাষার ▁ভৌগোলিক ▁বৈচিত্র্য ... (+15 more)` | 25 |
128
 
129
 
130
  ### Key Findings
131
 
132
+ - **Best Compression:** 64k achieves 5.044x compression
133
+ - **Lowest UNK Rate:** 8k with 0.0982% unknown tokens
134
  - **Trade-off:** Larger vocabularies improve compression but increase model size
135
  - **Recommendation:** 32k vocabulary provides optimal balance for production use
136
 
 
139
 
140
  ![N-gram Perplexity](visualizations/ngram_perplexity.png)
141
 
142
+ ![N-gram Unique](visualizations/ngram_unique.png)
143
+
144
  ![N-gram Coverage](visualizations/ngram_coverage.png)
145
 
146
  ### Results
147
 
148
+ | N-gram | Variant | Perplexity | Entropy | Unique N-grams | Top-100 Coverage | Top-1000 Coverage |
149
+ |--------|---------|------------|---------|----------------|------------------|-------------------|
150
+ | **2-gram** | Word | 291,514 | 18.15 | 1,574,708 | 4.7% | 13.8% |
151
+ | **2-gram** | Subword | 2,633 🏆 | 11.36 | 151,712 | 33.5% | 66.9% |
152
+ | **3-gram** | Word | 772,868 | 19.56 | 2,366,241 | 2.2% | 7.8% |
153
+ | **3-gram** | Subword | 26,877 | 14.71 | 1,149,281 | 12.1% | 33.1% |
154
+ | **4-gram** | Word | 1,492,191 | 20.51 | 3,512,891 | 1.8% | 5.9% |
155
+ | **4-gram** | Subword | 176,159 | 17.43 | 5,668,680 | 6.6% | 19.0% |
156
+ | **5-gram** | Word | 1,031,104 | 19.98 | 2,302,686 | 2.2% | 6.7% |
157
+ | **5-gram** | Subword | 672,872 | 19.36 | 12,813,291 | 4.0% | 12.3% |
158
 
159
  ### Top 5 N-grams by Size
160
 
161
+ **2-grams (Word):**
162
+
163
+ | Rank | N-gram | Count |
164
+ |------|--------|-------|
165
+ | 1 | `করা হয়` | 178,320 |
166
+ | 2 | `তথ্যসূত্র বহিঃসংযোগ` | 62,509 |
167
+ | 3 | `করা হয়েছিল` | 55,266 |
168
+ | 4 | `করা হয়েছে` | 52,752 |
169
+ | 5 | `হয় এবং` | 47,516 |
170
+
171
+ **3-grams (Word):**
172
 
173
  | Rank | N-gram | Count |
174
  |------|--------|-------|
175
+ | 1 | `থেকে সাল পর্যন্ত` | 15,509 |
176
+ | 2 | `করা হয় এবং` | 12,875 |
177
+ | 3 | `দায়িত্ব পালন করেন` | 11,918 |
178
+ | 4 | `উপর ভিত্তি করে` | 11,195 |
179
+ | 5 | `করা যেতে পারে` | 11,181 |
180
 
181
+ **4-grams (Word):**
182
 
183
  | Rank | N-gram | Count |
184
  |------|--------|-------|
185
+ | 1 | `তথ্যসূত্র বহিঃসংযোগ জন্ম ব্যক্তি` | 6,636 |
186
+ | 2 | `সংসদ সদস্য সংসদ সদস্য` | 6,370 |
187
+ | 3 | `হিসেবে দায়িত্ব পালন করেন` | 5,513 |
188
+ | 4 | `এপ্রিল জুন জুলাই সেপ্টেম্বর` | 5,102 |
189
+ | 5 | `জুলাই সেপ্টেম্বর অক্টোবর ডিসেম্বর` | 5,100 |
190
 
191
+ **5-grams (Word):**
192
 
193
  | Rank | N-gram | Count |
194
  |------|--------|-------|
195
+ | 1 | `জুন জুলাই সেপ্টেম্বর অক্টোবর ডিসেম্বর` | 5,049 |
196
+ | 2 | `এপ্রিল জুন জুলাই সেপ্টেম্বর অক্টোবর` | 5,048 |
197
+ | 3 | `মার্চ এপ্রিল জুন জুলাই সেপ্টেম্বর` | 5,040 |
198
+ | 4 | `জানুয়ারি মার্চ এপ্রিল জুন জুলাই` | 5,039 |
199
+ | 5 | `সদস্য সংসদ সদস্য সংসদ সদস্য` | 4,613 |
200
+
201
+ **2-grams (Subword):**
202
+
203
+ | Rank | N-gram | Count |
204
+ |------|--------|-------|
205
+ | 1 | `র _` | 10,460,613 |
206
+ | 2 | `_ এ` | 4,233,657 |
207
+ | 3 | `ন _` | 4,097,869 |
208
+ | 4 | `। _` | 3,608,688 |
209
+ | 5 | `_ ক` | 3,135,335 |
210
+
211
+ **3-grams (Subword):**
212
+
213
+ | Rank | N-gram | Count |
214
+ |------|--------|-------|
215
+ | 1 | `_ ক রে` | 1,380,255 |
216
+ | 2 | `এ বং _` | 1,266,527 |
217
+ | 3 | `_ এ বং` | 1,265,068 |
218
+ | 4 | `_ এ ক` | 991,360 |
219
+ | 5 | `ন । _` | 910,746 |
220
+
221
+ **4-grams (Subword):**
222
+
223
+ | Rank | N-gram | Count |
224
+ |------|--------|-------|
225
+ | 1 | `_ এ বং _` | 1,263,055 |
226
+ | 2 | `_ এ ক টি` | 584,296 |
227
+ | 3 | `এ ক টি _` | 578,361 |
228
+ | 4 | `_ তি নি _` | 473,133 |
229
+ | 5 | `_ ক রা _` | 429,980 |
230
+
231
+ **5-grams (Subword):**
232
+
233
+ | Rank | N-gram | Count |
234
+ |------|--------|-------|
235
+ | 1 | `_ এ ক টি _` | 571,779 |
236
+ | 2 | `_ হ য় । _` | 358,749 |
237
+ | 3 | `র _ জ ন্য _` | 344,350 |
238
+ | 4 | `_ ক রা _ হ` | 325,163 |
239
+ | 5 | `_ ক রে ন ।` | 253,567 |
240
 
241
 
242
  ### Key Findings
243
 
244
+ - **Best Perplexity:** 2-gram (subword) with 2,633
245
  - **Entropy Trend:** Decreases with larger n-grams (more predictable)
246
+ - **Coverage:** Top-1000 patterns cover ~12% of corpus
247
  - **Recommendation:** 4-gram or 5-gram for best predictive performance
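The coverage and perplexity figures above can be re-derived from the published parquet tables; note that the perplexity column is simply 2 raised to the entropy. The column names below (`ngram`, `count`) are assumptions about the file schema — inspect `df.columns` before relying on them.

```python
# Minimal sketch: coverage and entropy/perplexity from an n-gram parquet table.
# Assumption: the table exposes "ngram" and "count" columns.
import numpy as np
import pandas as pd

df = pd.read_parquet("models/subword_ngram/bn_2gram_subword.parquet")
df = df.sort_values("count", ascending=False)

total = df["count"].sum()
print(f"top-100 coverage:  {df['count'].head(100).sum() / total:.1%}")
print(f"top-1000 coverage: {df['count'].head(1000).sum() / total:.1%}")

p = df["count"].to_numpy(dtype=float) / total
entropy = -(p * np.log2(p)).sum()
print(f"entropy: {entropy:.2f} bits -> perplexity: {2 ** entropy:,.0f}")
```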
248
 
249
  ---
 
251
 
252
  ![Markov Entropy](visualizations/markov_entropy.png)
253
 
254
+ ![Markov Contexts](visualizations/markov_contexts.png)
255
+
256
  ![Markov Branching](visualizations/markov_branching.png)
257
 
258
  ### Results
259
 
260
+ | Context | Variant | Avg Entropy | Perplexity | Branching Factor | Unique Contexts | Predictability |
261
+ |---------|---------|-------------|------------|------------------|-----------------|----------------|
262
+ | **1** | Word | 0.8427 | 1.793 | 11.19 | 2,081,488 | 15.7% |
263
+ | **1** | Subword | 0.9831 | 1.977 | 14.53 | 30,042 | 1.7% |
264
+ | **2** | Word | 0.3490 | 1.274 | 2.13 | 23,273,031 | 65.1% |
265
+ | **2** | Subword | 0.7496 | 1.681 | 6.59 | 436,358 | 25.0% |
266
+ | **3** | Word | 0.1187 | 1.086 | 1.25 | 49,621,155 | 88.1% |
267
+ | **3** | Subword | 0.5931 | 1.508 | 4.11 | 2,877,364 | 40.7% |
268
+ | **4** | Word | 0.0412 🏆 | 1.029 | 1.07 | 61,780,303 | 95.9% |
269
+ | **4** | Subword | 0.5053 | 1.419 | 2.78 | 11,819,297 | 49.5% |
270
 
271
+ ### Generated Text Samples (Word-based)
272
 
273
+ Below are text samples generated from each word-based Markov chain model:
274
 
275
  **Context Size 1:**
276
 
277
+ 1. `এবং পেনাল্টি শুট করা ঐতিহ্যগতভাবে মে তারিখে স্বাগতিক নিউজিল্যান্ড পুরুষ দীর্ঘ এবং ফোকসোনমি সালে u1 ১...`
278
+ 2. `ও বিদ্রোহী দুর্গগুলির ধ্বংসাবশেষ এবং মহিলা ফুটবল ক্লাবের দৃশ্যের মিল মালিক মুম্বাইয়ে গুজরাটি ভাষায়...`
279
+ 3. `হয় যা মামলুকের পদক্ষেপকে ইসরায়েল বেইট শেমেশের কাছে উন্মুক্ত এবং বাবা মাকে ডেকে পিছনে চার্জার কেইস`
280
 
281
  **Context Size 2:**
282
 
283
+ 1. `করা হয় ১৫ নভেম্বর the day of francophonie ২০ মার্চ রাজ্য সরকার মাদুরাইয়ে দুটি আইটি ভিত্তিক সরঞ্জাম...`
284
+ 2. `তথ্যসূত্র বহিঃসংযোগ উপজেলার ইউনিয়ন বিভাগের ইউনিয়ন জেলার ইউনিয়ন বিভাগের ইউনিয়ন জেলার ইউনিয়ন পরিষ...`
285
+ 3. `করা হয়েছিল যে সামাজিক প্রভাবের প্রক্রিয়া যার মাধ্যমে গুগল টক ক্লায়েন্ট তৈরি করেন texier charles r...`
286
 
287
  **Context Size 3:**
288
 
289
+ 1. `থেকে সাল পর্যন্ত শাখাহার ইউনিয়নের পরপর পাঁচমেয়াদে নির্বাচিত চেয়ারম্যান ছিলেন তিনি থেকে সময়কালে অ...`
290
+ 2. `করা হয় এবং এই ডকুমেন্ট সম্মেলনে আলোচনা হওয়ার পর সেখানেই একটি মাদরাসা প্রতিষ্ঠার ব্যাপারে অভিমত ব্য...`
291
+ 3. `দায়িত্ব পালন করেন যেখানে তিনি দ্বিতীয় স্থান অধিকার করে সমালোচনামূলক প্রতিক্রিয়া রিভিউ অ্যাগ্রিগেট...`
292
 
293
  **Context Size 4:**
294
 
295
+ 1. `তথ্যসূত্র বহিঃসংযোগ জন্ম ব্যক্তি কোরীয় চলচ্চিত্র অভিনেত্রী কোরীয় নারী আইডল কোরীয় নারী মডেল কোরীয়...`
296
+ 2. `সংসদ সদস্য সংসদ সদস্য সংসদ সদস্য সংসদ সদস্য সংসদ সদস্য সংসদ সদস্য সংসদ সদস্য সংসদ সদস্য সংসদ সদস্য স...`
297
+ 3. `হিসেবে দায়িত্ব পালন করেন জাতীয় সংসদের স্পিকার উপরাষ্ট্রপতি নিয়োগের বিধান না থাকায় রাষ্ট্রপতির অব...`
298
+
299
+
300
+ ### Generated Text Samples (Subword-based)
301
+
302
+ Below are text samples generated from each subword-based Markov chain model:
303
+
304
+ **Context Size 1:**
305
+
306
+ 1. `_প্রান্তে_সারকারীতামসদর_শের`
307
+ 2. `র_আবহ_নের_থাকে_জেদেরজা`
308
+ 3. `ন_সাইকে_স_পরিলাক্সিকাশ্মীর_`
309
+
310
+ **Context Size 2:**
311
+
312
+ 1. `র_ভূপৃষ্ঠতলের_জনসংখ্যান_একা`
313
+ 2. `_এবং_ডুবে_ঝুঁকিপূর্ণ_ছিলেন,_`
314
+ 3. `ন_নাট্যধর্মীয়_লেজে_দৃশ্য_চলচ্চি`
315
+
316
+ **Context Size 3:**
317
+
318
+ 1. `_করেন_এবং_সর্বোচ্চ_কানাডাব্যাপী_`
319
+ 2. `এবং_এর_মাত্র_এবং_"প্রক্সিমালি"`
320
+ 3. `_এবং_ওল্ফের_একটি_হল_৬৪-`
321
+
322
+ **Context Size 4:**
323
+
324
+ 1. `_এবং_তাদের_তালিকা_১৬৩_±_০`
325
+ 2. `_একটি_বিরোধ_দেখেন।_সাম্রাজ্যের_`
326
+ 3. `একটি_কাজ_শুরু,_সালেই_প্রায়_৭`
327
 
328
 
329
  ### Key Findings
330
 
331
+ - **Best Predictability:** Context-4 (word) with 95.9% predictability
332
  - **Branching Factor:** Decreases with context size (more deterministic)
333
+ - **Memory Trade-off:** Larger contexts require more storage (11,819,297 contexts)
334
  - **Recommendation:** Context-3 or Context-4 for text generation
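The samples above are produced by sampling the published transition tables. A minimal sketch follows, assuming the word-level Markov parquet stores `context`, `next_token` and `count` columns with the context words joined by spaces; these names are assumptions, so check the actual schema first.

```python
# Minimal sketch: sample a continuation from the context-2 word Markov table.
# Assumption: "context", "next_token" and "count" columns.
import numpy as np
import pandas as pd

df = pd.read_parquet("models/word_markov/bn_markov_ctx2_word.parquet")
rng = np.random.default_rng(0)

def sample_next(context):
    rows = df[df["context"] == context]
    if rows.empty:
        return None
    p = rows["count"].to_numpy(dtype=float)
    return rng.choice(rows["next_token"].to_numpy(), p=p / p.sum())

tokens = ["করা", "হয়"]  # seed with a frequent bigram from the n-gram tables above
for _ in range(20):
    nxt = sample_next(" ".join(tokens[-2:]))
    if nxt is None:
        break
    tokens.append(nxt)
print(" ".join(tokens))
```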
335
 
336
  ---
 
346
 
347
  | Metric | Value |
348
  |--------|-------|
349
+ | Vocabulary Size | 838,913 |
350
+ | Total Tokens | 71,898,290 |
351
+ | Mean Frequency | 85.70 |
352
  | Median Frequency | 4 |
353
+ | Frequency Std Dev | 2805.67 |
354
 
355
  ### Most Common Words
356
 
357
  | Rank | Word | Frequency |
358
  |------|------|-----------|
359
+ | 1 | এবং | 1,267,871 |
360
+ | 2 | | 702,980 |
361
+ | 3 | হয় | 618,329 |
362
+ | 4 | করে | 616,816 |
363
+ | 5 | একটি | 586,525 |
364
+ | 6 | তিনি | 495,350 |
365
+ | 7 | করা | 454,721 |
366
+ | 8 | থেকে | 424,445 |
367
+ | 9 | এই | 402,971 |
368
+ | 10 | তার | 388,104 |
369
 
370
  ### Least Common Words (from vocabulary)
371
 
372
  | Rank | Word | Frequency |
373
  |------|------|-----------|
374
+ | 1 | সণ্ডিলা | 2 |
375
+ | 2 | শূকরক্ষেত | 2 |
376
+ | 3 | প্লীপেন | 2 |
377
+ | 4 | মস্‌ম্যান | 2 |
378
+ | 5 | শোরোশ | 2 |
379
+ | 6 | yohanna | 2 |
380
+ | 7 | katanacho | 2 |
381
+ | 8 | শোরোশের | 2 |
382
+ | 9 | ট্রাঞ্চবলের | 2 |
383
+ | 10 | হুলশফ | 2 |
384
 
385
  ### Zipf's Law Analysis
386
 
387
  | Metric | Value |
388
  |--------|-------|
389
+ | Zipf Coefficient | 1.0269 |
390
+ | R² (Goodness of Fit) | 0.987733 |
391
  | Adherence Quality | **excellent** |
392
 
393
  ### Coverage Analysis
394
 
395
  | Top N Words | Coverage |
396
  |-------------|----------|
397
+ | Top 100 | 23.9% |
398
+ | Top 1,000 | 50.1% |
399
+ | Top 5,000 | 71.3% |
400
+ | Top 10,000 | 78.8% |
401
 
402
  ### Key Findings
403
 
404
+ - **Zipf Compliance:** R²=0.9877 indicates excellent adherence to Zipf's law
405
+ - **High Frequency Dominance:** Top 100 words cover 23.9% of corpus
406
+ - **Long Tail:** 828,913 words needed for remaining 21.2% coverage
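The Zipf coefficient and R² above correspond to an ordinary least-squares fit of log-frequency against log-rank. A minimal sketch, assuming the vocabulary parquet exposes a `frequency` column (the column name is an assumption):

```python
# Minimal sketch: Zipf coefficient and goodness of fit from the vocabulary table.
import numpy as np
import pandas as pd

vocab = pd.read_parquet("models/vocabulary/bn_vocabulary.parquet")
freq = np.sort(vocab["frequency"].to_numpy(dtype=float))[::-1]
rank = np.arange(1, len(freq) + 1)

# log f ≈ -s·log r + c, so the Zipf coefficient is the negated slope s
slope, intercept = np.polyfit(np.log(rank), np.log(freq), 1)
resid = np.log(freq) - (slope * np.log(rank) + intercept)
r2 = 1 - (resid ** 2).sum() / ((np.log(freq) - np.log(freq).mean()) ** 2).sum()
print(f"Zipf coefficient ≈ {-slope:.4f}, R² ≈ {r2:.6f}")
```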
407
 
408
  ---
409
  ## 5. Word Embeddings Evaluation
 
416
 
417
  ![t-SNE Sentences](visualizations/tsne_sentences.png)
418
 
 
419
 
420
+ ### 5.1 Cross-Lingual Alignment
421
+
422
+ ![Alignment Quality](visualizations/embedding_alignment_quality.png)
423
+
424
+ ![Multilingual t-SNE](visualizations/embedding_tsne_multilingual.png)
425
+
426
+
427
+ ### 5.2 Model Comparison
428
+
429
+ | Model | Dimension | Isotropy | Semantic Density | Alignment R@1 | Alignment R@10 |
430
+ |-------|-----------|----------|------------------|---------------|----------------|
431
+ | **mono_32d** | 32 | 0.8095 🏆 | 0.3709 | N/A | N/A |
432
+ | **mono_64d** | 64 | 0.8011 | 0.2937 | N/A | N/A |
433
+ | **mono_128d** | 128 | 0.7560 | 0.2281 | N/A | N/A |
434
+ | **aligned_32d** | 32 | 0.8095 | 0.3802 | 0.0980 | 0.4600 |
435
+ | **aligned_64d** | 64 | 0.8011 | 0.2992 | 0.2280 | 0.6000 |
436
+ | **aligned_128d** | 128 | 0.7560 | 0.2319 | 0.3880 | 0.7640 |
437
 
438
  ### Key Findings
439
 
440
+ - **Best Isotropy:** mono_32d with 0.8095 (more uniform distribution)
441
+ - **Semantic Density:** Average pairwise similarity of 0.3007. Lower values indicate better semantic separation.
442
+ - **Alignment Quality:** Aligned models achieve up to 38.8% R@1 in cross-lingual retrieval.
443
+ - **Recommendation:** 128d aligned for best cross-lingual performance
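Since the repository tags mention fastText, the `.bin` files are presumably fastText binaries, and each aligned model ships a `.projection.npy` whose size matches a dim × dim float32 matrix mapping Bangla vectors into the shared English-hub space. A hedged sketch (the matrix orientation should be verified empirically):

```python
# Minimal sketch: load aligned Bangla embeddings and project into the hub space.
# Assumptions: fastText-format .bin files; (128, 128) projection matrix.
import fasttext
import numpy as np

model = fasttext.load_model("models/embeddings/aligned/bn_128d.bin")
proj = np.load("models/embeddings/aligned/bn_128d.projection.npy")  # shape (128, 128)

vec = model.get_word_vector("বাংলা")  # monolingual 128-d vector
aligned = vec @ proj                   # mapped into the cross-lingual space
aligned /= np.linalg.norm(aligned)     # normalise for cosine-based retrieval
print(aligned[:5])
```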
444
 
445
  ---
446
+ ## 6. Morphological Analysis (Experimental)
447
+
448
+ This section presents an automated morphological analysis derived from the statistical divergence between word-level and subword-level models. By analyzing where subword predictability spikes and where word-level coverage fails, we can infer linguistic structures without supervised data.
449
+
450
+ ### 6.1 Productivity & Complexity
451
+
452
+ | Metric | Value | Interpretation | Recommendation |
453
+ |--------|-------|----------------|----------------|
454
+ | Productivity Index | **5.000** | High morphological productivity | Reliable analysis |
455
+ | Idiomaticity Gap | **-0.452** | Low formulaic content | - |
456
+
457
+ ### 6.2 Affix Inventory (Productive Units)
458
+
459
+ These are the most productive prefixes and suffixes identified by sampling the vocabulary for global substitutability patterns. A unit is considered an affix if stripping it leaves a valid stem that appears in other contexts.
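A minimal sketch of this substitutability test, assuming a `word` column in the vocabulary parquet (the column name is an assumption): a candidate suffix counts as productive in proportion to how many distinct words still leave an attested stem after stripping it.

```python
# Minimal sketch: rank candidate suffixes by how many vocabulary words leave
# an attested stem when the suffix is stripped.
from collections import Counter
import pandas as pd

words = set(pd.read_parquet("models/vocabulary/bn_vocabulary.parquet")["word"])

suffix_hits = Counter()
for w in words:
    for k in (1, 2, 3):                      # candidate suffixes of length 1-3
        if len(w) > k + 1 and w[:-k] in words:
            suffix_hits[w[-k:]] += 1

for suf, n in suffix_hits.most_common(10):
    print(f"-{suf}: {n} words with an attested stem")
```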
460
+
461
+ #### Productive Prefixes
462
+ | Prefix | Examples |
463
+ |--------|----------|
464
+
465
+ #### Productive Suffixes
466
+ | Suffix | Examples |
467
+ |--------|----------|
468
+ | `-র` | মোহানপুর, গুরবানীর, শান্তির |
469
+ | `-ের` | সেরাদের, সাইট্রেটের, লিওঁনের |
470
+ | `-ার` | ভোজভোদিনার, স্পেকটার, মনোরোগবিদ্যার |
471
+ | `-কে` | দুঃস্বপ্নকে, ক্লাইনকে, হাংচৌকে |
472
+
473
+ ### 6.3 Bound Stems (Lexical Roots)
474
+
475
+ Bound stems are high-frequency subword units that are semantically cohesive but rarely appear as standalone words. These often correspond to the 'core' of a word that requires inflection or derivation to be valid.
476
+
477
+ | Stem | Cohesion | Substitutability | Examples |
478
+ |------|----------|------------------|----------|
479
+ | `ress` | 3.30x | 93 contexts | press, dress, cress |
480
+ | `nter` | 3.28x | 88 contexts | enter, unter, anter |
481
+ | `atio` | 3.33x | 77 contexts | ratio, ation, natio |
482
+ | `ctio` | 3.38x | 50 contexts | action, lectio, suction |
483
+ | `stor` | 2.96x | 87 contexts | astor, stora, stori |
484
+ | `mber` | 3.07x | 60 contexts | umber, ember, amber |
485
+ | `ence` | 3.40x | 37 contexts | pence, fence, bence |
486
+ | `ersi` | 3.11x | 43 contexts | ersin, persia, persie |
487
+ | `nati` | 3.22x | 34 contexts | natio, nativa, nation |
488
+ | `ical` | 3.23x | 33 contexts | epical, apical, micali |
489
+ | `ieve` | 3.35x | 25 contexts | sieve, lieve, pieve |
490
+ | `embe` | 3.34x | 20 contexts | ember, rember, embers |
491
+
492
+ ### 6.4 Affix Compatibility (Co-occurrence)
493
+
494
+ This table shows which prefixes and suffixes most frequently co-occur on the same stems, revealing the 'stacking' rules of the language's morphology.
495
+
496
+ *No significant affix co-occurrences detected.*
497
+
498
+
499
+ ### 6.5 Recursive Morpheme Segmentation
500
+
501
+ Using **Recursive Hierarchical Substitutability**, we decompose complex words into their constituent morphemes. This approach handles nested affixes (e.g., `prefix-prefix-root-suffix`).
502
+
503
+ | Word | Suggested Split | Confidence | Stem |
504
+ |------|-----------------|------------|------|
505
+ | স্যাপারের | **`স্যাপ-ার-ের`** | 6.0 | `স্যাপ` |
506
+ | ক্রুসেডারের | **`ক্রুসেড-ার-ের`** | 6.0 | `ক্রুসেড` |
507
+ | পরিষদসমূহের | **`পরিষদসমূহ-ের`** | 4.5 | `পরিষদসমূহ` |
508
+ | তন্তুগুলিকে | **`তন্তুগুলি-কে`** | 4.5 | `তন্তুগুলি` |
509
+ | ইতালিয়াসের | **`ইতালিয়াস-ের`** | 4.5 | `ইতালিয়াস` |
510
+ | দ্বিতীয়কে | **`দ্বিতীয়-কে`** | 4.5 | `দ্বিতীয়` |
511
+ | অ্যাসপার্টের | **`অ্যাসপার্ট-ের`** | 4.5 | `অ্যাসপার্ট` |
512
+ | পেটারসেনের | **`পেটারসেন-ের`** | 4.5 | `পেটারসেন` |
513
+ | হার্জেগোভিনাকে | **`হার্জেগোভিনা-কে`** | 4.5 | `হার্জেগোভিনা` |
514
+ | অ্যাক্টিনের | **`অ্যাক্টিন-ের`** | 4.5 | `অ্যাক্টিন` |
515
+ | মাইগ্রেশনের | **`মাইগ্রেশন-ের`** | 4.5 | `মাইগ্রেশন` |
516
+ | এরদোয়ানকে | **`এরদোয়ান-কে`** | 4.5 | `এরদোয়ান` |
517
+ | ক্রীড়াঙ্গণের | **`ক্রীড়াঙ্গণ-ের`** | 4.5 | `ক্রীড়াঙ্গণ` |
518
+ | অ্যাপোপটোসিসের | **`অ্যাপোপটোসিস-ের`** | 4.5 | `অ্যাপোপটোসিস` |
519
+ | জার্নালকে | **`জার্নাল-কে`** | 4.5 | `জার্নাল` |
520
+
521
+ ### 6.6 Linguistic Interpretation
522
+
523
+ > **Automated Insight:**
524
+ The language Bangla shows high morphological productivity. The subword models are significantly more efficient than word models, suggesting a rich system of affixation or compounding.
525
+
526
+ ---
527
+ ## 7. Summary & Recommendations
528
 
529
  ![Performance Dashboard](visualizations/performance_dashboard.png)
530
 
 
532
 
533
  | Component | Recommended | Rationale |
534
  |-----------|-------------|-----------|
535
+ | Tokenizer | **64k BPE** | Best compression (5.04x) |
536
+ | N-gram | **2-gram** | Lowest perplexity (2,633) |
537
+ | Markov | **Context-4** | Highest predictability (95.9%) |
538
  | Embeddings | **100d** | Balanced semantic capture and isotropy |
539
 
540
+
541
  ---
542
  ## Appendix: Metrics Glossary & Interpretation Guide
543
 
 
727
  author = {Kamali, Omar},
728
  title = {Wikilangs: Open NLP Models for Wikipedia Languages},
729
  year = {2025},
730
+ doi = {10.5281/zenodo.18073153},
731
+ publisher = {Zenodo},
732
  url = {https://huggingface.co/wikilangs}
733
  institution = {Omneity Labs}
734
  }
 
744
  - 🤗 Models: [huggingface.co/wikilangs](https://huggingface.co/wikilangs)
745
  - 📊 Data: [wikipedia-monthly](https://huggingface.co/datasets/omarkamali/wikipedia-monthly)
746
  - 👤 Author: [Omar Kamali](https://huggingface.co/omarkamali)
747
+ - 🤝 Sponsor: [Featherless AI](https://featherless.ai)
748
  ---
749
  *Generated by Wikilangs Models Pipeline*
750
 
751
+ *Report Date: 2026-01-07 08:35:42*
models/embeddings/aligned/bn_128d.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68c66e714e796460e582954847cb35abb587c0db7712e7425e5b035727bf8a71
3
+ size 1522489051
models/embeddings/aligned/bn_128d.meta.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"lang": "bn", "dim": 128, "max_seq_len": 512, "is_aligned": true}
models/embeddings/aligned/bn_128d.projection.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c8e4d1c2fa3f959df7f016741c06c87ad9f287fe383323bc1e2889047af1a37b
3
+ size 65664
models/embeddings/aligned/bn_128d_metadata.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "language": "bn",
3
+ "dimension": 128,
4
+ "version": "aligned",
5
+ "hub_language": "en",
6
+ "seed_vocab_size": 68079,
7
+ "vocab_size": 472687
8
+ }
models/embeddings/aligned/bn_32d.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0539f5f093dd8322c982c9af1d262ccff60369a3317c1f6aa4bff095a116759
3
+ size 391465435
models/embeddings/aligned/bn_32d.meta.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"lang": "bn", "dim": 32, "max_seq_len": 512, "is_aligned": true}
models/embeddings/aligned/bn_32d.projection.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9a40a9776f59f0cdfe0b5e0c350ac03106d0e80a652cc8779bd9ec60c0ed7d98
3
+ size 4224
models/embeddings/aligned/bn_32d_metadata.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "language": "bn",
3
+ "dimension": 32,
4
+ "version": "aligned",
5
+ "hub_language": "en",
6
+ "seed_vocab_size": 68079,
7
+ "vocab_size": 472687
8
+ }
models/embeddings/aligned/bn_64d.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a5240606f2e4e32f6b075e98ca2452149f390164f74fb35686ef8a9087536b0
3
+ size 768473307
models/embeddings/aligned/bn_64d.meta.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"lang": "bn", "dim": 64, "max_seq_len": 512, "is_aligned": true}
models/embeddings/aligned/bn_64d.projection.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c9cca8d773a7278be093c697acf22fd4397e79fae7cf2ceb7c92dbb098080cb0
3
+ size 16512
models/embeddings/aligned/bn_64d_metadata.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "language": "bn",
3
+ "dimension": 64,
4
+ "version": "aligned",
5
+ "hub_language": "en",
6
+ "seed_vocab_size": 68079,
7
+ "vocab_size": 472687
8
+ }
models/embeddings/monolingual/bn_128d.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:26e2e2343255830a534f320fde4263aaa50541a543b5fbc99b4a836d1708f6ed
3
- size 1554303761
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68c66e714e796460e582954847cb35abb587c0db7712e7425e5b035727bf8a71
3
+ size 1522489051
models/embeddings/monolingual/bn_128d_metadata.json CHANGED
@@ -3,11 +3,13 @@
3
  "dimension": 128,
4
  "version": "monolingual",
5
  "training_params": {
6
- "dim": 128,
7
  "min_count": 5,
8
  "window": 5,
9
  "negative": 5,
10
- "epochs": 5
 
 
11
  },
12
- "vocab_size": 502529
13
  }
 
3
  "dimension": 128,
4
  "version": "monolingual",
5
  "training_params": {
6
+ "algorithm": "skipgram",
7
  "min_count": 5,
8
  "window": 5,
9
  "negative": 5,
10
+ "epochs": 5,
11
+ "encoding_method": "rope",
12
+ "dim": 128
13
  },
14
+ "vocab_size": 472687
15
  }
models/embeddings/monolingual/bn_32d.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:911654690f1911e79d553aca3a334439dafa8c950bae483cf0c07d3ea4e1772b
3
- size 400361489
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0539f5f093dd8322c982c9af1d262ccff60369a3317c1f6aa4bff095a116759
3
+ size 391465435
models/embeddings/monolingual/bn_32d_metadata.json CHANGED
@@ -3,11 +3,13 @@
3
  "dimension": 32,
4
  "version": "monolingual",
5
  "training_params": {
6
- "dim": 32,
7
  "min_count": 5,
8
  "window": 5,
9
  "negative": 5,
10
- "epochs": 5
 
 
11
  },
12
- "vocab_size": 502529
13
  }
 
3
  "dimension": 32,
4
  "version": "monolingual",
5
  "training_params": {
6
+ "algorithm": "skipgram",
7
  "min_count": 5,
8
  "window": 5,
9
  "negative": 5,
10
+ "epochs": 5,
11
+ "encoding_method": "rope",
12
+ "dim": 32
13
  },
14
+ "vocab_size": 472687
15
  }
models/embeddings/monolingual/bn_64d.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:94bb8ad1b61a560bfdb17d79f222765c5f109aca6fedef51a4f335af7d665217
3
- size 785008913
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a5240606f2e4e32f6b075e98ca2452149f390164f74fb35686ef8a9087536b0
3
+ size 768473307
models/embeddings/monolingual/bn_64d_metadata.json CHANGED
@@ -3,11 +3,13 @@
3
  "dimension": 64,
4
  "version": "monolingual",
5
  "training_params": {
6
- "dim": 64,
7
  "min_count": 5,
8
  "window": 5,
9
  "negative": 5,
10
- "epochs": 5
 
 
11
  },
12
- "vocab_size": 502529
13
  }
 
3
  "dimension": 64,
4
  "version": "monolingual",
5
  "training_params": {
6
+ "algorithm": "skipgram",
7
  "min_count": 5,
8
  "window": 5,
9
  "negative": 5,
10
+ "epochs": 5,
11
+ "encoding_method": "rope",
12
+ "dim": 64
13
  },
14
+ "vocab_size": 472687
15
  }
models/subword_markov/bn_markov_ctx1_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:13a0b6a9ac2105c1af4f886f06a71d9db23ecb5a32652e818e7c2ddf640bc671
3
- size 772286
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bee913e9687b7aed1884b6f654b28eafdcfe7591f8ac85e3e1f9cab7fb669f30
3
+ size 2979826
models/subword_markov/bn_markov_ctx1_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 1,
3
  "variant": "subword",
4
  "language": "bn",
5
- "unique_contexts": 11996,
6
- "total_transitions": 553695182
7
  }
 
2
  "context_size": 1,
3
  "variant": "subword",
4
  "language": "bn",
5
+ "unique_contexts": 30042,
6
+ "total_transitions": 322811994
7
  }
models/subword_markov/bn_markov_ctx2_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:e1c81da270a2a82d5b8c625aa355045c0a0670e6228e198544f60acda4472ec5
3
- size 5606771
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:52ddbc3da358a1d2de9569e04c85887a7f5c0b6349661ae791f5fbe08b4fb9bc
3
+ size 21164179
models/subword_markov/bn_markov_ctx2_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 2,
3
  "variant": "subword",
4
  "language": "bn",
5
- "unique_contexts": 117615,
6
- "total_transitions": 553516718
7
  }
 
2
  "context_size": 2,
3
  "variant": "subword",
4
  "language": "bn",
5
+ "unique_contexts": 436358,
6
+ "total_transitions": 322633866
7
  }
models/subword_markov/bn_markov_ctx3_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a4b88518401525add742d578b9efa13a5511741c545ce0dcc6ceeb6acb7704ab
3
- size 26159855
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:55410138c445ba7227950ea53bd23fc17de183b6debb4d55f57ff5feb99dc2cc
3
+ size 99596508
models/subword_markov/bn_markov_ctx3_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 3,
3
  "variant": "subword",
4
  "language": "bn",
5
- "unique_contexts": 727384,
6
- "total_transitions": 553338254
7
  }
 
2
  "context_size": 3,
3
  "variant": "subword",
4
  "language": "bn",
5
+ "unique_contexts": 2877364,
6
+ "total_transitions": 322455738
7
  }
models/subword_markov/bn_markov_ctx4_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:4c54eb916a5048ffb603a48ea1d6d64f6bfe3b00818a8d1bffa427255daa9c99
3
- size 100901318
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7cc7e1d9e58c4380879adab5ceb35224de567178fe2ff64f9c504c42de2d8b99
3
+ size 342488328
models/subword_markov/bn_markov_ctx4_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 4,
3
  "variant": "subword",
4
  "language": "bn",
5
- "unique_contexts": 3549554,
6
- "total_transitions": 553159790
7
  }
 
2
  "context_size": 4,
3
  "variant": "subword",
4
  "language": "bn",
5
+ "unique_contexts": 11819297,
6
+ "total_transitions": 322277610
7
  }
models/subword_ngram/bn_2gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:87c44bccc6719de458cb056280276826654f9ecfa18d99048424106a8f1b8f40
3
- size 469388
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fbf8fc71e8d1e830d2fddd16bb11b242c968e902d23bc55680c772166962f303
3
+ size 2349611
models/subword_ngram/bn_2gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "n": 2,
3
  "variant": "subword",
4
  "language": "bn",
5
- "unique_ngrams": 33180,
6
- "total_ngrams": 553695182
7
  }
 
2
  "n": 2,
3
  "variant": "subword",
4
  "language": "bn",
5
+ "unique_ngrams": 151712,
6
+ "total_ngrams": 322811994
7
  }
models/subword_ngram/bn_3gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:78fb7840741ea86971d4925542c9e6ce6e6c79ac8ce399abba98433705fb070b
3
- size 4114566
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dfcc6ae0e58e0f09d55e8cc261f9c636ca0f30341a8ee4d84f42eda6049d2b39
3
+ size 18658634
models/subword_ngram/bn_3gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "n": 3,
3
  "variant": "subword",
4
  "language": "bn",
5
- "unique_ngrams": 328172,
6
- "total_ngrams": 553516718
7
  }
 
2
  "n": 3,
3
  "variant": "subword",
4
  "language": "bn",
5
+ "unique_ngrams": 1149281,
6
+ "total_ngrams": 322633866
7
  }
models/subword_ngram/bn_4gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:46b2845e143d5e9021ee5b87cb38819dcdfb00f50601547315a5117b4da42688
3
- size 25302225
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ab89edc239619a5c3da3703b8a892f8dc9d9443cb8f0b83b26ef15b3719a24f3
3
+ size 95979511
models/subword_ngram/bn_4gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "n": 4,
3
  "variant": "subword",
4
  "language": "bn",
5
- "unique_ngrams": 1934479,
6
- "total_ngrams": 553338254
7
  }
 
2
  "n": 4,
3
  "variant": "subword",
4
  "language": "bn",
5
+ "unique_ngrams": 5668680,
6
+ "total_ngrams": 322455738
7
  }
models/subword_ngram/bn_5gram_subword.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2fe48b6d0b51077cbfd219963c3bfd1ac3d955c94ec27e6c5a4d83301d473044
3
+ size 231529766
models/subword_ngram/bn_5gram_subword_metadata.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "n": 5,
3
+ "variant": "subword",
4
+ "language": "bn",
5
+ "unique_ngrams": 12813291,
6
+ "total_ngrams": 322277610
7
+ }
models/tokenizer/bn_tokenizer_16k.model CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:68e1ac6a6a41154f9552ef7791eb5aaad0d47e5f8c5657a570b191e0f483c20a
3
- size 641106
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c72f276c4670959b69a6b71906a8ebfea694c3627b7dc13630d54b2bd8b0c9e
3
+ size 643727
models/tokenizer/bn_tokenizer_16k.vocab CHANGED
The diff for this file is too large to render.
 
models/tokenizer/bn_tokenizer_32k.model CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:39be559e8432dcd4b0837f4c1116b1daaaeb8f85ead3a786b09b36cd27681a88
3
- size 1074049
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3bc92ae339db46a16c14382611a1c0b63b4dae6b9831140d3a80723bc6519fc2
3
+ size 1074282
models/tokenizer/bn_tokenizer_32k.vocab CHANGED
The diff for this file is too large to render.
 
models/tokenizer/bn_tokenizer_64k.model CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:4d1cb3f78cd4392dd3e19ff9cde695d104459cebc3f75d35d45933142a9489e9
3
- size 1967297
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e4dc9fa0b527e7c29a1b9cce533d08c97606addc1e2e5246e7684f5010cdfd5
3
+ size 1958130
models/tokenizer/bn_tokenizer_64k.vocab CHANGED
The diff for this file is too large to render.
 
models/tokenizer/bn_tokenizer_8k.model CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:fe2dd93cbc1830854f019c57ff5e0efd8b11fcda823d0bb65cfbbef3ad4eca53
3
- size 433093
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:875e1c45c6faaaba63c82f02e50788d2a18b799f9821901f0d97a1ab73e6643d
3
+ size 436160
models/tokenizer/bn_tokenizer_8k.vocab CHANGED
The diff for this file is too large to render.
 
models/vocabulary/bn_vocabulary.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:8da3f061defc453cbf1bb0c1e36b511077f05f0d399b060245d1a6a437f8c4a1
3
- size 3901209
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:113c1e6f021ed34e9d055dc2e3a5f3c6c32149d04fa6150c0d28b08de6ccddba
3
+ size 14747596
models/vocabulary/bn_vocabulary_metadata.json CHANGED
@@ -1,16 +1,17 @@
1
  {
2
  "language": "bn",
3
- "vocabulary_size": 250463,
 
4
  "statistics": {
5
- "type_token_ratio": 0.0036646333232905415,
6
  "coverage": {
7
- "top_100": 0.8173536449340773,
8
- "top_1000": 0.9499165937132837,
9
- "top_5000": 0.9795696203113472,
10
- "top_10000": 0.9855947158392181
11
  },
12
- "hapax_count": 489412,
13
- "hapax_ratio": 0.6614793039364758,
14
- "total_documents": 178464
15
  }
16
  }
 
1
  {
2
  "language": "bn",
3
+ "vocabulary_size": 838913,
4
+ "variant": "full",
5
  "statistics": {
6
+ "type_token_ratio": 0.028463334355100994,
7
  "coverage": {
8
+ "top_100": 0.23472311509159952,
9
+ "top_1000": 0.49273622999452293,
10
+ "top_5000": 0.7004163862730208,
11
+ "top_10000": 0.7745431098907019
12
  },
13
+ "hapax_count": 1242930,
14
+ "hapax_ratio": 0.5970334938801821,
15
+ "total_documents": 178128
16
  }
17
  }
models/word_markov/bn_markov_ctx1_word.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3e4832f62be8b99ea4eeb5d4269e7bbe06d6de2b4621946fad64f9ec22957eea
3
- size 38908723
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79fe1bda80924de3abc69e1f62f8a82bf9082d23bf8c9fd44590c2059e2897c2
3
+ size 288772075
models/word_markov/bn_markov_ctx1_word_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 1,
3
  "variant": "word",
4
  "language": "bn",
5
- "unique_contexts": 741598,
6
- "total_transitions": 376897683
7
  }
 
2
  "context_size": 1,
3
  "variant": "word",
4
  "language": "bn",
5
+ "unique_contexts": 2081488,
6
+ "total_transitions": 72963092
7
  }
models/word_markov/bn_markov_ctx2_word.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a4ba7c5295c6739716c57443fe46ae2ac3abef527a4acbe3169b854b8d2cda2f
3
- size 117081865
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:20ccc0069fa50aec10ee75ed6ea5351816fba18aef3fb75c4cd6abc5374e784a
3
+ size 1037331421
models/word_markov/bn_markov_ctx2_word_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 2,
3
  "variant": "word",
4
  "language": "bn",
5
- "unique_contexts": 4117166,
6
- "total_transitions": 376719219
7
  }
 
2
  "context_size": 2,
3
  "variant": "word",
4
  "language": "bn",
5
+ "unique_contexts": 23273031,
6
+ "total_transitions": 72784964
7
  }