omarkamali committed
Commit d49af24 · verified · 1 Parent(s): ff6240b

Upload all models and assets for bxr (latest)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +1 -0
  2. README.md +226 -185
  3. models/embeddings/aligned/bxr_128d.bin +3 -0
  4. models/embeddings/aligned/bxr_128d.meta.json +1 -0
  5. models/embeddings/aligned/bxr_128d.projection.npy +3 -0
  6. models/embeddings/aligned/bxr_128d_metadata.json +8 -0
  7. models/embeddings/aligned/bxr_32d.bin +3 -0
  8. models/embeddings/aligned/bxr_32d.meta.json +1 -0
  9. models/embeddings/aligned/bxr_32d.projection.npy +3 -0
  10. models/embeddings/aligned/bxr_32d_metadata.json +8 -0
  11. models/embeddings/aligned/bxr_64d.bin +3 -0
  12. models/embeddings/aligned/bxr_64d.meta.json +1 -0
  13. models/embeddings/aligned/bxr_64d.projection.npy +3 -0
  14. models/embeddings/aligned/bxr_64d_metadata.json +8 -0
  15. models/embeddings/monolingual/bxr_128d.bin +2 -2
  16. models/embeddings/monolingual/bxr_128d_metadata.json +1 -1
  17. models/embeddings/monolingual/bxr_32d.bin +2 -2
  18. models/embeddings/monolingual/bxr_32d_metadata.json +1 -1
  19. models/embeddings/monolingual/bxr_64d.bin +2 -2
  20. models/embeddings/monolingual/bxr_64d_metadata.json +1 -1
  21. models/subword_markov/bxr_markov_ctx1_subword.parquet +2 -2
  22. models/subword_markov/bxr_markov_ctx1_subword_metadata.json +2 -2
  23. models/subword_markov/bxr_markov_ctx2_subword.parquet +2 -2
  24. models/subword_markov/bxr_markov_ctx2_subword_metadata.json +2 -2
  25. models/subword_markov/bxr_markov_ctx3_subword.parquet +2 -2
  26. models/subword_markov/bxr_markov_ctx3_subword_metadata.json +2 -2
  27. models/subword_markov/bxr_markov_ctx4_subword.parquet +2 -2
  28. models/subword_markov/bxr_markov_ctx4_subword_metadata.json +2 -2
  29. models/subword_ngram/bxr_2gram_subword.parquet +2 -2
  30. models/subword_ngram/bxr_2gram_subword_metadata.json +2 -2
  31. models/subword_ngram/bxr_3gram_subword.parquet +2 -2
  32. models/subword_ngram/bxr_3gram_subword_metadata.json +2 -2
  33. models/subword_ngram/bxr_4gram_subword.parquet +2 -2
  34. models/subword_ngram/bxr_4gram_subword_metadata.json +2 -2
  35. models/subword_ngram/bxr_5gram_subword.parquet +3 -0
  36. models/subword_ngram/bxr_5gram_subword_metadata.json +7 -0
  37. models/tokenizer/bxr_tokenizer_16k.model +2 -2
  38. models/tokenizer/bxr_tokenizer_16k.vocab +0 -0
  39. models/tokenizer/bxr_tokenizer_32k.model +2 -2
  40. models/tokenizer/bxr_tokenizer_32k.vocab +0 -0
  41. models/tokenizer/bxr_tokenizer_64k.model +2 -2
  42. models/tokenizer/bxr_tokenizer_64k.vocab +0 -0
  43. models/tokenizer/bxr_tokenizer_8k.model +2 -2
  44. models/tokenizer/bxr_tokenizer_8k.vocab +0 -0
  45. models/vocabulary/bxr_vocabulary.parquet +2 -2
  46. models/vocabulary/bxr_vocabulary_metadata.json +9 -9
  47. models/word_markov/bxr_markov_ctx1_word.parquet +2 -2
  48. models/word_markov/bxr_markov_ctx1_word_metadata.json +2 -2
  49. models/word_markov/bxr_markov_ctx2_word.parquet +2 -2
  50. models/word_markov/bxr_markov_ctx2_word_metadata.json +2 -2
.gitattributes CHANGED
@@ -39,3 +39,4 @@ visualizations/position_encoding_comparison.png filter=lfs diff=lfs merge=lfs -t
39
  visualizations/tsne_sentences.png filter=lfs diff=lfs merge=lfs -text
40
  visualizations/tsne_words.png filter=lfs diff=lfs merge=lfs -text
41
  visualizations/zipf_law.png filter=lfs diff=lfs merge=lfs -text
 
 
39
  visualizations/tsne_sentences.png filter=lfs diff=lfs merge=lfs -text
40
  visualizations/tsne_words.png filter=lfs diff=lfs merge=lfs -text
41
  visualizations/zipf_law.png filter=lfs diff=lfs merge=lfs -text
42
+ visualizations/embedding_tsne_multilingual.png filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,6 +1,6 @@
1
  ---
2
  language: bxr
3
- language_name: BXR
4
  language_family: mongolic
5
  tags:
6
  - wikilangs
@@ -10,11 +10,21 @@ tags:
10
  - n-gram
11
  - markov
12
  - wikipedia
 
 
 
 
 
 
 
 
 
 
13
  - monolingual
14
  - family-mongolic
15
  license: mit
16
  library_name: wikilangs
17
- pipeline_tag: feature-extraction
18
  datasets:
19
  - omarkamali/wikipedia-monthly
20
  dataset_info:
@@ -23,20 +33,20 @@ dataset_info:
23
  metrics:
24
  - name: best_compression_ratio
25
  type: compression
26
- value: 4.390
27
  - name: best_isotropy
28
  type: isotropy
29
- value: 0.8916
30
  - name: vocabulary_size
31
  type: vocab
32
  value: 0
33
  generated: 2026-01-03
34
  ---
35
 
36
- # BXR - Wikilangs Models
37
  ## Comprehensive Research Report & Full Ablation Study
38
 
39
- This repository contains NLP models trained and evaluated by Wikilangs, specifically on **BXR** Wikipedia data.
40
  We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and word embeddings.
41
 
42
  ## 📋 Repository Contents
@@ -60,7 +70,7 @@ We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and
60
  - [3. Markov Chain Evaluation](#3-markov-chain-evaluation)
61
  - [4. Vocabulary Analysis](#4-vocabulary-analysis)
62
  - [5. Word Embeddings Evaluation](#5-word-embeddings-evaluation)
63
- - [6. Morphological Analysis (Experimental)](#6-morphological-analysis)
64
  - [7. Summary & Recommendations](#7-summary--recommendations)
65
  - [Metrics Glossary](#appendix-metrics-glossary--interpretation-guide)
66
  - [Visualizations Index](#visualizations-index)
@@ -80,47 +90,47 @@ We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and
80
 
81
  | Vocab Size | Compression | Avg Token Len | UNK Rate | Total Tokens |
82
  |------------|-------------|---------------|----------|--------------|
83
- | **8k** | 3.450x | 3.45 | 0.1418% | 628,340 |
84
- | **16k** | 3.842x | 3.84 | 0.1579% | 564,308 |
85
- | **32k** | 4.148x | 4.15 | 0.1705% | 522,647 |
86
- | **64k** | 4.390x 🏆 | 4.39 | 0.1804% | 493,909 |
87
 
88
  ### Tokenization Examples
89
 
90
  Below are sample sentences tokenized with each vocabulary size:
91
 
92
- **Sample 1:** `Цзяинь - Ород Википеэдийн Үбэр Монголой долоо хоногой үгүүлэл. Мүн үзэхэ Үбэр Мо...`
93
 
94
  | Vocab | Tokens | Count |
95
  |-------|--------|-------|
96
- | 8k | `▁цз я инь ▁- ▁ород ▁википеэдийн ▁үбэр ▁монголой ▁долоо ▁хоногой ... (+8 more)` | 18 |
97
- | 16k | `▁цз я инь ▁- ▁ород ▁википеэдийн ▁үбэр ▁монголой ▁долоо ▁хоногой ... (+8 more)` | 18 |
98
- | 32k | `▁цзя инь ▁- ▁ород ▁википеэдийн ▁үбэр ▁монголой ▁долоо ▁хоногой ▁үгүүлэл ... (+7 more)` | 17 |
99
- | 64k | `▁цзяинь ▁- ▁ород ▁википеэдийн ▁үбэр ▁монголой ▁долоо ▁хоногой ▁үгүүлэл . ... (+6 more)` | 16 |
100
 
101
- **Sample 2:** `Мобилизаци гээшэ зэбсэгтэ хүсэниие энхэ тайбангай байдалһаань дайнай байдалда ор...`
102
 
103
  | Vocab | Tokens | Count |
104
  |-------|--------|-------|
105
- | 8k | `▁м об ил изаци ▁гээшэ ▁зэбсэгтэ ▁хүсэн иие ▁энхэ ▁тайбан ... (+11 more)` | 21 |
106
- | 16k | `▁м об ил изаци ▁гээшэ ▁зэбсэгтэ ▁хүсэниие ▁энхэ ▁тайбан гай ... (+9 more)` | 19 |
107
- | 32k | `▁м обилизаци ▁гээшэ ▁зэбсэгтэ ▁хүсэниие ▁энхэ ▁тайбангай ▁байдалһаань ▁дайнай ▁байдалда ... (+5 more)` | 15 |
108
- | 64k | `▁мобилизаци ▁гээшэ ▁зэбсэгтэ ▁хүсэниие ▁энхэ ▁тайбангай ▁байдалһаань ▁дайнай ▁байдалда ▁оруулха ... (+4 more)` | 14 |
109
 
110
- **Sample 3:** `Гильзэ — буугай һомоной түмэр патрон. Зүүлтэ зэбсэг`
111
 
112
  | Vocab | Tokens | Count |
113
  |-------|--------|-------|
114
- | 8k | `▁г иль зэ ▁— ▁буу гай ▁һом оной ▁түмэр ▁патр ... (+4 more)` | 14 |
115
- | 16k | `▁г иль зэ ▁— ▁буу гай ▁һомоной ▁түмэр ▁патр он ... (+3 more)` | 13 |
116
- | 32k | `▁г иль зэ ▁— ▁буу гай ▁һомоной ▁түмэр ▁патр он ... (+3 more)` | 13 |
117
- | 64k | `▁г иль зэ ▁— ▁буугай ▁һомоной ▁түмэр ▁патрон . ▁зүүлтэ ... (+1 more)` | 11 |
118
 
119
 
120
  ### Key Findings
121
 
122
- - **Best Compression:** 64k achieves 4.390x compression
123
- - **Lowest UNK Rate:** 8k with 0.1418% unknown tokens
124
  - **Trade-off:** Larger vocabularies improve compression but increase model size
125
  - **Recommendation:** 32k vocabulary provides optimal balance for production use
126
 
@@ -137,12 +147,14 @@ Below are sample sentences tokenized with each vocabulary size:
137
 
138
  | N-gram | Variant | Perplexity | Entropy | Unique N-grams | Top-100 Coverage | Top-1000 Coverage |
139
  |--------|---------|------------|---------|----------------|------------------|-------------------|
140
- | **2-gram** | Word | 4,169 | 12.03 | 8,128 | 19.5% | 49.4% |
141
- | **2-gram** | Subword | 452 🏆 | 8.82 | 3,823 | 56.9% | 96.7% |
142
- | **3-gram** | Word | 3,724 | 11.86 | 7,805 | 24.5% | 47.7% |
143
- | **3-gram** | Subword | 3,736 | 11.87 | 29,340 | 20.6% | 62.1% |
144
- | **4-gram** | Word | 7,537 | 12.88 | 14,616 | 19.0% | 34.7% |
145
- | **4-gram** | Subword | 18,031 | 14.14 | 124,835 | 9.4% | 34.5% |
 
 
146
 
147
  ### Top 5 N-grams by Size
148
 
@@ -150,68 +162,88 @@ Below are sample sentences tokenized with each vocabulary size:
150
 
151
  | Rank | N-gram | Count |
152
  |------|--------|-------|
153
- | 1 | `энэ үдэр` | 1,070 |
154
- | 2 | `гү али` | 1,030 |
155
- | 3 | `of the` | 465 |
156
- | 4 | `байна энэ` | 415 |
157
  | 5 | `бүгэдэ найрамдаха` | 396 |
158
 
159
  **3-grams (Word):**
160
 
161
  | Rank | N-gram | Count |
162
  |------|--------|-------|
163
- | 1 | `үдэр наһа бараһаниинь` | 353 |
164
- | 2 | `үдэр тохёоһон үйлэ` | 353 |
165
- | 3 | `энэ үдэр түрэһэниинь` | 353 |
166
- | 4 | `үйлэ ябадалай жагсаалта` | 353 |
167
- | 5 | `энэ үдэр тохёоһон` | 353 |
168
 
169
  **4-grams (Word):**
170
 
171
  | Rank | N-gram | Count |
172
  |------|--------|-------|
173
- | 1 | `энэ үдэр наһа бараһаниинь` | 353 |
174
- | 2 | `тохёоһон үйлэ ябадалай жагсаалта` | 353 |
175
- | 3 | `үдэр тохёоһон үйлэ ябадалай` | 353 |
176
- | 4 | `энэ үдэр тохёоһон үйлэ` | 353 |
177
- | 5 | `энэ үдэрэй тэмдэглэлтэ баяр` | 345 |
 
 
 
 
 
 
 
 
 
 
178
 
179
  **2-grams (Subword):**
180
 
181
  | Rank | N-gram | Count |
182
  |------|--------|-------|
183
- | 1 | `н _` | 82,295 |
184
- | 2 | `й _` | 56,691 |
185
- | 3 | `_ б` | 54,353 |
186
- | 4 | `_ х` | 50,092 |
187
- | 5 | `а й` | 48,574 |
188
 
189
  **3-grams (Subword):**
190
 
191
  | Rank | N-gram | Count |
192
  |------|--------|-------|
193
- | 1 | `а й _` | 24,558 |
194
- | 2 | `_ б а` | 24,246 |
195
- | 3 | `ы н _` | 18,435 |
196
- | 4 | `э й _` | 17,416 |
197
- | 5 | `а н _` | 16,805 |
198
 
199
  **4-grams (Subword):**
200
 
201
  | Rank | N-gram | Count |
202
  |------|--------|-------|
203
- | 1 | `_ б а й` | 12,907 |
204
- | 2 | `_ б о л` | 11,173 |
205
- | 3 | `б о л о` | 9,002 |
206
- | 4 | `и и н _` | 6,889 |
207
- | 5 | `_ у л а` | 6,870 |
 
 
 
 
 
 
 
 
 
 
208
 
209
 
210
  ### Key Findings
211
 
212
  - **Best Perplexity:** 2-gram (subword) with 452
213
  - **Entropy Trend:** Decreases with larger n-grams (more predictable)
214
- - **Coverage:** Top-1000 patterns cover ~35% of corpus
215
  - **Recommendation:** 4-gram or 5-gram for best predictive performance
216
 
217
  ---
@@ -227,14 +259,14 @@ Below are sample sentences tokenized with each vocabulary size:
227
 
228
  | Context | Variant | Avg Entropy | Perplexity | Branching Factor | Unique Contexts | Predictability |
229
  |---------|---------|-------------|------------|------------------|-----------------|----------------|
230
- | **1** | Word | 0.7391 | 1.669 | 4.14 | 92,909 | 26.1% |
231
- | **1** | Subword | 0.8623 | 1.818 | 5.69 | 2,141 | 13.8% |
232
- | **2** | Word | 0.1430 | 1.104 | 1.26 | 383,260 | 85.7% |
233
- | **2** | Subword | 0.8174 | 1.762 | 5.04 | 12,176 | 18.3% |
234
- | **3** | Word | 0.0340 | 1.024 | 1.05 | 482,888 | 96.6% |
235
- | **3** | Subword | 0.7977 | 1.738 | 3.77 | 61,348 | 20.2% |
236
- | **4** | Word | 0.0111 🏆 | 1.008 | 1.02 | 504,904 | 98.9% |
237
- | **4** | Subword | 0.5768 | 1.491 | 2.40 | 230,966 | 42.3% |
238
 
239
  ### Generated Text Samples (Word-based)
240
 
@@ -242,27 +274,27 @@ Below are text samples generated from each word-based Markov chain model:
242
 
243
  **Context Size 1:**
244
 
245
- 1. `ба үлэнтэй шэлэ нюруу шулуун харшанууд гэхэ мэтэ болобош 1 хушааһан 4 зуун орост нуран унажа`
246
- 2. `юм зүүлтэ гадаада ба хүн гэжэ намые арадай хуралай депутатаар һунгагдаһан юрэнхылэгшэ байгаа тула тэ...`
247
- 3. `энэ үедэ мадрид мадридынь шэнэ үгэнүүд бии гал носоохо гал задагай агаарта гү али алинда гарза`
248
 
249
  **Context Size 2:**
250
 
251
- 1. `энэ үдэр тохёоһон үйлэ ябадалай жагсаалта энэ үдэр түрэһэниинь оной урда үе энэ үдэр тохёоһон үйлэ я...`
252
- 2. `гү али бэеын дархалалай харюу урбалаар янза бүриин үнгэтэй улаан ногоон шара г м түлэб хиинүүдынь хи...`
253
- 3. `of the american library association energy data statistics for russia from the principles of water i...`
254
 
255
  **Context Size 3:**
256
 
257
- 1. `энэ үдэр түрэһэниинь энэ үдэр наһа бараһаниинь николай островский зүблэлтэ зохёолшо как закалялась с...`
258
- 2. `тохёоһон үйлэ ябадалай жагсаалта энэ үдэр түрэһэниинь энэ үдэр наһа бараһаниинь энэ үдэрэй тэмдэглэл...`
259
- 3. `энэ үдэр тохёоһон үйлэ ябадалай жагсаалта энэ үдэр түрэһэниинь энэ үдэр наһа бараһаниинь энэ үдэрэй ...`
260
 
261
  **Context Size 4:**
262
 
263
- 1. `үдэр тохёоһон үйлэ ябадалай жагсаалта энэ үдэр түрэһэниинь энэ үдэр наһа бараһаниинь энэ үдэрэй тэмд...`
264
- 2. `энэ үдэр тохёоһон үйлэ ябадалай жагсаалта энэ үдэр түрэһэниинь энэ үдэр наһа бараһаниинь энэ үдэрэй ...`
265
- 3. `тохёоһон үйлэ ябадалай жагсаалта энэ үдэр түрэһэниинь оной урда үе энэ үдэр наһа бараһаниинь энэ үдэ...`
266
 
267
 
268
  ### Generated Text Samples (Subword-based)
@@ -271,34 +303,34 @@ Below are text samples generated from each subword-based Markov chain model:
271
 
272
  **Context Size 1:**
273
 
274
- 1. `_raps_s_бон_аали`
275
- 2. `аандэрыноданаай_`
276
- 3. `эругэнь_ой_оте_м`
277
 
278
  **Context Size 2:**
279
 
280
- 1. `н_хүн_юм.,_5_бари`
281
- 2. `й_(ганые_/kazano!`
282
- 3. `_бай_һар_180—512_`
283
 
284
  **Context Size 3:**
285
 
286
- 1. `ай_ботар_(үндэ_үед`
287
- 2. `_баярын,_камын_ург`
288
- 3. `ын_5-дуңма_хэрэ_өө`
289
 
290
  **Context Size 4:**
291
 
292
- 1. `_байлгаха_агналда_х`
293
- 2. `_боложо_уласые_байр`
294
- 3. `болон_тэнгисангуй_б`
295
 
296
 
297
  ### Key Findings
298
 
299
  - **Best Predictability:** Context-4 (word) with 98.9% predictability
300
  - **Branching Factor:** Decreases with context size (more deterministic)
301
- - **Memory Trade-off:** Larger contexts require more storage (230,966 contexts)
302
  - **Recommendation:** Context-3 or Context-4 for text generation
303
 
304
  ---
@@ -314,48 +346,48 @@ Below are text samples generated from each subword-based Markov chain model:
314
 
315
  | Metric | Value |
316
  |--------|-------|
317
- | Vocabulary Size | 36,185 |
318
- | Total Tokens | 491,809 |
319
- | Mean Frequency | 13.59 |
320
  | Median Frequency | 3 |
321
- | Frequency Std Dev | 73.56 |
322
 
323
  ### Most Common Words
324
 
325
  | Rank | Word | Frequency |
326
  |------|------|-----------|
327
- | 1 | ба | 3,838 |
328
- | 2 | юм | 3,200 |
329
- | 3 | энэ | 3,020 |
330
- | 4 | ондо | 2,873 |
331
- | 5 | болон | 2,652 |
332
- | 6 | оной | 2,566 |
333
- | 7 | байна | 2,566 |
334
- | 8 | улас | 2,455 |
335
- | 9 | the | 2,159 |
336
- | 10 | of | 2,042 |
337
 
338
  ### Least Common Words (from vocabulary)
339
 
340
  | Rank | Word | Frequency |
341
  |------|------|-----------|
342
- | 1 | үүсэбэринүүд | 2 |
343
- | 2 | ᠮᠠᠨᠠᠶ | 2 |
344
- | 3 | ᠲᠠᠢ | 2 |
345
- | 4 | ᠮᠣᠩᠭᠤᠯ | 2 |
346
- | 5 | ᠤᠷᠤᠨ | 2 |
347
- | 6 | ᠮᠢᠨᠢ | 2 |
348
- | 7 | ᠦᠷ | 2 |
349
- | 8 | ᠵᠢᠷᠭᠠᠯ | 2 |
350
- | 9 | дүхэригтэй | 2 |
351
- | 10 | исибагай | 2 |
352
 
353
  ### Zipf's Law Analysis
354
 
355
  | Metric | Value |
356
  |--------|-------|
357
- | Zipf Coefficient | 0.9662 |
358
- | R² (Goodness of Fit) | 0.993759 |
359
  | Adherence Quality | **excellent** |
360
 
361
  ### Coverage Analysis
@@ -363,15 +395,15 @@ Below are text samples generated from each subword-based Markov chain model:
363
  | Top N Words | Coverage |
364
  |-------------|----------|
365
  | Top 100 | 22.2% |
366
- | Top 1,000 | 52.2% |
367
- | Top 5,000 | 74.6% |
368
- | Top 10,000 | 84.1% |
369
 
370
  ### Key Findings
371
 
372
- - **Zipf Compliance:** R²=0.9938 indicates excellent adherence to Zipf's law
373
  - **High Frequency Dominance:** Top 100 words cover 22.2% of corpus
374
- - **Long Tail:** 26,185 words needed for remaining 15.9% coverage
375
 
376
  ---
377
  ## 5. Word Embeddings Evaluation
@@ -387,37 +419,40 @@ Below are text samples generated from each subword-based Markov chain model:
387
 
388
  ### 5.1 Cross-Lingual Alignment
389
 
390
- > *Note: Multilingual alignment visualization not available for this language.*
 
 
391
 
392
 
393
  ### 5.2 Model Comparison
394
 
395
  | Model | Dimension | Isotropy | Semantic Density | Alignment R@1 | Alignment R@10 |
396
  |-------|-----------|----------|------------------|---------------|----------------|
397
- | **mono_32d** | 32 | 0.8916 🏆 | 0.3371 | N/A | N/A |
398
- | **mono_64d** | 64 | 0.8046 | 0.2601 | N/A | N/A |
399
- | **mono_128d** | 128 | 0.3726 | 0.2357 | N/A | N/A |
 
 
 
400
 
401
  ### Key Findings
402
 
403
- - **Best Isotropy:** mono_32d with 0.8916 (more uniform distribution)
404
- - **Semantic Density:** Average pairwise similarity of 0.2776. Lower values indicate better semantic separation.
405
- - **Alignment Quality:** No aligned models evaluated in this run.
406
  - **Recommendation:** 128d aligned for best cross-lingual performance
407
 
408
  ---
409
  ## 6. Morphological Analysis (Experimental)
410
 
411
- > ⚠️ **Warning:** This language shows low morphological productivity. The statistical signals used for this analysis may be noisy or less reliable than for morphologically rich languages.
412
-
413
  This section presents an automated morphological analysis derived from the statistical divergence between word-level and subword-level models. By analyzing where subword predictability spikes and where word-level coverage fails, we can infer linguistic structures without supervised data.
414
 
415
  ### 6.1 Productivity & Complexity
416
 
417
  | Metric | Value | Interpretation | Recommendation |
418
  |--------|-------|----------------|----------------|
419
- | Productivity Index | **0.000** | Low morphological productivity | ⚠️ Likely unreliable |
420
- | Idiomaticity Gap | **-1.000** | Low formulaic content | - |
421
 
422
  ### 6.2 Affix Inventory (Productive Units)
423
 
@@ -426,19 +461,20 @@ These are the most productive prefixes and suffixes identified by sampling the v
426
  #### Productive Prefixes
427
  | Prefix | Examples |
428
  |--------|----------|
429
- | `-ба` | байхдаа, балнад, байһан |
 
430
 
431
  #### Productive Suffixes
432
  | Suffix | Examples |
433
  |--------|----------|
434
- | `-н` | хилын, биотехнологиин, зайдан |
435
- | `-й` | феодорой, намуудай, зангай |
436
- | `-ай` | намуудай, зангай, дарангылалай |
437
- | `-ан` | зайдан, хааншалһан, буруудхан |
438
- | `-эй` | сэнтэй, клэй, тэригүүдэй |
439
- | `-ын` | хилын, доржын, эмнэлгын |
440
- | `-ые` | хүүгэдые, различные, вермахтые |
441
- | `-эн` | үйһэн, нэмэгдэһэн, дэбжүүлэн |
442
 
443
  ### 6.3 Bound Stems (Lexical Roots)
444
 
@@ -446,18 +482,18 @@ Bound stems are high-frequency subword units that are semantically cohesive but
446
 
447
  | Stem | Cohesion | Substitutability | Examples |
448
  |------|----------|------------------|----------|
449
- | `анай` | 1.88x | 75 contexts | ганай, манай, ханай |
450
- | `гуул` | 1.80x | 67 contexts | уугуул, агуулга, хайгуул |
451
- | `эгдэ` | 1.65x | 93 contexts | жэгдэ, нэгдэн, нэгдэл |
452
- | `азар` | 2.38x | 21 contexts | газар, базар, лазарь |
453
- | `дэһэ` | 1.85x | 44 contexts | үдэһэн, гэдэһэ, үндэһэ |
454
- | `энэй` | 1.75x | 53 contexts | сэнэй, үгэнэй, үһэнэй |
455
- | `эдэг` | 1.70x | 57 contexts | гэдэг, хэдэг, ерэдэг |
456
- | `алай` | 1.78x | 47 contexts | далай, малай, һалай |
457
- | `ниин` | 1.85x | 40 contexts | ниинь, даниин, линиин |
458
- | `нууд` | 1.62x | 56 contexts | онууд, орнууд, ионууд |
459
- | `үндэ` | 1.75x | 39 contexts | һүндэ, хүндэ, үндэр |
460
- | `айда` | 1.77x | 36 contexts | сайда, дайда, зайда |
461
 
462
  ### 6.4 Affix Compatibility (Co-occurrence)
463
 
@@ -465,13 +501,16 @@ This table shows which prefixes and suffixes most frequently co-occur on the sam
465
 
466
  | Prefix | Suffix | Frequency | Examples |
467
  |--------|--------|-----------|----------|
468
- | `-ба` | `-й` | 38 words | байнхэй, баримталалай |
469
- | `-ба` | `-ай` | 29 words | баримталалай, баталгаатай |
470
- | `-ба` | `-н` | 27 words | балжан, байн |
471
- | `-ба` | `-ан` | 17 words | балжан, барилгашан |
472
- | `-ба` | `-ые` | 13 words | байрлалые, баримтые |
473
- | `-ба` | `-ын` | 4 words | байгуулгын, багамын |
474
- | `-ба` | `-эй` | 1 words | байнхэй |
 
 
 
475
 
476
  ### 6.5 Recursive Morpheme Segmentation
477
 
@@ -479,26 +518,28 @@ Using **Recursive Hierarchical Substitutability**, we decompose complex words in
479
 
480
  | Word | Suggested Split | Confidence | Stem |
481
  |------|-----------------|------------|------|
482
- | түүхэшэдые | **`түүхэшэд-ые`** | 4.5 | `түүхэшэд` |
483
- | түшэмэлые | **`түшэмэл-ые`** | 4.5 | `түшэмэл` |
484
- | дамжуулгануудые | **`дамжуулганууд-ые`** | 4.5 | `дамжуулганууд` |
485
- | далайшадай | **`далайшад-ай`** | 4.5 | `далайшад` |
486
- | хубисалай | **`хубисал-ай`** | 4.5 | `хубисал` |
487
- | ниигэмүүдэй | **`ниигэмүүд-эй`** | 4.5 | `ниигэмүүд` |
488
- | хэлэгшэдэй | **`хэлэгшэд-эй`** | 4.5 | `хэлэгшэд` |
489
- | таряашадай | **`таряашад-ай`** | 4.5 | `таряашад` |
490
- | магадлалай | **`магадлал-ай`** | 4.5 | `магадлал` |
491
- | тогтоһоные | **`тогтоһон-ые`** | 4.5 | `тогтоһон` |
492
- | буурсагые | **`буурсаг-ые`** | 4.5 | `буурсаг` |
493
- | юумэнүүдые | **`юумэнүүд-ые`** | 4.5 | `юумэнүүд` |
494
- | дашинимаевай | **`дашинимаев-ай`** | 4.5 | `дашинимаев` |
495
- | найрамдалай | **`найрамдал-ай`** | 4.5 | `найрамдал` |
496
- | зохёолуудые | **`зохёолууд-ые`** | 4.5 | `зохёолууд` |
497
 
498
  ### 6.6 Linguistic Interpretation
499
 
500
  > **Automated Insight:**
501
- The language BXR appears to be more isolating or has a highly fixed vocabulary. Word-level models perform nearly as well as subword models, indicating fewer productive morphological processes.
 
 
502
 
503
  ---
504
  ## 7. Summary & Recommendations
@@ -509,7 +550,7 @@ The language BXR appears to be more isolating or has a highly fixed vocabulary.
509
 
510
  | Component | Recommended | Rationale |
511
  |-----------|-------------|-----------|
512
- | Tokenizer | **64k BPE** | Best compression (4.39x) |
513
  | N-gram | **2-gram** | Lowest perplexity (452) |
514
  | Markov | **Context-4** | Highest predictability (98.9%) |
515
  | Embeddings | **100d** | Balanced semantic capture and isotropy |
@@ -725,4 +766,4 @@ MIT License - Free for academic and commercial use.
725
  ---
726
  *Generated by Wikilangs Models Pipeline*
727
 
728
- *Report Date: 2026-01-03 09:00:32*
 
1
  ---
2
  language: bxr
3
+ language_name: Russia Buriat
4
  language_family: mongolic
5
  tags:
6
  - wikilangs
 
10
  - n-gram
11
  - markov
12
  - wikipedia
13
+ - feature-extraction
14
+ - sentence-similarity
15
+ - tokenization
16
+ - n-grams
17
+ - markov-chain
18
+ - text-mining
19
+ - fasttext
20
+ - babelvec
21
+ - vocabulous
22
+ - vocabulary
23
  - monolingual
24
  - family-mongolic
25
  license: mit
26
  library_name: wikilangs
27
+ pipeline_tag: text-generation
28
  datasets:
29
  - omarkamali/wikipedia-monthly
30
  dataset_info:
 
33
  metrics:
34
  - name: best_compression_ratio
35
  type: compression
36
+ value: 4.402
37
  - name: best_isotropy
38
  type: isotropy
39
+ value: 0.9019
40
  - name: vocabulary_size
41
  type: vocab
42
  value: 0
43
  generated: 2026-01-03
44
  ---
45
 
46
+ # Russia Buriat - Wikilangs Models
47
  ## Comprehensive Research Report & Full Ablation Study
48
 
49
+ This repository contains NLP models trained and evaluated by Wikilangs, specifically on **Russia Buriat** Wikipedia data.
50
  We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and word embeddings.
51
 
52
  ## 📋 Repository Contents
 
70
  - [3. Markov Chain Evaluation](#3-markov-chain-evaluation)
71
  - [4. Vocabulary Analysis](#4-vocabulary-analysis)
72
  - [5. Word Embeddings Evaluation](#5-word-embeddings-evaluation)
73
+ - [6. Morphological Analysis (Experimental)](#6-morphological-analysis-experimental)
74
  - [7. Summary & Recommendations](#7-summary--recommendations)
75
  - [Metrics Glossary](#appendix-metrics-glossary--interpretation-guide)
76
  - [Visualizations Index](#visualizations-index)
 
90
 
91
  | Vocab Size | Compression | Avg Token Len | UNK Rate | Total Tokens |
92
  |------------|-------------|---------------|----------|--------------|
93
+ | **8k** | 3.459x | 3.46 | 0.1450% | 616,507 |
94
+ | **16k** | 3.854x | 3.86 | 0.1615% | 553,408 |
95
+ | **32k** | 4.159x | 4.16 | 0.1743% | 512,788 |
96
+ | **64k** | 4.402x 🏆 | 4.40 | 0.1845% | 484,538 |
97
 
98
  ### Tokenization Examples
99
 
100
  Below are sample sentences tokenized with each vocabulary size:
101
 
102
+ **Sample 1:** `Мэйси - Ород Википеэдийн Үбэр Монголой долоо хоногой үгүүлэл. Мүн үзэхэ Үбэр Мон...`
103
 
104
  | Vocab | Tokens | Count |
105
  |-------|--------|-------|
106
+ | 8k | `▁мэй си ▁- ▁ород ▁википеэдийн ▁үбэр ▁монголой ▁долоо ▁хоногой ▁үгүүлэл ... (+7 more)` | 17 |
107
+ | 16k | `▁мэй си ▁- ▁ород ▁википеэдийн ▁үбэр ▁монголой ▁долоо ▁хоногой ▁үгүүлэл ... (+7 more)` | 17 |
108
+ | 32k | `▁мэй си ▁- ▁ород ▁википеэдийн ▁үбэр ▁монголой ▁долоо ▁хоногой ▁үгүүлэл ... (+7 more)` | 17 |
109
+ | 64k | `▁мэйси ▁- ▁ород ▁википеэдийн ▁үбэр ▁монголой ▁долоо ▁хоногой ▁үгүүлэл . ... (+6 more)` | 16 |
110
 
111
+ **Sample 2:** `Уһан далайн сэрэгэй авиаци — уһан соо бууха ба уһан дээрэһээ ниидэжэ гараха онго...`
112
 
113
  | Vocab | Tokens | Count |
114
  |-------|--------|-------|
115
+ | 8k | `▁уһан ▁далайн ▁сэрэгэй ▁ав иа ци ▁— ▁уһан ▁соо ▁буу ... (+16 more)` | 26 |
116
+ | 16k | `▁уһан ▁далайн ▁сэрэгэй ▁авиа ци ▁— ▁уһан ▁соо ▁бууха ▁ба ... (+13 more)` | 23 |
117
+ | 32k | `▁уһан ▁далайн ▁сэрэгэй ▁авиаци ▁— ▁уһан ▁соо ▁бууха ▁ба ▁уһан ... (+12 more)` | 22 |
118
+ | 64k | `▁уһан ▁далайн ▁сэрэгэй ▁авиаци ▁— ▁уһан ▁соо ▁бууха ▁ба ▁уһан ... (+12 more)` | 22 |
119
 
120
+ **Sample 3:** `Денонсаци — нэгэ гүрэнэй нүгөө гүрэндэ өөр—хоорондохи ябажа байгаа хэрээ, хэлсээ...`
121
 
122
  | Vocab | Tokens | Count |
123
  |-------|--------|-------|
124
+ | 8k | `▁д ен он са ци ▁— ▁нэгэ ▁гүрэнэй ▁нүгөө ▁гүрэндэ ... (+16 more)` | 26 |
125
+ | 16k | `▁ден он са ци ▁— ▁нэгэ ▁гүрэнэй ▁нүгөө ▁гүрэндэ ▁өөр ... (+14 more)` | 24 |
126
+ | 32k | `▁ден он са ци ▁— ▁нэгэ ▁гүрэнэй ▁нүгөө ▁гүрэндэ ▁өөр ... (+14 more)` | 24 |
127
+ | 64k | `▁денонсаци ▁— ▁нэгэ ▁гүрэнэй ▁нүгөө ▁гүрэндэ ▁өөр хоорондохи ▁ябажа ... (+9 more)` | 19 |
128
 
129
 
130
  ### Key Findings
131
 
132
+ - **Best Compression:** 64k achieves 4.402x compression
133
+ - **Lowest UNK Rate:** 8k with 0.1450% unknown tokens
134
  - **Trade-off:** Larger vocabularies improve compression but increase model size
135
  - **Recommendation:** 32k vocabulary provides optimal balance for production use
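
The `.model`/`.vocab` pairs under `models/tokenizer/` look like standard SentencePiece artifacts. Assuming they are (this diff does not say so explicitly), a minimal loading sketch for the recommended 32k model:

```python
# Minimal sketch: loading the 32k tokenizer, assuming the .model files are
# standard SentencePiece artifacts (not confirmed by this diff).
import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="models/tokenizer/bxr_tokenizer_32k.model")

text = "Мэйси - Ород Википеэдийн Үбэр Монголой долоо хоногой үгүүлэл."
pieces = sp.encode(text, out_type=str)  # subword pieces such as "▁мэй", "си", ...
ids = sp.encode(text, out_type=int)     # the corresponding vocabulary ids

print(pieces)
print(sp.decode(ids))  # decoding round-trips back to the original text
```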
136
 
 
147
 
148
  | N-gram | Variant | Perplexity | Entropy | Unique N-grams | Top-100 Coverage | Top-1000 Coverage |
149
  |--------|---------|------------|---------|----------------|------------------|-------------------|
150
+ | **2-gram** | Word | 4,087 | 12.00 | 8,036 | 19.8% | 49.7% |
151
+ | **2-gram** | Subword | 452 🏆 | 8.82 | 3,815 | 56.9% | 96.7% |
152
+ | **3-gram** | Word | 3,571 | 11.80 | 7,655 | 25.2% | 48.6% |
153
+ | **3-gram** | Subword | 3,726 | 11.86 | 29,176 | 20.6% | 62.2% |
154
+ | **4-gram** | Word | 7,283 | 12.83 | 14,462 | 19.6% | 35.4% |
155
+ | **4-gram** | Subword | 17,919 | 14.13 | 123,764 | 9.4% | 34.6% |
156
+ | **5-gram** | Word | 5,323 | 12.38 | 10,833 | 22.1% | 38.6% |
157
+ | **5-gram** | Subword | 48,261 | 15.56 | 234,708 | 6.1% | 22.3% |
158
 
159
  ### Top 5 N-grams by Size
160
 
 
162
 
163
  | Rank | N-gram | Count |
164
  |------|--------|-------|
165
+ | 1 | `энэ үдэр` | 1,109 |
166
+ | 2 | `гү али` | 1,021 |
167
+ | 3 | `of the` | 462 |
168
+ | 4 | `байна энэ` | 425 |
169
  | 5 | `бүгэдэ найрамдаха` | 396 |
170
 
171
  **3-grams (Word):**
172
 
173
  | Rank | N-gram | Count |
174
  |------|--------|-------|
175
+ | 1 | `үйлэ ябадалай жагсаалта` | 366 |
176
+ | 2 | `энэ үдэр тохёоһон` | 366 |
177
+ | 3 | `тохёоһон үйлэ ябадалай` | 366 |
178
+ | 4 | `үдэр наһа бараһаниинь` | 366 |
179
+ | 5 | `энэ үдэр наһа` | 366 |
180
 
181
  **4-grams (Word):**
182
 
183
  | Rank | N-gram | Count |
184
  |------|--------|-------|
185
+ | 1 | `үдэр тохёоһон үйлэ ябадалай` | 366 |
186
+ | 2 | `энэ үдэр наһа бараһаниинь` | 366 |
187
+ | 3 | `энэ үдэр тохёоһон үйлэ` | 366 |
188
+ | 4 | `тохёоһон үйлэ ябадалай жагсаалта` | 366 |
189
+ | 5 | `энэ үдэрэй тэмдэглэлтэ баяр` | 358 |
190
+
191
+ **5-grams (Word):**
192
+
193
+ | Rank | N-gram | Count |
194
+ |------|--------|-------|
195
+ | 1 | `энэ үдэр тохёоһон үйлэ ябадалай` | 366 |
196
+ | 2 | `үдэр тохёоһон үйлэ ябадалай жагсаалта` | 366 |
197
+ | 3 | `тохёоһон үйлэ ябадалай жагсаалта энэ` | 340 |
198
+ | 4 | `ябадалай жагсаалта энэ үдэр түрэһэниинь` | 340 |
199
+ | 5 | `үйлэ ябадалай жагсаалта энэ үдэр` | 340 |
200
 
201
  **2-grams (Subword):**
202
 
203
  | Rank | N-gram | Count |
204
  |------|--------|-------|
205
+ | 1 | `н _` | 81,065 |
206
+ | 2 | `й _` | 55,911 |
207
+ | 3 | `_ б` | 53,676 |
208
+ | 4 | `_ х` | 49,355 |
209
+ | 5 | `а й` | 47,888 |
210
 
211
  **3-grams (Subword):**
212
 
213
  | Rank | N-gram | Count |
214
  |------|--------|-------|
215
+ | 1 | `а й _` | 24,178 |
216
+ | 2 | `_ б а` | 23,944 |
217
+ | 3 | `ы н _` | 18,168 |
218
+ | 4 | `э й _` | 17,283 |
219
+ | 5 | `а н _` | 16,564 |
220
 
221
  **4-grams (Subword):**
222
 
223
  | Rank | N-gram | Count |
224
  |------|--------|-------|
225
+ | 1 | `_ б а й` | 12,726 |
226
+ | 2 | `_ б о л` | 11,040 |
227
+ | 3 | `б о л о` | 8,901 |
228
+ | 4 | `и и н _` | 6,846 |
229
+ | 5 | `_ у л а` | 6,751 |
230
+
231
+ **5-grams (Subword):**
232
+
233
+ | Rank | N-gram | Count |
234
+ |------|--------|-------|
235
+ | 1 | `_ б о л о` | 8,849 |
236
+ | 2 | `_ у л а с` | 5,743 |
237
+ | 3 | `о н о й _` | 4,950 |
238
+ | 4 | `а н а й _` | 4,619 |
239
+ | 5 | `э һ э н _` | 4,162 |
240
 
241
 
242
  ### Key Findings
243
 
244
  - **Best Perplexity:** 2-gram (subword) with 452
245
  - **Entropy Trend:** Decreases with larger n-grams (more predictable)
246
+ - **Coverage:** Top-1000 patterns cover ~22% of corpus
247
  - **Recommendation:** 4-gram or 5-gram for best predictive performance
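
For intuition, a minimal sketch of how the statistics in the table above can be computed from a token stream. It uses the relationship perplexity = 2^entropy, which matches the reported figures (2^8.82 ≈ 452 for the 2-gram subword row); any smoothing applied by the actual pipeline is not documented here.

```python
# Sketch of the n-gram statistics above: maximum-likelihood distribution over
# n-grams, Shannon entropy in bits, perplexity as 2**entropy, top-k coverage.
from collections import Counter
from math import log2

def ngram_stats(tokens, n, top_k=1000):
    grams = Counter(zip(*(tokens[i:] for i in range(n))))
    total = sum(grams.values())
    entropy = -sum(c / total * log2(c / total) for c in grams.values())
    coverage = sum(c for _, c in grams.most_common(top_k)) / total
    return {"unique": len(grams), "entropy": entropy,
            "perplexity": 2 ** entropy, f"top{top_k}_coverage": coverage}

tokens = "энэ үдэр тохёоһон үйлэ ябадалай жагсаалта энэ үдэр".split()
print(ngram_stats(tokens, 2))
```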
248
 
249
  ---
 
259
 
260
  | Context | Variant | Avg Entropy | Perplexity | Branching Factor | Unique Contexts | Predictability |
261
  |---------|---------|-------------|------------|------------------|-----------------|----------------|
262
+ | **1** | Word | 0.7365 | 1.666 | 4.12 | 92,015 | 26.3% |
263
+ | **1** | Subword | 0.8645 | 1.821 | 5.69 | 2,131 | 13.5% |
264
+ | **2** | Word | 0.1428 | 1.104 | 1.26 | 378,037 | 85.7% |
265
+ | **2** | Subword | 0.8166 | 1.761 | 5.04 | 12,123 | 18.3% |
266
+ | **3** | Word | 0.0341 | 1.024 | 1.05 | 476,205 | 96.6% |
267
+ | **3** | Subword | 0.7973 | 1.738 | 3.76 | 61,012 | 20.3% |
268
+ | **4** | Word | 0.0112 🏆 | 1.008 | 1.02 | 497,992 | 98.9% |
269
+ | **4** | Subword | 0.5747 | 1.489 | 2.39 | 229,261 | 42.5% |
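
A sketch of how the per-context metrics above can be derived from a transition table. Perplexity is 2^(avg entropy), consistent with the rows above (2^0.7365 ≈ 1.666); treating "predictability" as the mean top-1 transition probability is an assumption of this sketch, since the diff does not define the metric.

```python
# Sketch of the Markov evaluation metrics: average next-token entropy,
# perplexity as 2**(avg entropy), branching factor as the mean number of
# distinct continuations, and predictability as mean top-1 probability
# (the last definition is an assumption, not documented in this repo).
from math import log2

def chain_metrics(chain):  # chain: {context tuple: {next token: count}}
    entropies, branching, top1 = [], [], []
    for nexts in chain.values():
        total = sum(nexts.values())
        entropies.append(-sum(c / total * log2(c / total) for c in nexts.values()))
        branching.append(len(nexts))
        top1.append(max(nexts.values()) / total)
    n = len(chain)
    avg_h = sum(entropies) / n
    return {"avg_entropy": avg_h, "perplexity": 2 ** avg_h,
            "branching_factor": sum(branching) / n,
            "predictability": sum(top1) / n}

chain = {("энэ",): {"үдэр": 8, "болон": 2}, ("үдэр",): {"тохёоһон": 5}}
print(chain_metrics(chain))
```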
270
 
271
  ### Generated Text Samples (Word-based)
272
 
 
274
 
275
  **Context Size 1:**
276
 
277
+ 1. `ба дайшадай толгойнууд олдоо һэн мүн магрибай араб уласай 5 сая ажаһуугшад боложо үгэһэн бэлэй ниисл...`
278
+ 2. `юм исаак ньютон джон нэрэтэй байгаад наһа бараа үйлэшэлгын хэлтэстэ хубаагдана эдэ олон жэлэй 189 дэ...`
279
+ 3. `энэ үдэр түрэһэниинь парацельс алхимик эмшэ эсперантогой байгуулагша гээд хэдэн нөлөө дэндүү их гүрн...`
280
 
281
  **Context Size 2:**
282
 
283
+ 1. `энэ үдэр тохёоһон үйлэ ябадалай жагсаалта 324 римэй эзэнтэ гүрэнэй үндэһэлэгшэд отто фон бисмарк фри...`
284
+ 2. `гү али зүрхэнэй өөрынхинь мэдэрэлэй тогтолсоогоор ябагдана агшалтын үеэр шуһанай һудаһуудта шуһан ша...`
285
+ 3. `of the iaea itu upu and wipo and a permanently functioning legislative administrative and supervisor...`
286
 
287
  **Context Size 3:**
288
 
289
+ 1. `тохёоһон үйлэ ябадалай жагсаалта энэ үдэр түрэһэниинь энэ үдэр наһа бараһаниинь энэ үдэрэй тэмдэглэл...`
290
+ 2. `үйлэ ябадалай жагсаалта энэ үдэр түрэһэниинь энэ үдэр наһа бараһаниинь энэ үдэрэй тэмдэглэлтэ баяр э...`
291
+ 3. `энэ үдэр түрэһэниинь энэ үдэр наһа бараһаниинь энэ үдэрэй тэмдэглэлтэ баяр энэ үдэр тохёоһон үйлэ яб...`
292
 
293
  **Context Size 4:**
294
 
295
+ 1. `үдэр тохёоһон үйлэ ябадалай жагсаалта энэ үдэр түрэһэниинь оной урда үе энэ үдэр наһа бараһаниинь эн...`
296
+ 2. `тохёоһон үйлэ ябадалай жагсаалта энэ үдэр түрэһэниинь энэ үдэр наһа бараһаниинь энэ үдэрэй тэмдэглэл...`
297
+ 3. `энэ үдэр тохёоһон үйлэ ябадалай жагсаалта энэ үдэр түрэһэниинь оной урда үе энэ үдэр наһа бараһаниин...`
298
 
299
 
300
  ### Generated Text Samples (Subword-based)
 
303
 
304
  **Context Size 1:**
305
 
306
+ 1. `_6,_сэн»_г,_үүга`
307
+ 2. `_тэршэгай_гаһэд`
308
+ 3. `эраре_бан_каасэй`
309
 
310
  **Context Size 2:**
311
 
312
+ 1. `н_зари,_хажа._бан`
313
+ 2. `й_лэгэ,_plearunt_`
314
+ 3. `_баран._захмерита`
315
 
316
  **Context Size 3:**
317
 
318
+ 1. `ай_гэшүүн_хубиин_1`
319
+ 2. `_бан_холбоон_ба_ту`
320
+ 3. `ын_аралай_марилсуу`
321
 
322
  **Context Size 4:**
323
 
324
+ 1. `_байна._антика._мож`
325
+ 2. `_болоһоншье_үлүү_эр`
326
+ 3. `болобошье,_каирай_н`
327
 
328
 
329
  ### Key Findings
330
 
331
  - **Best Predictability:** Context-4 (word) with 98.9% predictability
332
  - **Branching Factor:** Decreases with context size (more deterministic)
333
+ - **Memory Trade-off:** Larger contexts require more storage (229,261 contexts)
334
  - **Recommendation:** Context-3 or Context-4 for text generation
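
The samples above come from weighted sampling over context → next-token transitions. A self-contained sketch follows; the released models store transitions in parquet, but that schema is not shown in this diff, so the table is built in memory here.

```python
# Sketch of context-N Markov generation like the word-based samples above.
import random
from collections import Counter, defaultdict

def build_chain(tokens, ctx=2):
    chain = defaultdict(Counter)
    for i in range(len(tokens) - ctx):
        chain[tuple(tokens[i:i + ctx])][tokens[i + ctx]] += 1
    return chain

def generate(chain, seed, length=20, rng=random.Random(0)):
    out = list(seed)
    ctx = len(seed)
    for _ in range(length):
        nexts = chain.get(tuple(out[-ctx:]))
        if not nexts:
            break  # unseen context: stop, since a backoff policy is not documented
        words, weights = zip(*nexts.items())
        out.append(rng.choices(words, weights=weights)[0])
    return " ".join(out)

tokens = "энэ үдэр тохёоһон үйлэ ябадалай жагсаалта энэ үдэр түрэһэниинь".split()
print(generate(build_chain(tokens), seed=("энэ", "үдэр")))
```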
335
 
336
  ---
 
346
 
347
  | Metric | Value |
348
  |--------|-------|
349
+ | Vocabulary Size | 35,751 |
350
+ | Total Tokens | 485,385 |
351
+ | Mean Frequency | 13.58 |
352
  | Median Frequency | 3 |
353
+ | Frequency Std Dev | 73.26 |
354
 
355
  ### Most Common Words
356
 
357
  | Rank | Word | Frequency |
358
  |------|------|-----------|
359
+ | 1 | ба | 3,777 |
360
+ | 2 | юм | 3,165 |
361
+ | 3 | энэ | 3,056 |
362
+ | 4 | ондо | 2,831 |
363
+ | 5 | болон | 2,629 |
364
+ | 6 | байна | 2,533 |
365
+ | 7 | оной | 2,521 |
366
+ | 8 | улас | 2,428 |
367
+ | 9 | the | 2,147 |
368
+ | 10 | үдэр | 2,079 |
369
 
370
  ### Least Common Words (from vocabulary)
371
 
372
  | Rank | Word | Frequency |
373
  |------|------|-----------|
374
+ | 1 | ᠮᠠᠨᠠᠶ | 2 |
375
+ | 2 | ᠲᠠᠢ | 2 |
376
+ | 3 | ᠮᠣᠩᠭᠤᠯ | 2 |
377
+ | 4 | ᠤᠷᠤᠨ | 2 |
378
+ | 5 | ᠮᠢᠨᠢ | 2 |
379
+ | 6 | ᠦᠷ | 2 |
380
+ | 7 | ᠵᠢᠷᠭᠠᠯ | 2 |
381
+ | 8 | дүхэригтэй | 2 |
382
+ | 9 | исибагай | 2 |
383
+ | 10 | ылын | 2 |
384
 
385
  ### Zipf's Law Analysis
386
 
387
  | Metric | Value |
388
  |--------|-------|
389
+ | Zipf Coefficient | 0.9688 |
390
+ | R² (Goodness of Fit) | 0.993514 |
391
  | Adherence Quality | **excellent** |
392
 
393
  ### Coverage Analysis
 
395
  | Top N Words | Coverage |
396
  |-------------|----------|
397
  | Top 100 | 22.2% |
398
+ | Top 1,000 | 52.4% |
399
+ | Top 5,000 | 74.8% |
400
+ | Top 10,000 | 84.3% |
401
 
402
  ### Key Findings
403
 
404
+ - **Zipf Compliance:** R²=0.9935 indicates excellent adherence to Zipf's law
405
  - **High Frequency Dominance:** Top 100 words cover 22.2% of corpus
406
+ - **Long Tail:** 25,751 words needed for remaining 15.7% coverage
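
To reproduce coverage numbers like these from the released table, something like the following should work; the `word`/`frequency` column names are assumptions for illustration, since the parquet schema is not documented in this diff.

```python
# Sketch: inspecting the released vocabulary table with pandas.
import pandas as pd

vocab = pd.read_parquet("models/vocabulary/bxr_vocabulary.parquet")

top10 = vocab.nlargest(10, "frequency")[["word", "frequency"]]  # assumed columns
total = vocab["frequency"].sum()
top100_coverage = vocab["frequency"].nlargest(100).sum() / total

print(top10)
print(f"top-100 coverage ≈ {top100_coverage:.1%}")  # reported above as 22.2%
```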
407
 
408
  ---
409
  ## 5. Word Embeddings Evaluation
 
419
 
420
  ### 5.1 Cross-Lingual Alignment
421
 
422
+ ![Alignment Quality](visualizations/embedding_alignment_quality.png)
423
+
424
+ ![Multilingual t-SNE](visualizations/embedding_tsne_multilingual.png)
425
 
426
 
427
  ### 5.2 Model Comparison
428
 
429
  | Model | Dimension | Isotropy | Semantic Density | Alignment R@1 | Alignment R@10 |
430
  |-------|-----------|----------|------------------|---------------|----------------|
431
+ | **mono_32d** | 32 | 0.9019 🏆 | 0.3176 | N/A | N/A |
432
+ | **mono_64d** | 64 | 0.7924 | 0.2625 | N/A | N/A |
433
+ | **mono_128d** | 128 | 0.3620 | 0.2359 | N/A | N/A |
434
+ | **aligned_32d** | 32 | 0.9019 | 0.3203 | 0.0100 | 0.1160 |
435
+ | **aligned_64d** | 64 | 0.7924 | 0.2588 | 0.0220 | 0.1580 |
436
+ | **aligned_128d** | 128 | 0.3620 | 0.2402 | 0.0480 | 0.2140 |
437
 
438
  ### Key Findings
439
 
440
+ - **Best Isotropy:** mono_32d with 0.9019 (more uniform distribution)
441
+ - **Semantic Density:** Average pairwise similarity of 0.2725. Lower values indicate better semantic separation.
442
+ - **Alignment Quality:** Aligned models achieve up to 4.8% R@1 in cross-lingual retrieval.
443
  - **Recommendation:** 128d aligned for best cross-lingual performance
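
Each aligned release pairs a `.bin` with a small `.projection.npy` whose byte size matches a d×d float32 matrix (e.g., 65,664 bytes ≈ 128×128×4 plus the npy header), so alignment presumably applies a linear map to monolingual vectors. A hedged sketch; that the `.bin` files load with the `fasttext` library, and the multiplication direction, are both assumptions not confirmed by this diff.

```python
# Hedged sketch: projecting monolingual vectors into the shared hub (English)
# space via the released d x d projection matrix.
import numpy as np
import fasttext

model = fasttext.load_model("models/embeddings/aligned/bxr_128d.bin")
W = np.load("models/embeddings/aligned/bxr_128d.projection.npy")  # shape (128, 128)

v = model.get_word_vector("үдэр")      # monolingual vector
aligned = v @ W                        # row-vector convention is an assumption
aligned /= np.linalg.norm(aligned)     # unit-normalize for cosine retrieval
```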
444
 
445
  ---
446
  ## 6. Morphological Analysis (Experimental)
447
 
 
 
448
  This section presents an automated morphological analysis derived from the statistical divergence between word-level and subword-level models. By analyzing where subword predictability spikes and where word-level coverage fails, we can infer linguistic structures without supervised data.
449
 
450
  ### 6.1 Productivity & Complexity
451
 
452
  | Metric | Value | Interpretation | Recommendation |
453
  |--------|-------|----------------|----------------|
454
+ | Productivity Index | **5.000** | High morphological productivity | Reliable analysis |
455
+ | Idiomaticity Gap | **0.728** | High formulaic/idiomatic content | - |
456
 
457
  ### 6.2 Affix Inventory (Productive Units)
458
 
 
461
  #### Productive Prefixes
462
  | Prefix | Examples |
463
  |--------|----------|
464
+ | `-ба` | байгаар, байр, баряуд |
465
+ | `-ха` | харагдана, халимагууд, хангахын |
466
 
467
  #### Productive Suffixes
468
  | Suffix | Examples |
469
  |--------|----------|
470
+ | `-н` | шатааһан, португалиин, догшин |
471
+ | `-й` | монголой, шэрхэгтэй, санхүүгай |
472
+ | `-ай` | санхүүгай, билзуухай, байгуулгануудтай |
473
+ | `-ан` | шатааһан, урлаһан, абатан |
474
+ | `-эй` | шэрхэгтэй, ерэнхэй, нүхэтэй |
475
+ | `-ые` | диграфые, конгрессые, логикые |
476
+ | `-ын` | хилын, нэмэгдэхын, хангахын |
477
+ | `-нь` | уклонь, утаашань, вангиинь |
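
As a toy illustration only (the actual pipeline infers affixes from word/subword statistical divergence, as described above), productive suffixes can be surfaced by counting how many distinct stems each word ending attaches to:

```python
# Toy illustration of suffix productivity via distinct-stem counts.
from collections import defaultdict

vocab = ["монголой", "санхүүгай", "хилын", "хангахын", "шатааһан", "урлаһан"]

suffix_stems = defaultdict(set)
for word in vocab:
    for k in (1, 2, 3):
        if len(word) > k + 2:  # require a non-trivial stem
            suffix_stems[word[-k:]].add(word[:-k])

for suffix, stems in sorted(suffix_stems.items(), key=lambda kv: -len(kv[1]))[:5]:
    print(f"-{suffix}: {len(stems)} stems, e.g. {sorted(stems)[:3]}")
```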
478
 
479
  ### 6.3 Bound Stems (Lexical Roots)
480
 
 
482
 
483
  | Stem | Cohesion | Substitutability | Examples |
484
  |------|----------|------------------|----------|
485
+ | `гуул` | 1.87x | 66 contexts | уугуул, хайгуул, агуулжа |
486
+ | `энэй` | 1.92x | 53 contexts | сэнэй, эзэнэй, энэнэй |
487
+ | `анай` | 1.74x | 74 contexts | манай, танай, ванай |
488
+ | `ниин` | 1.99x | 40 contexts | ниинь, даниин, кениин |
489
+ | `азар` | 2.36x | 21 contexts | газар, базар, лазарь |
490
+ | `нүүд` | 1.92x | 41 contexts | үенүүд, гүнүүд, эснүүд |
491
+ | `алай` | 1.85x | 47 contexts | һалай, малай, алайр |
492
+ | `дэһэ` | 1.87x | 44 contexts | гэдэһэ, үндэһэ, үдэһэн |
493
+ | `эдэг` | 1.76x | 56 contexts | хэдэг, гэдэг, үзэдэг |
494
+ | `эгдэ` | 1.57x | 91 contexts | жэгдэ, дэгдэн, нэгдэн |
495
+ | `оһон` | 1.91x | 40 contexts | тоһон, хооһон, ороһон |
496
+ | `ууда` | 1.72x | 57 contexts | уудам, уудаг, буудал |
497
 
498
  ### 6.4 Affix Compatibility (Co-occurrence)
499
 
 
501
 
502
  | Prefix | Suffix | Frequency | Examples |
503
  |--------|--------|-----------|----------|
504
+ | `-ба` | `-н` | 36 words | багамын, байгуулсан |
505
+ | `-ха` | `-н` | 29 words | хамаарһан, харбаан |
506
+ | `-ба` | `-й` | 28 words | байгууламжануудай, баттерфляй |
507
+ | `-ха` | `-й` | 26 words | харбинай, хатарай |
508
+ | `-ха` | `-ай` | 23 words | харбинай, хатарай |
509
+ | `-ха` | `-ан` | 21 words | хамаарһан, харбаан |
510
+ | `-ба` | `-ан` | 21 words | байгуулсан, барилдаан |
511
+ | `-ба` | `-ай` | 18 words | байгууламжануудай, баатарай |
512
+ | `-ха` | `-аа` | 13 words | хаанһаа, харууллаа |
513
+ | `-ба` | `-аа` | 11 words | байдалаараа, бараа |
514
 
515
  ### 6.5 Recursive Morpheme Segmentation
516
 
 
518
 
519
  | Word | Suggested Split | Confidence | Stem |
520
  |------|-----------------|------------|------|
521
+ | басаганай | **`ба-саган-ай`** | 6.0 | `саган` |
522
+ | онсолигые | **`онсолиг-ые`** | 4.5 | `онсолиг` |
523
+ | гибралтарай | **`гибралтар-ай`** | 4.5 | `гибралтар` |
524
+ | оронуудаа | **`оронууд-аа`** | 4.5 | `оронууд` |
525
+ | туристуудай | **`туристууд-ай`** | 4.5 | `туристууд` |
526
+ | эблэрэлэй | **`эблэрэл-эй`** | 4.5 | `эблэрэл` |
527
+ | шалгалтые | **`шалгалт-ые`** | 4.5 | `шалгалт` |
528
+ | шулуунуудые | **`шулуунууд-ые`** | 4.5 | `шулуунууд` |
529
+ | хүсэнүүдые | **`хүсэнүүд-ые`** | 4.5 | `хүсэнүүд` |
530
+ | бэшэхэдэнь | **`бэшэхэдэ-нь`** | 4.5 | `бэшэхэдэ` |
531
+ | хубилбаринь | **`хубилбари-нь`** | 4.5 | `хубилбари` |
532
+ | үзүүрнүүдые | **`үзүүрнүүд-ые`** | 4.5 | `үзүүрнүүд` |
533
+ | моринойнь | **`мориной-нь`** | 4.5 | `мориной` |
534
+ | реализмын | **`реализм-ын`** | 4.5 | `реализм` |
535
+ | сэрэгүүдые | **`сэрэгүүд-ые`** | 4.5 | `сэрэгүүд` |
536
 
537
  ### 6.6 Linguistic Interpretation
538
 
539
  > **Automated Insight:**
540
+ The language Russia Buriat shows high morphological productivity. The subword models are significantly more efficient than word models, suggesting a rich system of affixation or compounding.
541
+
542
+ > **Note on Idiomaticity:** The high Idiomaticity Gap suggests a large number of frequent multi-word expressions or formulaic sequences that are statistically distinct from their component parts.
543
 
544
  ---
545
  ## 7. Summary & Recommendations
 
550
 
551
  | Component | Recommended | Rationale |
552
  |-----------|-------------|-----------|
553
+ | Tokenizer | **64k BPE** | Best compression (4.40x) |
554
  | N-gram | **2-gram** | Lowest perplexity (452) |
555
  | Markov | **Context-4** | Highest predictability (98.9%) |
556
  | Embeddings | **100d** | Balanced semantic capture and isotropy |
 
766
  ---
767
  *Generated by Wikilangs Models Pipeline*
768
 
769
+ *Report Date: 2026-01-03 19:55:46*
models/embeddings/aligned/bxr_128d.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:172f899284639436ddce6499851ed3eed6da42fc9a384c3b5308803d0e390be5
3
+ size 1038708787
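
Note that the three-line blobs in this and the following file diffs are Git LFS pointer files, not the binary payloads; each names the real object by its sha256 oid and byte size. Parsing one is trivial:

```python
# Parse a Git LFS pointer file (version / oid / size lines).
def parse_lfs_pointer(text):
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {"algorithm": algo, "oid": digest, "size": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:172f899284639436ddce6499851ed3eed6da42fc9a384c3b5308803d0e390be5
size 1038708787"""
print(parse_lfs_pointer(pointer))  # ~1.04 GB object backing bxr_128d.bin
```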
models/embeddings/aligned/bxr_128d.meta.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"lang": "bxr", "dim": 128, "max_seq_len": 512, "is_aligned": true}
models/embeddings/aligned/bxr_128d.projection.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c785d969d3099dc4bd076c150506f7c27c81cfb77cc44d85ffc4161801ca957f
3
+ size 65664
models/embeddings/aligned/bxr_128d_metadata.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "language": "bxr",
3
+ "dimension": 128,
4
+ "version": "aligned",
5
+ "hub_language": "en",
6
+ "seed_vocab_size": 2377,
7
+ "vocab_size": 14055
8
+ }
models/embeddings/aligned/bxr_32d.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a57d92bbd9734f329c9a94b4a167c386486deabe8011adb84b0dc6b41665fdc
3
+ size 259914547
models/embeddings/aligned/bxr_32d.meta.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"lang": "bxr", "dim": 32, "max_seq_len": 512, "is_aligned": true}
models/embeddings/aligned/bxr_32d.projection.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d84d55644d798c788da0b7a35dc71d4d28e0af9ddac8942ed99182153b9022e9
3
+ size 4224
models/embeddings/aligned/bxr_32d_metadata.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "language": "bxr",
3
+ "dimension": 32,
4
+ "version": "aligned",
5
+ "hub_language": "en",
6
+ "seed_vocab_size": 2377,
7
+ "vocab_size": 14055
8
+ }
models/embeddings/aligned/bxr_64d.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:31797d27ef08af5cfc33cd37eb6403b8830b43a2384cee115e2187e33f6f3567
3
+ size 519512627
models/embeddings/aligned/bxr_64d.meta.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"lang": "bxr", "dim": 64, "max_seq_len": 512, "is_aligned": true}
models/embeddings/aligned/bxr_64d.projection.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:296853c47c6dca141665279cf92cd6239125203b235581ebbb8b4e0df0d4f179
3
+ size 16512
models/embeddings/aligned/bxr_64d_metadata.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "language": "bxr",
3
+ "dimension": 64,
4
+ "version": "aligned",
5
+ "hub_language": "en",
6
+ "seed_vocab_size": 2377,
7
+ "vocab_size": 14055
8
+ }
models/embeddings/monolingual/bxr_128d.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:9a68f121ed60ec837dcd0cd6dfe40ccae7bda27cefb1882915867e69840e1104
3
- size 1038925515
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:172f899284639436ddce6499851ed3eed6da42fc9a384c3b5308803d0e390be5
3
+ size 1038708787
models/embeddings/monolingual/bxr_128d_metadata.json CHANGED
@@ -11,5 +11,5 @@
11
  "encoding_method": "rope",
12
  "dim": 128
13
  },
14
- "vocab_size": 14262
15
  }
 
11
  "encoding_method": "rope",
12
  "dim": 128
13
  },
14
+ "vocab_size": 14055
15
  }
models/embeddings/monolingual/bxr_32d.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:bf82d275c6ec9e3300c42d4bf6f1d83083710befa2f67e76dcf383dcfb7c187a
3
- size 259972299
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a57d92bbd9734f329c9a94b4a167c386486deabe8011adb84b0dc6b41665fdc
3
+ size 259914547
models/embeddings/monolingual/bxr_32d_metadata.json CHANGED
@@ -11,5 +11,5 @@
11
  "encoding_method": "rope",
12
  "dim": 32
13
  },
14
- "vocab_size": 14262
15
  }
 
11
  "encoding_method": "rope",
12
  "dim": 32
13
  },
14
+ "vocab_size": 14055
15
  }
models/embeddings/monolingual/bxr_64d.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:846d84f5d2248a39c8ed392e72d06b7956f7871585cd833febab2c57b28b0799
3
- size 519623371
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:31797d27ef08af5cfc33cd37eb6403b8830b43a2384cee115e2187e33f6f3567
3
+ size 519512627
models/embeddings/monolingual/bxr_64d_metadata.json CHANGED
@@ -11,5 +11,5 @@
11
  "encoding_method": "rope",
12
  "dim": 64
13
  },
14
- "vocab_size": 14262
15
  }
 
11
  "encoding_method": "rope",
12
  "dim": 64
13
  },
14
+ "vocab_size": 14055
15
  }
models/subword_markov/bxr_markov_ctx1_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:8ef28afcd8620169ed8fa2c34876e8625f7c7d635864fbd2796f37f30dba4cf1
3
- size 102324
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:293e9e02dce724d7d299cbe107a10523d43b37eb72da8462882e0eac348ac329
3
+ size 101727
models/subword_markov/bxr_markov_ctx1_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 1,
3
  "variant": "subword",
4
  "language": "bxr",
5
- "unique_contexts": 2141,
6
- "total_transitions": 3947454
7
  }
 
2
  "context_size": 1,
3
  "variant": "subword",
4
  "language": "bxr",
5
+ "unique_contexts": 2131,
6
+ "total_transitions": 3900304
7
  }
models/subword_markov/bxr_markov_ctx2_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:e8eeb9396bbe777fa33dbcb0a026a02660f3421472954259ef5fb21e30676aba
3
- size 526873
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6a615345c8b10c613c94aab3e7c6fb5f2734e848c8b331ca146732c286324642
3
+ size 513516
models/subword_markov/bxr_markov_ctx2_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 2,
3
  "variant": "subword",
4
  "language": "bxr",
5
- "unique_contexts": 12176,
6
- "total_transitions": 3944697
7
  }
 
2
  "context_size": 2,
3
  "variant": "subword",
4
  "language": "bxr",
5
+ "unique_contexts": 12123,
6
+ "total_transitions": 3897537
7
  }
models/subword_markov/bxr_markov_ctx3_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:251c7577b1bff07e9bd8f78d3e3413f809f22133920660f8ff8fd5953d1df234
3
- size 1738691
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a9ee2902f7aaf5d3edd1ea0dc81f7cfb62b6abca7e206fc0397c007629224ba4
3
+ size 1722195
models/subword_markov/bxr_markov_ctx3_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 3,
3
  "variant": "subword",
4
  "language": "bxr",
5
- "unique_contexts": 61348,
6
- "total_transitions": 3941940
7
  }
 
2
  "context_size": 3,
3
  "variant": "subword",
4
  "language": "bxr",
5
+ "unique_contexts": 61012,
6
+ "total_transitions": 3894770
7
  }
models/subword_markov/bxr_markov_ctx4_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:e6704187c11e7bf62086d0438a41d230986b7075412d814372c74e9c42a43bb8
3
- size 4955182
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5e580fc7987ee03846340336c7e0ab11562bc1fc258080cffed351b5d8f6d789
3
+ size 4915742
models/subword_markov/bxr_markov_ctx4_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 4,
3
  "variant": "subword",
4
  "language": "bxr",
5
- "unique_contexts": 230966,
6
- "total_transitions": 3939183
7
  }
 
2
  "context_size": 4,
3
  "variant": "subword",
4
  "language": "bxr",
5
+ "unique_contexts": 229261,
6
+ "total_transitions": 3892003
7
  }
models/subword_ngram/bxr_2gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:4dd1815af3d79add27e46b93017d57f0b62838b802a873a7a07c137fb6540c3d
3
- size 51809
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c970573bc00321c29090c3e39278271d9babe2c7ebe7651224fe27e2403e724
3
+ size 51663
models/subword_ngram/bxr_2gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "n": 2,
3
  "variant": "subword",
4
  "language": "bxr",
5
- "unique_ngrams": 3823,
6
- "total_ngrams": 3947454
7
  }
 
2
  "n": 2,
3
  "variant": "subword",
4
  "language": "bxr",
5
+ "unique_ngrams": 3815,
6
+ "total_ngrams": 3900304
7
  }
models/subword_ngram/bxr_3gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:63257791e85ed8eaf2b882c6823a3a702fe1a0ea07f2c59e64c50dd97653a352
3
- size 376817
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:53f60887f7680b105d429d09e3e4b7131fca53225c28baa8a7770f72f960f665
3
+ size 376607
models/subword_ngram/bxr_3gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "n": 3,
3
  "variant": "subword",
4
  "language": "bxr",
5
- "unique_ngrams": 29340,
6
- "total_ngrams": 3944697
7
  }
 
2
  "n": 3,
3
  "variant": "subword",
4
  "language": "bxr",
5
+ "unique_ngrams": 29176,
6
+ "total_ngrams": 3897537
7
  }
models/subword_ngram/bxr_4gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7fc6a6ba4fa3d0662727116dddfc49d4ebb04b8fe918900b45bddaff9a5e475c
3
- size 1528848
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d8fa2736d561ab20de5bd87197ef8df71272258581c81664b282841815e95d5d
3
+ size 1520439
models/subword_ngram/bxr_4gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "n": 4,
3
  "variant": "subword",
4
  "language": "bxr",
5
- "unique_ngrams": 124835,
6
- "total_ngrams": 3941940
7
  }
 
2
  "n": 4,
3
  "variant": "subword",
4
  "language": "bxr",
5
+ "unique_ngrams": 123764,
6
+ "total_ngrams": 3894770
7
  }
models/subword_ngram/bxr_5gram_subword.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:04b1c6cdffcae19db3787c20a35580f9838d314ef2c046bce0608922d9539fdb
3
+ size 3080903
models/subword_ngram/bxr_5gram_subword_metadata.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "n": 5,
3
+ "variant": "subword",
4
+ "language": "bxr",
5
+ "unique_ngrams": 234708,
6
+ "total_ngrams": 3892003
7
+ }
models/tokenizer/bxr_tokenizer_16k.model CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:9ec3513a9e102b31e37ebbe9c74465e68178a75caa31640e2da4dd18964909d1
3
- size 572848
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a6466fb53efc61a06539c545999886f09575ed8148f8ba5aa60ba12484ecf9b7
3
+ size 573527
models/tokenizer/bxr_tokenizer_16k.vocab CHANGED
The diff for this file is too large to render. See raw diff
 
models/tokenizer/bxr_tokenizer_32k.model CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:e83a11fb6d2c68714a56a8f79737f3bdf4020f6741555062a3277802b3b50563
3
- size 936687
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ba98ca87d23060587cc040f5d457fe1e109a3f6624d46e586d436f50d6b2e882
3
+ size 936516
models/tokenizer/bxr_tokenizer_32k.vocab CHANGED
The diff for this file is too large to render. See raw diff
 
models/tokenizer/bxr_tokenizer_64k.model CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:1a9a5aa8bf4ae1cf3f260d408856d0de604cee508f58aa294462236f70101270
3
- size 1699640
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c684b9c660cac22c56d1016985f83e4dd024a78bce0217470464e28a93c9b151
3
+ size 1700593
models/tokenizer/bxr_tokenizer_64k.vocab CHANGED
The diff for this file is too large to render. See raw diff
 
models/tokenizer/bxr_tokenizer_8k.model CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:8bb41ea3e440a8f23950afc663ca4194ddb8fe1cbbfd3fc96c5e7a3187bdb9a0
3
- size 400889
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e73d24aa19dde68bc2e0c4c40fc5f45d0509dd052bb2223834139dce115379b6
3
+ size 401028
models/tokenizer/bxr_tokenizer_8k.vocab CHANGED
The diff for this file is too large to render. See raw diff
 
models/vocabulary/bxr_vocabulary.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:71921497bda1a7f95ca89688025ce27f6680e9797e7176d62d5eeb5de52180fa
3
- size 687102
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:80f255597637065e8418da1b760b1e22f97981a2723c96db42725d30ff084b8b
3
+ size 675289
models/vocabulary/bxr_vocabulary_metadata.json CHANGED
@@ -1,17 +1,17 @@
1
  {
2
  "language": "bxr",
3
- "vocabulary_size": 36185,
4
  "variant": "full",
5
  "statistics": {
6
- "type_token_ratio": 0.16949986693740954,
7
  "coverage": {
8
- "top_100": 0.19864421979752614,
9
- "top_1000": 0.46809049714371126,
10
- "top_5000": 0.6691097930421025,
11
- "top_10000": 0.7539690930235101
12
  },
13
- "hapax_count": 56805,
14
- "hapax_ratio": 0.6108721367889021,
15
- "total_documents": 2757
16
  }
17
  }
 
1
  {
2
  "language": "bxr",
3
+ "vocabulary_size": 35751,
4
  "variant": "full",
5
  "statistics": {
6
+ "type_token_ratio": 0.17000503940147416,
7
  "coverage": {
8
+ "top_100": 0.1989179131340093,
9
+ "top_1000": 0.469338103228355,
10
+ "top_5000": 0.6704471407395921,
11
+ "top_10000": 0.7549872538215461
12
  },
13
+ "hapax_count": 56346,
14
+ "hapax_ratio": 0.6118114596566664,
15
+ "total_documents": 2767
16
  }
17
  }
models/word_markov/bxr_markov_ctx1_word.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:8d83d9d3e8e86f09c7979a3b6839e87cbf8a1c801cae88a26b8e2568e80c0af1
3
- size 4628667
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ba2bcae0c44eed43d2c7e7a02dab530b098ba46720f12a947d40c50c626b6744
3
+ size 4569472
models/word_markov/bxr_markov_ctx1_word_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 1,
3
  "variant": "word",
4
  "language": "bxr",
5
- "unique_contexts": 92909,
6
- "total_transitions": 545857
7
  }
 
2
  "context_size": 1,
3
  "variant": "word",
4
  "language": "bxr",
5
+ "unique_contexts": 92015,
6
+ "total_transitions": 538964
7
  }
models/word_markov/bxr_markov_ctx2_word.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:08a52284e5b3cf3a5d895964ac029e73604bb505096a1643018fcbce3b7f0871
3
- size 11044627
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa645f6c7ca05c76ed36c372451efd8f9c8eaf4ef88fdbe687946397959ae364
3
+ size 10866637
models/word_markov/bxr_markov_ctx2_word_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 2,
3
  "variant": "word",
4
  "language": "bxr",
5
- "unique_contexts": 383260,
6
- "total_transitions": 543100
7
  }
 
2
  "context_size": 2,
3
  "variant": "word",
4
  "language": "bxr",
5
+ "unique_contexts": 378037,
6
+ "total_transitions": 536197
7
  }