omarkamali commited on
Commit
32f8b6a
·
verified ·
1 Parent(s): df8efce

Upload all models and assets for ce (latest)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. README.md +229 -193
  3. models/embeddings/aligned/ce_128d.bin +3 -0
  4. models/embeddings/aligned/ce_128d.meta.json +1 -0
  5. models/embeddings/aligned/ce_128d.projection.npy +3 -0
  6. models/embeddings/aligned/ce_128d_metadata.json +8 -0
  7. models/embeddings/aligned/ce_32d.bin +3 -0
  8. models/embeddings/aligned/ce_32d.meta.json +1 -0
  9. models/embeddings/aligned/ce_32d.projection.npy +3 -0
  10. models/embeddings/aligned/ce_32d_metadata.json +8 -0
  11. models/embeddings/aligned/ce_64d.bin +3 -0
  12. models/embeddings/aligned/ce_64d.meta.json +1 -0
  13. models/embeddings/aligned/ce_64d.projection.npy +3 -0
  14. models/embeddings/aligned/ce_64d_metadata.json +8 -0
  15. models/embeddings/monolingual/ce_128d.bin +2 -2
  16. models/embeddings/monolingual/ce_128d_metadata.json +1 -1
  17. models/embeddings/monolingual/ce_32d.bin +2 -2
  18. models/embeddings/monolingual/ce_32d_metadata.json +1 -1
  19. models/embeddings/monolingual/ce_64d.bin +2 -2
  20. models/embeddings/monolingual/ce_64d_metadata.json +1 -1
  21. models/subword_markov/ce_markov_ctx1_subword.parquet +2 -2
  22. models/subword_markov/ce_markov_ctx1_subword_metadata.json +2 -2
  23. models/subword_markov/ce_markov_ctx2_subword.parquet +2 -2
  24. models/subword_markov/ce_markov_ctx2_subword_metadata.json +2 -2
  25. models/subword_markov/ce_markov_ctx3_subword.parquet +2 -2
  26. models/subword_markov/ce_markov_ctx3_subword_metadata.json +2 -2
  27. models/subword_markov/ce_markov_ctx4_subword.parquet +2 -2
  28. models/subword_markov/ce_markov_ctx4_subword_metadata.json +2 -2
  29. models/subword_ngram/ce_2gram_subword.parquet +2 -2
  30. models/subword_ngram/ce_2gram_subword_metadata.json +2 -2
  31. models/subword_ngram/ce_3gram_subword.parquet +2 -2
  32. models/subword_ngram/ce_3gram_subword_metadata.json +2 -2
  33. models/subword_ngram/ce_4gram_subword.parquet +2 -2
  34. models/subword_ngram/ce_4gram_subword_metadata.json +2 -2
  35. models/subword_ngram/ce_5gram_subword.parquet +3 -0
  36. models/subword_ngram/ce_5gram_subword_metadata.json +7 -0
  37. models/tokenizer/ce_tokenizer_16k.model +2 -2
  38. models/tokenizer/ce_tokenizer_16k.vocab +0 -0
  39. models/tokenizer/ce_tokenizer_32k.model +2 -2
  40. models/tokenizer/ce_tokenizer_32k.vocab +0 -0
  41. models/tokenizer/ce_tokenizer_64k.model +2 -2
  42. models/tokenizer/ce_tokenizer_64k.vocab +0 -0
  43. models/tokenizer/ce_tokenizer_8k.model +2 -2
  44. models/tokenizer/ce_tokenizer_8k.vocab +0 -0
  45. models/vocabulary/ce_vocabulary.parquet +2 -2
  46. models/vocabulary/ce_vocabulary_metadata.json +9 -9
  47. models/word_markov/ce_markov_ctx1_word.parquet +2 -2
  48. models/word_markov/ce_markov_ctx1_word_metadata.json +2 -2
  49. models/word_markov/ce_markov_ctx2_word.parquet +2 -2
  50. models/word_markov/ce_markov_ctx2_word_metadata.json +2 -2
.gitattributes CHANGED
@@ -39,3 +39,4 @@ visualizations/position_encoding_comparison.png filter=lfs diff=lfs merge=lfs -t
39
  visualizations/tsne_sentences.png filter=lfs diff=lfs merge=lfs -text
40
  visualizations/tsne_words.png filter=lfs diff=lfs merge=lfs -text
41
  visualizations/zipf_law.png filter=lfs diff=lfs merge=lfs -text
 
 
39
  visualizations/tsne_sentences.png filter=lfs diff=lfs merge=lfs -text
40
  visualizations/tsne_words.png filter=lfs diff=lfs merge=lfs -text
41
  visualizations/zipf_law.png filter=lfs diff=lfs merge=lfs -text
42
+ visualizations/embedding_tsne_multilingual.png filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,6 +1,6 @@
1
  ---
2
  language: ce
3
- language_name: CE
4
  language_family: caucasian_northeast
5
  tags:
6
  - wikilangs
@@ -10,11 +10,21 @@ tags:
10
  - n-gram
11
  - markov
12
  - wikipedia
 
 
 
 
 
 
 
 
 
 
13
  - monolingual
14
  - family-caucasian_northeast
15
  license: mit
16
  library_name: wikilangs
17
- pipeline_tag: feature-extraction
18
  datasets:
19
  - omarkamali/wikipedia-monthly
20
  dataset_info:
@@ -23,20 +33,20 @@ dataset_info:
23
  metrics:
24
  - name: best_compression_ratio
25
  type: compression
26
- value: 3.783
27
  - name: best_isotropy
28
  type: isotropy
29
- value: 0.8761
30
  - name: vocabulary_size
31
  type: vocab
32
  value: 0
33
  generated: 2026-01-03
34
  ---
35
 
36
- # CE - Wikilangs Models
37
  ## Comprehensive Research Report & Full Ablation Study
38
 
39
- This repository contains NLP models trained and evaluated by Wikilangs, specifically on **CE** Wikipedia data.
40
  We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and word embeddings.
41
 
42
  ## 📋 Repository Contents
@@ -60,7 +70,7 @@ We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and
60
  - [3. Markov Chain Evaluation](#3-markov-chain-evaluation)
61
  - [4. Vocabulary Analysis](#4-vocabulary-analysis)
62
  - [5. Word Embeddings Evaluation](#5-word-embeddings-evaluation)
63
- - [6. Morphological Analysis (Experimental)](#6-morphological-analysis)
64
  - [7. Summary & Recommendations](#7-summary--recommendations)
65
  - [Metrics Glossary](#appendix-metrics-glossary--interpretation-guide)
66
  - [Visualizations Index](#visualizations-index)
@@ -80,47 +90,47 @@ We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and
80
 
81
  | Vocab Size | Compression | Avg Token Len | UNK Rate | Total Tokens |
82
  |------------|-------------|---------------|----------|--------------|
83
- | **8k** | 2.792x | 2.80 | 0.9604% | 543,837 |
84
- | **16k** | 3.140x | 3.15 | 1.0803% | 483,478 |
85
- | **32k** | 3.480x | 3.49 | 1.1970% | 436,328 |
86
- | **64k** | 3.783x 🏆 | 3.79 | 1.3016% | 401,281 |
87
 
88
  ### Tokenization Examples
89
 
90
  Below are sample sentences tokenized with each vocabulary size:
91
 
92
- **Sample 1:** `Жаныспай (Акмолан область) Жаныспай (Костанайн область)`
93
 
94
  | Vocab | Tokens | Count |
95
  |-------|--------|-------|
96
- | 8k | `▁жан ыс п ай ▁( ак молан ▁область ) ▁жан ... (+8 more)` | 18 |
97
- | 16k | `▁жан ыс пай ▁( акмолан ▁область ) ▁жан ыс пай ... (+5 more)` | 15 |
98
- | 32k | `▁жан ыс пай ▁( акмолан ▁область ) ▁жан ыс пай ... (+4 more)` | 14 |
99
- | 64k | `▁жан ыс пай ▁( акмолан ▁область ) ▁жан ыс пай ... (+4 more)` | 14 |
100
 
101
- **Sample 2:** `Антиго (Висконсин) Антиго (Маса-Карара) Антиго (гӀала, Висконсин)`
102
 
103
  | Vocab | Tokens | Count |
104
  |-------|--------|-------|
105
- | 8k | `▁анти го ▁( ви сконсин ) ▁анти го ▁( ма ... (+12 more)` | 22 |
106
- | 16k | `▁анти го ▁( висконсин ) ▁анти го ▁( ма са ... (+11 more)` | 21 |
107
- | 32k | `▁анти го ▁( висконсин ) ▁анти го ▁( маса - ... (+9 more)` | 19 |
108
- | 64k | `▁анти го ▁( висконсин ) ▁анти го ▁( маса - ... (+9 more)` | 19 |
109
 
110
- **Sample 3:** `Барда (Иркутскан область) Барда (Пермийн мохк) Барда (гӀала)`
111
 
112
  | Vocab | Tokens | Count |
113
  |-------|--------|-------|
114
- | 8k | `▁бар да ▁( иркутскан ▁область ) ▁бар да ▁( пермийн ... (+7 more)` | 17 |
115
- | 16k | `▁бар да ▁( иркутскан ▁область ) ▁бар да ▁( пермийн ... (+7 more)` | 17 |
116
- | 32k | `▁барда ▁( иркутскан ▁область ) ▁барда ( пермийн ▁мохк ) ... (+4 more)` | 14 |
117
- | 64k | `▁барда ▁( иркутскан ▁область ) ▁барда ( пермийн ▁мохк ) ... (+4 more)` | 14 |
118
 
119
 
120
  ### Key Findings
121
 
122
- - **Best Compression:** 64k achieves 3.783x compression
123
- - **Lowest UNK Rate:** 8k with 0.9604% unknown tokens
124
  - **Trade-off:** Larger vocabularies improve compression but increase model size
125
  - **Recommendation:** 32k vocabulary provides optimal balance for production use
126
 
@@ -137,12 +147,14 @@ Below are sample sentences tokenized with each vocabulary size:
137
 
138
  | N-gram | Variant | Perplexity | Entropy | Unique N-grams | Top-100 Coverage | Top-1000 Coverage |
139
  |--------|---------|------------|---------|----------------|------------------|-------------------|
140
- | **2-gram** | Word | 2,545 | 11.31 | 100,140 | 25.5% | 70.0% |
141
- | **2-gram** | Subword | 423 🏆 | 8.72 | 6,176 | 55.1% | 98.2% |
142
- | **3-gram** | Word | 3,286 | 11.68 | 157,541 | 21.2% | 65.9% |
143
- | **3-gram** | Subword | 2,337 | 11.19 | 58,954 | 23.8% | 69.8% |
144
- | **4-gram** | Word | 4,089 | 12.00 | 330,019 | 18.2% | 63.2% |
145
- | **4-gram** | Subword | 5,832 | 12.51 | 337,533 | 16.4% | 50.9% |
 
 
146
 
147
  ### Top 5 N-grams by Size
148
 
@@ -150,68 +162,88 @@ Below are sample sentences tokenized with each vocabulary size:
150
 
151
  | Rank | N-gram | Count |
152
  |------|--------|-------|
153
- | 1 | `нах беха` | 927,008 |
154
- | 2 | `беха меттигаш` | 876,464 |
155
- | 3 | `билгалдахарш хьажоргаш` | 387,483 |
156
- | 4 | `климат кхузахь` | 294,017 |
157
- | 5 | `сахьтан аса` | 272,866 |
158
 
159
  **3-grams (Word):**
160
 
161
  | Rank | N-gram | Count |
162
  |------|--------|-------|
163
- | 1 | `нах беха меттигаш` | 876,426 |
164
- | 2 | `кӏоштан нах беха` | 256,950 |
165
- | 3 | `климат кхузахь климат` | 254,686 |
166
- | 4 | `бахархой билгалдахарш хьажоргаш` | 156,558 |
167
- | 5 | `сахьтан аса йу` | 135,690 |
168
 
169
  **4-grams (Word):**
170
 
171
  | Rank | N-gram | Count |
172
  |------|--------|-------|
173
- | 1 | `кӏоштан нах беха меттигаш` | 256,946 |
174
  | 2 | `лелаш ду сахьтан аса` | 134,397 |
175
  | 3 | `нийса лелаш ду сахьтан` | 134,397 |
176
  | 4 | `сахьтан аса йу utc` | 133,768 |
177
  | 5 | `ду сахьтан аса йу` | 133,768 |
178
 
 
 
 
 
 
 
 
 
 
 
179
  **2-grams (Subword):**
180
 
181
  | Rank | N-gram | Count |
182
  |------|--------|-------|
183
- | 1 | `а _` | 8,696,976 |
184
- | 2 | `. _` | 8,337,924 |
185
- | 3 | `н _` | 7,066,559 |
186
- | 4 | `а н` | 6,445,422 |
187
- | 5 | `р а` | 5,305,199 |
188
 
189
  **3-grams (Subword):**
190
 
191
  | Rank | N-gram | Count |
192
  |------|--------|-------|
193
- | 1 | `а н _` | 4,127,441 |
194
- | 2 | `_ — _` | 2,719,160 |
195
- | 3 | ш _` | 1,910,774 |
196
- | 4 | н _` | 1,668,837 |
197
- | 5 | `а р а` | 1,610,648 |
198
 
199
  **4-grams (Subword):**
200
 
201
  | Rank | N-gram | Count |
202
  |------|--------|-------|
203
- | 1 | `т а н _` | 1,416,987 |
204
- | 2 | `а х а р` | 1,374,119 |
205
- | 3 | `. _ _` | 1,045,081 |
206
- | 4 | `а _ м е` | 1,006,220 |
207
- | 5 | `_ м е т` | 999,858 |
 
 
 
 
 
 
 
 
 
 
208
 
209
 
210
  ### Key Findings
211
 
212
- - **Best Perplexity:** 2-gram (subword) with 423
213
  - **Entropy Trend:** Decreases with larger n-grams (more predictable)
214
- - **Coverage:** Top-1000 patterns cover ~51% of corpus
215
  - **Recommendation:** 4-gram or 5-gram for best predictive performance
216
 
217
  ---
@@ -227,14 +259,14 @@ Below are sample sentences tokenized with each vocabulary size:
227
 
228
  | Context | Variant | Avg Entropy | Perplexity | Branching Factor | Unique Contexts | Predictability |
229
  |---------|---------|-------------|------------|------------------|-----------------|----------------|
230
- | **1** | Word | 0.6226 | 1.540 | 3.90 | 520,111 | 37.7% |
231
- | **1** | Subword | 0.9426 | 1.922 | 9.07 | 1,553 | 5.7% |
232
- | **2** | Word | 0.1849 | 1.137 | 1.44 | 2,019,671 | 81.5% |
233
- | **2** | Subword | 0.9737 | 1.964 | 7.37 | 14,069 | 2.6% |
234
- | **3** | Word | 0.0632 | 1.045 | 1.13 | 2,889,994 | 93.7% |
235
- | **3** | Subword | 0.8560 | 1.810 | 4.97 | 103,627 | 14.4% |
236
- | **4** | Word | 0.0320 🏆 | 1.022 | 1.08 | 3,246,178 | 96.8% |
237
- | **4** | Subword | 0.7168 | 1.643 | 3.27 | 515,118 | 28.3% |
238
 
239
  ### Generated Text Samples (Word-based)
240
 
@@ -242,27 +274,27 @@ Below are text samples generated from each word-based Markov chain model:
242
 
243
  **Context Size 1:**
244
 
245
- 1. `а ду йалташ хастоьмаш малхбален кӏошташкара пачхьалкхан европин дехьайолуш алсама гӏийлачу мехца бек...`
246
- 2. `нах беха меттигаш нах беха меттигаш провинцин нах беха меттигаш кӏоштан нах беха меттигаш воеводалли...`
247
- 3. `беха меттигаш штатан йукъахь квинс университет им м в пономарёва м прохоров т 82 т и`
248
 
249
  **Context Size 2:**
250
 
251
- 1. `нах беха меттигаш нах беха меттигаш нисйина нах беха меттигаш кӏоштан нах беха меттигаш кӏоштан нах ...`
252
- 2. `беха меттигаш кӏоштан нах беха меттигаш кӏоштан нах беха меттигаш воеводаллин нах беха меттигаш нах ...`
253
- 3. `билгалдахарш хьажоргаш черкассин областан индексаш кӏоштан нах беха меттигаш микрокӏошташ нах беха м...`
254
 
255
  **Context Size 3:**
256
 
257
- 1. `нах беха меттигаш микрокӏошташ нах беха меттигаш нисйина нах беха меттигаш нах беха меттигаш микрокӏ...`
258
- 2. `кӏоштан нах беха меттигаш нисйина нах беха меттигаш нах беха меттигаш кӏоштан нах беха меттигаш нах ...`
259
- 3. `климат кхузахь климат барамехь континенталан йу аьхка йовха хуьлу ткъа ӏа барамехь шийла хуьлу шаран...`
260
 
261
  **Context Size 4:**
262
 
263
- 1. `нийса лелаш ду сахьтан аса йу utc 3 билгалдахарш хьажоргаш неклиновскан кӏоштан индексаш кӏоштан нах...`
264
- 2. `лелаш ду сахьтан аса йу utc 3 билгалдахарш хьажоргаш селижарован кӏоштан индексаш кӏоштан нах беха м...`
265
- 3. `ду сахьтан аса йу utc 3 билгалдахарш хьажоргаш максатихан кӏоштан индексаш кӏоштан нах беха меттигаш...`
266
 
267
 
268
  ### Generated Text Samples (Subword-based)
@@ -271,34 +303,34 @@ Below are text samples generated from each subword-based Markov chain model:
271
 
272
  **Context Size 1:**
273
 
274
- 1. `_/_циллан_olevia`
275
- 2. _перерхашес._ба`
276
- 3. `нилию._7959-со_к`
277
 
278
  **Context Size 2:**
279
 
280
- 1. `а_койн_сахар_тӏуь`
281
- 2. `._ре_нашкая_:_спу`
282
- 3. `н_схойн_стр_штаме`
283
 
284
  **Context Size 3:**
285
 
286
- 1. `ан_аркатерия_исти_`
287
- 2. `_—_итан_новгорокӏо`
288
- 3. `аш_беха_местник_гу`
289
 
290
  **Context Size 4:**
291
 
292
- 1. `тан_кӏоштан_воеводс`
293
- 2. `ахарш_хьажоргаш_нах`
294
- 3. `._—_b.,_heidelberg,`
295
 
296
 
297
  ### Key Findings
298
 
299
- - **Best Predictability:** Context-4 (word) with 96.8% predictability
300
  - **Branching Factor:** Decreases with context size (more deterministic)
301
- - **Memory Trade-off:** Larger contexts require more storage (515,118 contexts)
302
  - **Recommendation:** Context-3 or Context-4 for text generation
303
 
304
  ---
@@ -314,64 +346,64 @@ Below are text samples generated from each subword-based Markov chain model:
314
 
315
  | Metric | Value |
316
  |--------|-------|
317
- | Vocabulary Size | 230,774 |
318
- | Total Tokens | 54,539,322 |
319
- | Mean Frequency | 236.33 |
320
  | Median Frequency | 3 |
321
- | Frequency Std Dev | 7087.98 |
322
 
323
  ### Most Common Words
324
 
325
  | Rank | Word | Frequency |
326
  |------|------|-----------|
327
- | 1 | а | 1,429,788 |
328
- | 2 | нах | 929,389 |
329
- | 3 | беха | 927,412 |
330
- | 4 | меттигаш | 892,206 |
331
- | 5 | в | 665,820 |
332
- | 6 | климат | 663,481 |
333
- | 7 | м | 649,926 |
334
- | 8 | йу | 631,461 |
335
- | 9 | билгалдахарш | 595,304 |
336
- | 10 | с | 497,975 |
337
 
338
  ### Least Common Words (from vocabulary)
339
 
340
  | Rank | Word | Frequency |
341
  |------|------|-----------|
342
- | 1 | горушкинскан | 2 |
343
- | 2 | тулинскан | 2 |
344
- | 3 | долгопольскан | 2 |
345
- | 4 | погостищенскан | 2 |
346
- | 5 | кохановскан | 2 |
347
- | 6 | морховскан | 2 |
348
- | 7 | нежадовскан | 2 |
349
- | 8 | липиницкан | 2 |
350
- | 9 | зачепичи | 2 |
351
- | 10 | меетиг | 2 |
352
 
353
  ### Zipf's Law Analysis
354
 
355
  | Metric | Value |
356
  |--------|-------|
357
- | Zipf Coefficient | 1.8318 |
358
- | R² (Goodness of Fit) | 0.964473 |
359
  | Adherence Quality | **excellent** |
360
 
361
  ### Coverage Analysis
362
 
363
  | Top N Words | Coverage |
364
  |-------------|----------|
365
- | Top 100 | 44.4% |
366
- | Top 1,000 | 86.7% |
367
- | Top 5,000 | 96.7% |
368
- | Top 10,000 | 97.7% |
369
 
370
  ### Key Findings
371
 
372
- - **Zipf Compliance:** R²=0.9645 indicates excellent adherence to Zipf's law
373
- - **High Frequency Dominance:** Top 100 words cover 44.4% of corpus
374
- - **Long Tail:** 220,774 words needed for remaining 2.3% coverage
375
 
376
  ---
377
  ## 5. Word Embeddings Evaluation
@@ -387,37 +419,40 @@ Below are text samples generated from each subword-based Markov chain model:
387
 
388
  ### 5.1 Cross-Lingual Alignment
389
 
390
- > *Note: Multilingual alignment visualization not available for this language.*
 
 
391
 
392
 
393
  ### 5.2 Model Comparison
394
 
395
  | Model | Dimension | Isotropy | Semantic Density | Alignment R@1 | Alignment R@10 |
396
  |-------|-----------|----------|------------------|---------------|----------------|
397
- | **mono_32d** | 32 | 0.8761 🏆 | 0.3710 | N/A | N/A |
398
- | **mono_64d** | 64 | 0.8520 | 0.3045 | N/A | N/A |
399
- | **mono_128d** | 128 | 0.7849 | 0.2825 | N/A | N/A |
 
 
 
400
 
401
  ### Key Findings
402
 
403
- - **Best Isotropy:** mono_32d with 0.8761 (more uniform distribution)
404
- - **Semantic Density:** Average pairwise similarity of 0.3193. Lower values indicate better semantic separation.
405
- - **Alignment Quality:** No aligned models evaluated in this run.
406
  - **Recommendation:** 128d aligned for best cross-lingual performance
407
 
408
  ---
409
  ## 6. Morphological Analysis (Experimental)
410
 
411
- > ⚠️ **Warning:** This language shows low morphological productivity. The statistical signals used for this analysis may be noisy or less reliable than for morphologically rich languages.
412
-
413
  This section presents an automated morphological analysis derived from the statistical divergence between word-level and subword-level models. By analyzing where subword predictability spikes and where word-level coverage fails, we can infer linguistic structures without supervised data.
414
 
415
  ### 6.1 Productivity & Complexity
416
 
417
  | Metric | Value | Interpretation | Recommendation |
418
  |--------|-------|----------------|----------------|
419
- | Productivity Index | **0.000** | Low morphological productivity | ⚠️ Likely unreliable |
420
- | Idiomaticity Gap | **-1.000** | Low formulaic content | - |
421
 
422
  ### 6.2 Affix Inventory (Productive Units)
423
 
@@ -426,21 +461,20 @@ These are the most productive prefixes and suffixes identified by sampling the v
426
  #### Productive Prefixes
427
  | Prefix | Examples |
428
  |--------|----------|
429
- | `-ка` | картографии, карайора, карпат |
430
- | `-ко` | количество, кочаны, кошехаблан |
431
- | `-ма` | майкен, маршаллвилл, машано |
432
 
433
  #### Productive Suffixes
434
  | Suffix | Examples |
435
  |--------|----------|
436
- | `-а` | ривица, валенсуэла, карайора |
437
- | `-о` | монтеморо, количество, мятнево |
438
- | `-н` | расистийн, майкен, тефран |
439
- | `-ан` | тефран, дмитрован, кертан |
440
- | `-во` | количество, мятнево, крайково |
441
- | `-ки` | исаковски, юридически, перлавки |
442
- | `-ово` | крайково, перегудово, дубново |
443
- | `-ка` | узника, кукушка, тлаика |
444
 
445
  ### 6.3 Bound Stems (Lexical Roots)
446
 
@@ -448,18 +482,18 @@ Bound stems are high-frequency subword units that are semantically cohesive but
448
 
449
  | Stem | Cohesion | Substitutability | Examples |
450
  |------|----------|------------------|----------|
451
- | `архо` | 2.04x | 122 contexts | архон, тархо, лархо |
452
- | `галд` | 2.73x | 16 contexts | галдо, галда, угалде |
453
- | `ргаш` | 2.16x | 34 contexts | ургаш, бергаш, цергаш |
454
- | `лгал` | 2.58x | 17 contexts | билгал, билгало, билгала |
455
- | `етти` | 1.89x | 42 contexts | бетти, нетти, меттин |
456
- | `харх` | 1.88x | 41 contexts | ахархо, вахарх, мухарх |
457
- | `халл` | 1.51x | 92 contexts | халла, халле, халль |
458
- | `ийла` | 1.86x | 35 contexts | кийла, шийла, мийла |
459
- | `игаш` | 2.25x | 18 contexts | бигаш, цигаш, книгаш |
460
- | `рхой` | 2.21x | 19 contexts | лархой, сурхой, сурхойн |
461
- | `ласт` | 1.59x | 60 contexts | пласт, ласта, селаст |
462
- | `ттиг` | 1.99x | 25 contexts | меттиг, гаттиг, ме́ттиг |
463
 
464
  ### 6.4 Affix Compatibility (Co-occurrence)
465
 
@@ -467,16 +501,16 @@ This table shows which prefixes and suffixes most frequently co-occur on the sam
467
 
468
  | Prefix | Suffix | Frequency | Examples |
469
  |--------|--------|-----------|----------|
470
- | `-ко` | `-а` | 51 words | королиха, кокориха |
471
- | `-ка` | `-а` | 43 words | карпеевка, камила |
472
- | `-ка` | `-о` | 38 words | картелево, катюшино |
473
- | `-ма` | `-а` | 35 words | машакепара, малакода |
474
- | `-ко` | `-о` | 33 words | косогорово, косяково |
475
- | `-ка` | `-н` | 31 words | калустовгӏеран, камблен |
476
- | `-ма` | `-н` | 24 words | малоярославецан, марьинкан |
477
- | `-ко` | `-н` | 23 words | коритен, койдин |
478
- | `-ма` | `-о` | 22 words | маторо, манкузо |
479
- | `-ко` | `-во` | 18 words | косогорово, косяково |
480
 
481
  ### 6.5 Recursive Morpheme Segmentation
482
 
@@ -484,26 +518,28 @@ Using **Recursive Hierarchical Substitutability**, we decompose complex words in
484
 
485
  | Word | Suggested Split | Confidence | Stem |
486
  |------|-----------------|------------|------|
487
- | полканово | **`полк-ан-ово`** | 6.0 | `полк` |
488
- | андрюшкино | **`андрюш-ки-но`** | 6.0 | `андрюш` |
489
- | зимовники | **`зимовни-ки`** | 4.5 | `зимовни` |
490
- | гринвичан | **`гринвич-ан`** | 4.5 | `гринвич` |
491
- | гуннбьёрнан | **`гуннбьёрн-ан`** | 4.5 | `гуннбьёрн` |
492
- | хромосоман | **`хромосом-ан`** | 4.5 | `хромосом` |
493
- | боьлкъазаран | **`боьлкъазар-ан`** | 4.5 | `боьлкъазар` |
494
- | хӏуманашна | **`хӏуманаш-на`** | 4.5 | `хӏуманаш` |
495
- | ынтымакан | **`ынтымак-ан`** | 4.5 | `ынтымак` |
496
- | бартолина | **`бартоли-на`** | 4.5 | `бартоли` |
497
- | судженскан | **`судженск-ан`** | 4.5 | `судженск` |
498
- | бузиновка | **`бузинов-ка`** | 4.5 | `бузинов` |
499
- | тракторашна | **`трактораш-на`** | 4.5 | `трактораш` |
500
- | пайхӏамаран | **`пайхӏамар-ан`** | 4.5 | `пайхӏамар` |
501
- | нуьрнберган | **`нуьрнберг-ан`** | 4.5 | `нуьрнберг` |
502
 
503
  ### 6.6 Linguistic Interpretation
504
 
505
  > **Automated Insight:**
506
- The language CE appears to be more isolating or has a highly fixed vocabulary. Word-level models perform nearly as well as subword models, indicating fewer productive morphological processes.
 
 
507
 
508
  ---
509
  ## 7. Summary & Recommendations
@@ -514,9 +550,9 @@ The language CE appears to be more isolating or has a highly fixed vocabulary. W
514
 
515
  | Component | Recommended | Rationale |
516
  |-----------|-------------|-----------|
517
- | Tokenizer | **64k BPE** | Best compression (3.78x) |
518
- | N-gram | **2-gram** | Lowest perplexity (423) |
519
- | Markov | **Context-4** | Highest predictability (96.8%) |
520
  | Embeddings | **100d** | Balanced semantic capture and isotropy |
521
 
522
 
@@ -730,4 +766,4 @@ MIT License - Free for academic and commercial use.
730
  ---
731
  *Generated by Wikilangs Models Pipeline*
732
 
733
- *Report Date: 2026-01-03 10:17:57*
 
1
  ---
2
  language: ce
3
+ language_name: Chechen
4
  language_family: caucasian_northeast
5
  tags:
6
  - wikilangs
 
10
  - n-gram
11
  - markov
12
  - wikipedia
13
+ - feature-extraction
14
+ - sentence-similarity
15
+ - tokenization
16
+ - n-grams
17
+ - markov-chain
18
+ - text-mining
19
+ - fasttext
20
+ - babelvec
21
+ - vocabulous
22
+ - vocabulary
23
  - monolingual
24
  - family-caucasian_northeast
25
  license: mit
26
  library_name: wikilangs
27
+ pipeline_tag: text-generation
28
  datasets:
29
  - omarkamali/wikipedia-monthly
30
  dataset_info:
 
33
  metrics:
34
  - name: best_compression_ratio
35
  type: compression
36
+ value: 3.737
37
  - name: best_isotropy
38
  type: isotropy
39
+ value: 0.8747
40
  - name: vocabulary_size
41
  type: vocab
42
  value: 0
43
  generated: 2026-01-03
44
  ---
45
 
46
+ # Chechen - Wikilangs Models
47
  ## Comprehensive Research Report & Full Ablation Study
48
 
49
+ This repository contains NLP models trained and evaluated by Wikilangs, specifically on **Chechen** Wikipedia data.
50
  We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and word embeddings.
51
 
52
  ## 📋 Repository Contents
 
70
  - [3. Markov Chain Evaluation](#3-markov-chain-evaluation)
71
  - [4. Vocabulary Analysis](#4-vocabulary-analysis)
72
  - [5. Word Embeddings Evaluation](#5-word-embeddings-evaluation)
73
+ - [6. Morphological Analysis (Experimental)](#6--morphological-analysis-experimental)
74
  - [7. Summary & Recommendations](#7-summary--recommendations)
75
  - [Metrics Glossary](#appendix-metrics-glossary--interpretation-guide)
76
  - [Visualizations Index](#visualizations-index)
 
90
 
91
  | Vocab Size | Compression | Avg Token Len | UNK Rate | Total Tokens |
92
  |------------|-------------|---------------|----------|--------------|
93
+ | **8k** | 2.792x | 2.80 | 0.9605% | 541,154 |
94
+ | **16k** | 3.113x | 3.12 | 1.0708% | 485,447 |
95
+ | **32k** | 3.423x | 3.43 | 1.1775% | 441,435 |
96
+ | **64k** | 3.737x 🏆 | 3.74 | 1.2855% | 404,354 |
97
 
98
  ### Tokenization Examples
99
 
100
  Below are sample sentences tokenized with each vocabulary size:
101
 
102
+ **Sample 1:** `Бейца (Бихор) Бейца (Клуж) Бейца (Марамуреш) Бейца (Муреш) Бейца (Хунедоара) Бей...`
103
 
104
  | Vocab | Tokens | Count |
105
  |-------|--------|-------|
106
+ | 8k | `▁бей ца ▁( б их ор ) ▁бей ца ▁( ... (+30 more)` | 40 |
107
+ | 16k | `▁бей ца ▁( б ихор ) ▁бей ца ▁( к ... (+24 more)` | 34 |
108
+ | 32k | `▁бей ца ▁( бихор ) ▁бей ца ▁( клуж ) ... (+20 more)` | 30 |
109
+ | 64k | `▁бейца ▁( бихор ) ▁бейца ▁( клуж ) ▁бейца ▁( ... (+14 more)` | 24 |
110
 
111
+ **Sample 2:** `Киякты (Актобен область) Киякты (Мангистаунан область)`
112
 
113
  | Vocab | Tokens | Count |
114
  |-------|--------|-------|
115
+ | 8k | `▁к ия кт ы ▁( акт обен ▁область ) ▁к ... (+10 more)` | 20 |
116
+ | 16k | `▁к ия кты ▁( акт обен ▁область ) ▁к ия ... (+8 more)` | 18 |
117
+ | 32k | `▁кия кты ▁( актобен ▁область ) ▁кия кты ▁( ман ... (+3 more)` | 13 |
118
+ | 64k | `▁кия кты ▁( актобен ▁область ) ▁кия кты ▁( мангистаунан ... (+2 more)` | 12 |
119
 
120
+ **Sample 3:** `ХӀаджали (40° 14' N 47° 16' E), (Бардан кӀошт) ХӀаджали (40° 27' N 47° 05' E), (...`
121
 
122
  | Vocab | Tokens | Count |
123
  |-------|--------|-------|
124
+ | 8k | `▁хӏа дж али ▁( 4 0 ° 1 4 ... (+44 more)` | 54 |
125
+ | 16k | `▁хӏадж али ▁( 4 0 ° 1 4 ' ... (+42 more)` | 52 |
126
+ | 32k | `▁хӏадж али ▁( 4 0 °1 4 ' ... (+40 more)` | 50 |
127
+ | 64k | `▁хӏадж али ▁( 4 0 °1 4 ' ... (+40 more)` | 50 |
128
 
129
 
130
  ### Key Findings
131
 
132
+ - **Best Compression:** 64k achieves 3.737x compression
133
+ - **Lowest UNK Rate:** 8k with 0.9605% unknown tokens
134
  - **Trade-off:** Larger vocabularies improve compression but increase model size
135
  - **Recommendation:** 32k vocabulary provides optimal balance for production use
136
 
 
147
 
148
  | N-gram | Variant | Perplexity | Entropy | Unique N-grams | Top-100 Coverage | Top-1000 Coverage |
149
  |--------|---------|------------|---------|----------------|------------------|-------------------|
150
+ | **2-gram** | Word | 3,390 | 11.73 | 113,212 | 22.9% | 62.3% |
151
+ | **2-gram** | Subword | 435 🏆 | 8.77 | 6,171 | 54.5% | 98.0% |
152
+ | **3-gram** | Word | 4,361 | 12.09 | 176,983 | 18.9% | 57.8% |
153
+ | **3-gram** | Subword | 2,517 | 11.30 | 59,082 | 23.1% | 68.3% |
154
+ | **4-gram** | Word | 5,357 | 12.39 | 387,928 | 16.4% | 55.1% |
155
+ | **4-gram** | Subword | 6,651 | 12.70 | 339,742 | 15.1% | 48.5% |
156
+ | **5-gram** | Word | 5,776 | 12.50 | 363,840 | 15.2% | 53.7% |
157
+ | **5-gram** | Subword | 11,240 | 13.46 | 966,556 | 12.7% | 40.2% |
158
 
159
  ### Top 5 N-grams by Size
160
 
 
162
 
163
  | Rank | N-gram | Count |
164
  |------|--------|-------|
165
+ | 1 | `нах беха` | 1,039,295 |
166
+ | 2 | `беха меттигаш` | 953,014 |
167
+ | 3 | `билгалдахарш хьажоргаш` | 387,484 |
168
+ | 4 | `климат кхузахь` | 314,080 |
169
+ | 5 | `кхузахь климат` | 293,860 |
170
 
171
  **3-grams (Word):**
172
 
173
  | Rank | N-gram | Count |
174
  |------|--------|-------|
175
+ | 1 | `нах беха меттигаш` | 952,977 |
176
+ | 2 | `климат кхузахь климат` | 274,749 |
177
+ | 3 | `кӏоштан нах беха` | 256,927 |
178
+ | 4 | `бахархой билгалдахарш хьажоргаш` | 156,557 |
179
+ | 5 | `ред а м` | 153,110 |
180
 
181
  **4-grams (Word):**
182
 
183
  | Rank | N-gram | Count |
184
  |------|--------|-------|
185
+ | 1 | `кӏоштан нах беха меттигаш` | 256,923 |
186
  | 2 | `лелаш ду сахьтан аса` | 134,397 |
187
  | 3 | `нийса лелаш ду сахьтан` | 134,397 |
188
  | 4 | `сахьтан аса йу utc` | 133,768 |
189
  | 5 | `ду сахьтан аса йу` | 133,768 |
190
 
191
+ **5-grams (Word):**
192
+
193
+ | Rank | N-gram | Count |
194
+ |------|--------|-------|
195
+ | 1 | `нийса лелаш ду сахьтан аса` | 134,397 |
196
+ | 2 | `ду сахьтан аса йу utc` | 133,768 |
197
+ | 3 | `лелаш ду сахьтан аса йу` | 133,768 |
198
+ | 4 | `индексаш кӏоштан нах беха меттигаш` | 122,584 |
199
+ | 5 | `аьхка йовха хуьлу ткъа ӏа` | 113,661 |
200
+
201
  **2-grams (Subword):**
202
 
203
  | Rank | N-gram | Count |
204
  |------|--------|-------|
205
+ | 1 | `а _` | 10,875,281 |
206
+ | 2 | `. _` | 9,874,426 |
207
+ | 3 | `н _` | 8,151,111 |
208
+ | 4 | `а н` | 7,675,531 |
209
+ | 5 | `р а` | 6,751,030 |
210
 
211
  **3-grams (Subword):**
212
 
213
  | Rank | N-gram | Count |
214
  |------|--------|-------|
215
+ | 1 | `а н _` | 4,716,126 |
216
+ | 2 | `_ — _` | 2,941,993 |
217
+ | 3 | а _` | 2,306,576 |
218
+ | 4 | ш _` | 2,292,649 |
219
+ | 5 | `а х ь` | 2,054,431 |
220
 
221
  **4-grams (Subword):**
222
 
223
  | Rank | N-gram | Count |
224
  |------|--------|-------|
225
+ | 1 | `т а н _` | 1,577,468 |
226
+ | 2 | `а х а р` | 1,505,060 |
227
+ | 3 | _ м е` | 1,193,821 |
228
+ | 4 | `а х ь _` | 1,177,180 |
229
+ | 5 | `_ м е т` | 1,177,138 |
230
+
231
+ **5-grams (Subword):**
232
+
233
+ | Rank | N-gram | Count |
234
+ |------|--------|-------|
235
+ | 1 | `_ м е т т` | 1,166,495 |
236
+ | 2 | `м е т т и` | 1,154,656 |
237
+ | 3 | `е т т и г` | 1,154,628 |
238
+ | 4 | `а _ м е т` | 1,067,312 |
239
+ | 5 | `_ н а х _` | 1,048,954 |
240
 
241
 
242
  ### Key Findings
243
 
244
+ - **Best Perplexity:** 2-gram (subword) with 435
245
  - **Entropy Trend:** Decreases with larger n-grams (more predictable)
246
+ - **Coverage:** Top-1000 patterns cover ~40% of corpus
247
  - **Recommendation:** 4-gram or 5-gram for best predictive performance
248
 
249
  ---
 
259
 
260
  | Context | Variant | Avg Entropy | Perplexity | Branching Factor | Unique Contexts | Predictability |
261
  |---------|---------|-------------|------------|------------------|-----------------|----------------|
262
+ | **1** | Word | 0.6776 | 1.600 | 4.20 | 526,205 | 32.2% |
263
+ | **1** | Subword | 0.9453 | 1.926 | 9.06 | 1,550 | 5.5% |
264
+ | **2** | Word | 0.1950 | 1.145 | 1.49 | 2,194,953 | 80.5% |
265
+ | **2** | Subword | 0.9623 | 1.948 | 7.39 | 14,021 | 3.8% |
266
+ | **3** | Word | 0.0756 | 1.054 | 1.15 | 3,239,505 | 92.4% |
267
+ | **3** | Subword | 0.8389 | 1.789 | 4.99 | 103,540 | 16.1% |
268
+ | **4** | Word | 0.0367 🏆 | 1.026 | 1.08 | 3,672,181 | 96.3% |
269
+ | **4** | Subword | 0.7073 | 1.633 | 3.29 | 516,039 | 29.3% |
270
 
271
  ### Generated Text Samples (Word-based)
272
 
 
274
 
275
  **Context Size 1:**
276
 
277
+ 1. `а зонехьклимат тверской области бахархойн дукхалла бахархойн дукхалла бахархойн дукхалла климат йу л...`
278
+ 2. `нах беха меттигаш жудецан нах беха меттигаш штатан йукъахь дара кочими монки гуайкура перику индейн ...`
279
+ 3. `беха меттигаш нах беха меттигаш нисйина нах беха меттигаш нисйина нах беха меттигаш кӏоштан индексаш...`
280
 
281
  **Context Size 2:**
282
 
283
+ 1. `нах беха меттигаш нах беха меттигаш лаха калифорни штатан йукъахь йу бахархой билгалдахарш литератур...`
284
+ 2. `беха меттигаш воеводаллин нах беха меттигаш нисйина нах беха меттигаш нисйина нах беха меттигаш нах ...`
285
+ 3. `билгалдахарш хьажоргаш спас деменскан кӏошт калугин областан спас деменскан кӏоштара дӏатесна эвла б...`
286
 
287
  **Context Size 3:**
288
 
289
+ 1. `нах беха меттигаш кӏоштан нах беха меттигаш штатан нах беха меттигаш штатан нах беха меттигаш штатан...`
290
+ 2. `климат кхузахь климат йу лаьттайуккъера хӏордан барамехь йекъа а йовха ӏа шийла ца хуьйлат а галкина...`
291
+ 3. `кӏоштан нах беха меттигаш штатан нах беха меттигаш нах беха меттигаш нисйина нах беха меттигаш нисйи...`
292
 
293
  **Context Size 4:**
294
 
295
+ 1. `лелаш ду сахьтан аса йу utc 3 билгалдахарш хьажоргаш устьян кӏоштан индексаш кӏоштан нах беха меттиг...`
296
+ 2. `нийса лелаш ду сахьтан аса йу utc 3 билгалдахарш хьажоргаш приморскан кӏоштан индексаш областан прим...`
297
+ 3. `ду сахьтан аса йу utc 7 билгалдахарш мохк`
298
 
299
 
300
  ### Generated Text Samples (Subword-based)
 
303
 
304
  **Context Size 1:**
305
 
306
+ 1. `_йаду_—_н_бще_вх`
307
+ 2. `анташ_гахахарха_`
308
+ 3. `нцалальталарклус`
309
 
310
  **Context Size 2:**
311
 
312
+ 1. `а_хила_дуьлинецес`
313
+ 2. `._у-фактябра_эххь`
314
+ 3. `н_йоккъах_бехь_ст`
315
 
316
  **Context Size 3:**
317
 
318
+ 1. `ан_областан_сизал_`
319
+ 2. `_—_январь_современ`
320
+ 3. `ра_хьолехьажоргаш_`
321
 
322
  **Context Size 4:**
323
 
324
+ 1. `тан_асан_коммунан_х`
325
+ 2. `ахарш_хьажоргаши_(д`
326
+ 3. `_меттигаш_коммунан`
327
 
328
 
329
  ### Key Findings
330
 
331
+ - **Best Predictability:** Context-4 (word) with 96.3% predictability
332
  - **Branching Factor:** Decreases with context size (more deterministic)
333
+ - **Memory Trade-off:** Larger contexts require more storage (516,039 contexts)
334
  - **Recommendation:** Context-3 or Context-4 for text generation
335
 
336
  ---
 
346
 
347
  | Metric | Value |
348
  |--------|-------|
349
+ | Vocabulary Size | 238,347 |
350
+ | Total Tokens | 67,032,110 |
351
+ | Mean Frequency | 281.24 |
352
  | Median Frequency | 3 |
353
+ | Frequency Std Dev | 8160.67 |
354
 
355
  ### Most Common Words
356
 
357
  | Rank | Word | Frequency |
358
  |------|------|-----------|
359
+ | 1 | а | 1,815,637 |
360
+ | 2 | нах | 1,049,193 |
361
+ | 3 | беха | 1,039,696 |
362
+ | 4 | меттигаш | 968,757 |
363
+ | 5 | йу | 814,157 |
364
+ | 6 | м | 798,557 |
365
+ | 7 | климат | 741,272 |
366
+ | 8 | в | 736,957 |
367
+ | 9 | билгалдахарш | 631,076 |
368
+ | 10 | с | 588,454 |
369
 
370
  ### Least Common Words (from vocabulary)
371
 
372
  | Rank | Word | Frequency |
373
  |------|------|-----------|
374
+ | 1 | эмпачадо | 2 |
375
+ | 2 | энано | 2 |
376
+ | 3 | эскопетал | 2 |
377
+ | 4 | эскриторио | 2 |
378
+ | 5 | макариос | 2 |
379
+ | 6 | эроика | 2 |
380
+ | 7 | скирринг | 2 |
381
+ | 8 | зигуинчор | 2 |
382
+ | 9 | зигуиншор | 2 |
383
+ | 10 | люксембургхо | 2 |
384
 
385
  ### Zipf's Law Analysis
386
 
387
  | Metric | Value |
388
  |--------|-------|
389
+ | Zipf Coefficient | 1.8633 |
390
+ | R² (Goodness of Fit) | 0.948539 |
391
  | Adherence Quality | **excellent** |
392
 
393
  ### Coverage Analysis
394
 
395
  | Top N Words | Coverage |
396
  |-------------|----------|
397
+ | Top 100 | 41.8% |
398
+ | Top 1,000 | 83.4% |
399
+ | Top 5,000 | 96.8% |
400
+ | Top 10,000 | 97.8% |
401
 
402
  ### Key Findings
403
 
404
+ - **Zipf Compliance:** R²=0.9485 indicates excellent adherence to Zipf's law
405
+ - **High Frequency Dominance:** Top 100 words cover 41.8% of corpus
406
+ - **Long Tail:** 228,347 words needed for remaining 2.2% coverage
407
 
408
  ---
409
  ## 5. Word Embeddings Evaluation
 
419
 
420
  ### 5.1 Cross-Lingual Alignment
421
 
422
+ ![Alignment Quality](visualizations/embedding_alignment_quality.png)
423
+
424
+ ![Multilingual t-SNE](visualizations/embedding_tsne_multilingual.png)
425
 
426
 
427
  ### 5.2 Model Comparison
428
 
429
  | Model | Dimension | Isotropy | Semantic Density | Alignment R@1 | Alignment R@10 |
430
  |-------|-----------|----------|------------------|---------------|----------------|
431
+ | **mono_32d** | 32 | 0.8747 | 0.3629 | N/A | N/A |
432
+ | **mono_64d** | 64 | 0.8592 | 0.2868 | N/A | N/A |
433
+ | **mono_128d** | 128 | 0.7998 | 0.2691 | N/A | N/A |
434
+ | **aligned_32d** | 32 | 0.8747 🏆 | 0.3562 | 0.0120 | 0.0960 |
435
+ | **aligned_64d** | 64 | 0.8592 | 0.3007 | 0.0320 | 0.2180 |
436
+ | **aligned_128d** | 128 | 0.7998 | 0.2615 | 0.1100 | 0.3620 |
437
 
438
  ### Key Findings
439
 
440
+ - **Best Isotropy:** aligned_32d with 0.8747 (more uniform distribution)
441
+ - **Semantic Density:** Average pairwise similarity of 0.3062. Lower values indicate better semantic separation.
442
+ - **Alignment Quality:** Aligned models achieve up to 11.0% R@1 in cross-lingual retrieval.
443
  - **Recommendation:** 128d aligned for best cross-lingual performance
444
 
445
  ---
446
  ## 6. Morphological Analysis (Experimental)
447
 
 
 
448
  This section presents an automated morphological analysis derived from the statistical divergence between word-level and subword-level models. By analyzing where subword predictability spikes and where word-level coverage fails, we can infer linguistic structures without supervised data.
449
 
450
  ### 6.1 Productivity & Complexity
451
 
452
  | Metric | Value | Interpretation | Recommendation |
453
  |--------|-------|----------------|----------------|
454
+ | Productivity Index | **5.000** | High morphological productivity | Reliable analysis |
455
+ | Idiomaticity Gap | **0.335** | High formulaic/idiomatic content | - |
456
 
457
  ### 6.2 Affix Inventory (Productive Units)
458
 
 
461
  #### Productive Prefixes
462
  | Prefix | Examples |
463
  |--------|----------|
464
+ | `-ка` | каркаусь, кассагумахи, кафка |
465
+ | `-ко` | костровскан, коховка, колумбехь |
 
466
 
467
  #### Productive Suffixes
468
  | Suffix | Examples |
469
  |--------|----------|
470
+ | `-а` | ледара, жиховка, масленка |
471
+ | `-н` | галийн, кувшин, самодийн |
472
+ | `-о` | белшево, санторо, эрцо |
473
+ | `-ан` | тӏаьрсиган, менделеевскан, костровскан |
474
+ | `-во` | белшево, миллерово, горяново |
475
+ | `-ка` | жиховка, масленка, раковка |
476
+ | `-ово` | миллерово, горяново, атынаково |
477
+ | `-ки` | недниковски, новокубански, ибараки |
478
 
479
  ### 6.3 Bound Stems (Lexical Roots)
480
 
 
482
 
483
  | Stem | Cohesion | Substitutability | Examples |
484
  |------|----------|------------------|----------|
485
+ | `архо` | 2.00x | 121 contexts | архон, лархо, тархо |
486
+ | `исто` | 1.91x | 130 contexts | мисто, чисто, исток |
487
+ | `галд` | 2.88x | 16 contexts | галда, галдо, галдун |
488
+ | `ргаш` | 2.28x | 34 contexts | ургаш, воргаш, мургаш |
489
+ | `харх` | 2.14x | 41 contexts | йахарх, хархув, мухарх |
490
+ | `икин` | 1.84x | 62 contexts | викин, рикин, бикин |
491
+ | `халл` | 1.55x | 92 contexts | халле, халль, халла |
492
+ | `рхой` | 2.30x | 19 contexts | лархой, сурхой, ахархой |
493
+ | `лгал` | 2.36x | 17 contexts | билгал, билгало, билгала |
494
+ | `игаш` | 2.34x | 17 contexts | бигаш, цигаш, эхигаш |
495
+ | `етти` | 1.73x | 42 contexts | бетти, нетти, петтит |
496
+ | `ттиг` | 1.96x | 25 contexts | меттиг, гаттиг, ме́ттиг |
497
 
498
  ### 6.4 Affix Compatibility (Co-occurrence)
499
 
 
501
 
502
  | Prefix | Suffix | Frequency | Examples |
503
  |--------|--------|-----------|----------|
504
+ | `-ко` | `-а` | 44 words | комната, колохта |
505
+ | `-ка` | `-о` | 40 words | кастелларо, карманково |
506
+ | `-ка` | `-а` | 38 words | казчана, кажа |
507
+ | `-ко` | `-о` | 35 words | корково, кощейково |
508
+ | `-ка` | `-н` | 27 words | кассон, капланецкан |
509
+ | `-ко` | `-н` | 23 words | конкистадоран, коюнлун |
510
+ | `-ко` | `-во` | 17 words | корково, кощейково |
511
+ | `-ка` | `-во` | 16 words | карманково, каптырево |
512
+ | `-ка` | `-ан` | 15 words | капланецкан, каштан |
513
+ | `-ко` | `-ан` | 13 words | конкистадоран, котован |
514
 
515
  ### 6.5 Recursive Morpheme Segmentation
516
 
 
518
 
519
  | Word | Suggested Split | Confidence | Stem |
520
  |------|-----------------|------------|------|
521
+ | евдокимовски | **`евдокимовс-ки`** | 4.5 | `евдокимовс` |
522
+ | заказникан | **`заказник-ан`** | 4.5 | `заказник` |
523
+ | череповецан | **`череповец-ан`** | 4.5 | `череповец` |
524
+ | господиново | **`господин-ово`** | 4.5 | `господин` |
525
+ | вайнахана | **`вайнаха-на`** | 4.5 | `вайнаха` |
526
+ | воротынскан | **`воротынск-ан`** | 4.5 | `воротынск` |
527
+ | кинофильман | **`кинофильм-ан`** | 4.5 | `кинофильм` |
528
+ | дийцаршна | **`дийцарш-на`** | 4.5 | `дийцарш` |
529
+ | театрашка | **`театраш-ка`** | 4.5 | `театраш` |
530
+ | федотован | **`федотов-ан`** | 4.5 | `федотов` |
531
+ | веселовка | **`веселов-ка`** | 4.5 | `веселов` |
532
+ | маядыково | **`маядык-ово`** | 4.5 | `маядык` |
533
+ | ходоровка | **`ходоров-ка`** | 4.5 | `ходоров` |
534
+ | новиковски | **`новиковс-ки`** | 4.5 | `новиковс` |
535
+ | меженашна | **`меженаш-на`** | 4.5 | `меженаш` |
536
 
537
  ### 6.6 Linguistic Interpretation
538
 
539
  > **Automated Insight:**
540
+ The language Chechen shows high morphological productivity. The subword models are significantly more efficient than word models, suggesting a rich system of affixation or compounding.
541
+
542
+ > **Note on Idiomaticity:** The high Idiomaticity Gap suggests a large number of frequent multi-word expressions or formulaic sequences that are statistically distinct from their component parts.
543
 
544
  ---
545
  ## 7. Summary & Recommendations
 
550
 
551
  | Component | Recommended | Rationale |
552
  |-----------|-------------|-----------|
553
+ | Tokenizer | **64k BPE** | Best compression (3.74x) |
554
+ | N-gram | **2-gram** | Lowest perplexity (435) |
555
+ | Markov | **Context-4** | Highest predictability (96.3%) |
556
  | Embeddings | **100d** | Balanced semantic capture and isotropy |
557
 
558
 
 
766
  ---
767
  *Generated by Wikilangs Models Pipeline*
768
 
769
+ *Report Date: 2026-01-03 20:55:32*
models/embeddings/aligned/ce_128d.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:804babe867ee021ce44c607b7457a28540a5895dfd149f5bbe5a18e3f3169fae
3
+ size 1118769088
models/embeddings/aligned/ce_128d.meta.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"lang": "ce", "dim": 128, "max_seq_len": 512, "is_aligned": true}
models/embeddings/aligned/ce_128d.projection.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9ad86d291c49412509312a81414e26a38ae899866004f0b0ed8689fcb8f3cc79
3
+ size 65664
models/embeddings/aligned/ce_128d_metadata.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "language": "ce",
3
+ "dimension": 128,
4
+ "version": "aligned",
5
+ "hub_language": "en",
6
+ "seed_vocab_size": 4892,
7
+ "vocab_size": 90375
8
+ }
models/embeddings/aligned/ce_32d.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9d3fef0e3ac299eaf7fc872e53e0b3987742e57372c805b348d744c4b5805039
3
+ size 281361088
models/embeddings/aligned/ce_32d.meta.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"lang": "ce", "dim": 32, "max_seq_len": 512, "is_aligned": true}
models/embeddings/aligned/ce_32d.projection.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b6fda6bb36357faa948730cc0cf573ffd5f12adab21ea85d197c0454d34ae13
3
+ size 4224
models/embeddings/aligned/ce_32d_metadata.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "language": "ce",
3
+ "dimension": 32,
4
+ "version": "aligned",
5
+ "hub_language": "en",
6
+ "seed_vocab_size": 4892,
7
+ "vocab_size": 90375
8
+ }
models/embeddings/aligned/ce_64d.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f4ca7e1a4f850e490603f7ae8f624cf64d83cccc57d820c6d1271612b3509eae
3
+ size 560497088
models/embeddings/aligned/ce_64d.meta.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"lang": "ce", "dim": 64, "max_seq_len": 512, "is_aligned": true}
models/embeddings/aligned/ce_64d.projection.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7fcddc9c3d445d47e337bdaa7647336d97192a2c6f5a231183424371f81364f
3
+ size 16512
models/embeddings/aligned/ce_64d_metadata.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "language": "ce",
3
+ "dimension": 64,
4
+ "version": "aligned",
5
+ "hub_language": "en",
6
+ "seed_vocab_size": 4892,
7
+ "vocab_size": 90375
8
+ }
models/embeddings/monolingual/ce_128d.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:552e31d70a010dcf9ef87e857ff88199b6929bbb6fb3bcdccca9386585c7aa73
3
- size 1106869199
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:804babe867ee021ce44c607b7457a28540a5895dfd149f5bbe5a18e3f3169fae
3
+ size 1118769088
models/embeddings/monolingual/ce_128d_metadata.json CHANGED
@@ -11,5 +11,5 @@
11
  "encoding_method": "rope",
12
  "dim": 128
13
  },
14
- "vocab_size": 79041
15
  }
 
11
  "encoding_method": "rope",
12
  "dim": 128
13
  },
14
+ "vocab_size": 90375
15
  }
models/embeddings/monolingual/ce_32d.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:5102b3e58f419aba998033902e9953b5363500654754d4e63832e110651c49fa
3
- size 278165711
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9d3fef0e3ac299eaf7fc872e53e0b3987742e57372c805b348d744c4b5805039
3
+ size 281361088
models/embeddings/monolingual/ce_32d_metadata.json CHANGED
@@ -11,5 +11,5 @@
11
  "encoding_method": "rope",
12
  "dim": 32
13
  },
14
- "vocab_size": 79041
15
  }
 
11
  "encoding_method": "rope",
12
  "dim": 32
13
  },
14
+ "vocab_size": 90375
15
  }
models/embeddings/monolingual/ce_64d.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:78021fa809a68d2e2ee1a5da53c5c92c25afcb13d930f8165b5ca92116725dfd
3
- size 554400207
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f4ca7e1a4f850e490603f7ae8f624cf64d83cccc57d820c6d1271612b3509eae
3
+ size 560497088
models/embeddings/monolingual/ce_64d_metadata.json CHANGED
@@ -11,5 +11,5 @@
11
  "encoding_method": "rope",
12
  "dim": 64
13
  },
14
- "vocab_size": 79041
15
  }
 
11
  "encoding_method": "rope",
12
  "dim": 64
13
  },
14
+ "vocab_size": 90375
15
  }
models/subword_markov/ce_markov_ctx1_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:9b662cf799690d78b190708472b117520cd7cbcad0ac633b1286d43e5c79ae3a
3
- size 117929
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c57a4d4ceea5f7fe12600bb28c41fad86be451a3e4dd7f8d4821e1f2f83d3eae
3
+ size 117962
models/subword_markov/ce_markov_ctx1_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 1,
3
  "variant": "subword",
4
  "language": "ce",
5
- "unique_contexts": 1553,
6
- "total_transitions": 402142071
7
  }
 
2
  "context_size": 1,
3
  "variant": "subword",
4
  "language": "ce",
5
+ "unique_contexts": 1550,
6
+ "total_transitions": 490058478
7
  }
models/subword_markov/ce_markov_ctx2_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c4dfc6fd0f57c29113d3b3fec461c0764a0522c46a13186880ed52b9d6958571
3
- size 872325
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cce7d0440c1a5456afc909be02ef73aeb4b5fc823d5d54072d8c97d71df627c6
3
+ size 896409
models/subword_markov/ce_markov_ctx2_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 2,
3
  "variant": "subword",
4
  "language": "ce",
5
- "unique_contexts": 14069,
6
- "total_transitions": 401528698
7
  }
 
2
  "context_size": 2,
3
  "variant": "subword",
4
  "language": "ce",
5
+ "unique_contexts": 14021,
6
+ "total_transitions": 489384727
7
  }
models/subword_markov/ce_markov_ctx3_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c3414e9127a5344c044f38d2218042344b271b5e8396bc6d4d72233e2b2d1118
3
- size 4229687
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a84f6e9c5476b3344964bbfbf972b02218fa2bbc42dd0d83b8be1017ae4748c5
3
+ size 4456718
models/subword_markov/ce_markov_ctx3_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 3,
3
  "variant": "subword",
4
  "language": "ce",
5
- "unique_contexts": 103627,
6
- "total_transitions": 400915325
7
  }
 
2
  "context_size": 3,
3
  "variant": "subword",
4
  "language": "ce",
5
+ "unique_contexts": 103540,
6
+ "total_transitions": 488710976
7
  }
models/subword_markov/ce_markov_ctx4_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f130a9685a8aed9c6b457e355c462d4bb6bc97cd44b8ef634c042ea0133165d1
3
- size 15358547
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c5d0342e97c5a3a41a98188f773d55336f985ab4718a41306a8a2597b1ea68be
3
+ size 15743617
models/subword_markov/ce_markov_ctx4_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 4,
3
  "variant": "subword",
4
  "language": "ce",
5
- "unique_contexts": 515118,
6
- "total_transitions": 400301952
7
  }
 
2
  "context_size": 4,
3
  "variant": "subword",
4
  "language": "ce",
5
+ "unique_contexts": 516039,
6
+ "total_transitions": 488037225
7
  }
models/subword_ngram/ce_2gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:461d6692fb3cde3c1a7fd56b30b64912a0c140583d19fdeca1497422c4effcb1
3
- size 97261
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0bb422efe1e8d6c87d00fcb556d9ebaf0d19e765218cf54607a698e8eb27175e
3
+ size 99026
models/subword_ngram/ce_2gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "n": 2,
3
  "variant": "subword",
4
  "language": "ce",
5
- "unique_ngrams": 6176,
6
- "total_ngrams": 402142071
7
  }
 
2
  "n": 2,
3
  "variant": "subword",
4
  "language": "ce",
5
+ "unique_ngrams": 6171,
6
+ "total_ngrams": 490058478
7
  }
models/subword_ngram/ce_3gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:58a82a7f67b113302603309f686885d9e1c8883bfbb49e51f83b18800eabd3e3
3
- size 816823
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1c5d97f00b9951c8184f997befc14730e82e17bae96a9ec6de73b27243c3dca6
3
+ size 832316
models/subword_ngram/ce_3gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "n": 3,
3
  "variant": "subword",
4
  "language": "ce",
5
- "unique_ngrams": 58954,
6
- "total_ngrams": 401528698
7
  }
 
2
  "n": 3,
3
  "variant": "subword",
4
  "language": "ce",
5
+ "unique_ngrams": 59082,
6
+ "total_ngrams": 489384727
7
  }
models/subword_ngram/ce_4gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:47cefb2349ec2159ead2601f3247b831fb17076c78f2ad1df98bfeca85c04804
3
- size 4353133
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b5c6939fabfe098e3c087df323974dddd341335f35469a5fb05f9a54fd2337ee
3
+ size 4399891
models/subword_ngram/ce_4gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "n": 4,
3
  "variant": "subword",
4
  "language": "ce",
5
- "unique_ngrams": 337533,
6
- "total_ngrams": 400915325
7
  }
 
2
  "n": 4,
3
  "variant": "subword",
4
  "language": "ce",
5
+ "unique_ngrams": 339742,
6
+ "total_ngrams": 488710976
7
  }
models/subword_ngram/ce_5gram_subword.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:08a9e418d6cd4f29d15097039516b9809d8acf9222859e259a0a4fee934026d9
3
+ size 13224434
models/subword_ngram/ce_5gram_subword_metadata.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "n": 5,
3
+ "variant": "subword",
4
+ "language": "ce",
5
+ "unique_ngrams": 966556,
6
+ "total_ngrams": 488037225
7
+ }
models/tokenizer/ce_tokenizer_16k.model CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:47044bfafe2471fb7dd149ae56b1ee71a3dc0dae2187dde2da97d70536d94302
3
- size 583986
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:01ad5c1196d4613f22784d2521687c198c888b7e7dd8a6649706e746984bf9f2
3
+ size 582744
models/tokenizer/ce_tokenizer_16k.vocab CHANGED
The diff for this file is too large to render. See raw diff
 
models/tokenizer/ce_tokenizer_32k.model CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:4dc3aa08231d203aaef76058c47ffd13abd5516006e009bda38a089e8f521043
3
- size 941717
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e289f6fd8e9b238f0148774df190c869953e1555e6948ec6db0690eadf1fd5ce
3
+ size 945454
models/tokenizer/ce_tokenizer_32k.vocab CHANGED
The diff for this file is too large to render. See raw diff
 
models/tokenizer/ce_tokenizer_64k.model CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b0ca3910cc123379fbdb7db536fdc659e073f98693c0cdc2028daa98f77221fe
3
- size 1671632
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:99cfc51be84fc25c0da941a5a4501091b7d2ac044665bf3f53817ceea5b8115a
3
+ size 1687271
models/tokenizer/ce_tokenizer_64k.vocab CHANGED
The diff for this file is too large to render. See raw diff
 
models/tokenizer/ce_tokenizer_8k.model CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:6f2a3488af9a58357efbe0e4a03cc1777791f582cd49bc1b578b466f3c6fe09e
3
- size 409035
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad7d6f81cb0f634e0b57805278b1de80ab36729b4e1e14dd5ddac423f296422c
3
+ size 408272
models/tokenizer/ce_tokenizer_8k.vocab CHANGED
The diff for this file is too large to render. See raw diff
 
models/vocabulary/ce_vocabulary.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:53491c1be3ddba1539a45ab8004c1a5fe96048b27ea345ef740d3827b2c5eb4a
3
- size 3729004
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e4cb00df21ff5146979ebfe86cb61c1c26130bcb717a37bd3e60cd98f3341c11
3
+ size 3842393
models/vocabulary/ce_vocabulary_metadata.json CHANGED
@@ -1,17 +1,17 @@
1
  {
2
  "language": "ce",
3
- "vocabulary_size": 230774,
4
  "variant": "full",
5
  "statistics": {
6
- "type_token_ratio": 0.009492890208497928,
7
  "coverage": {
8
- "top_100": 0.4413509820362693,
9
- "top_1000": 0.8620556765599773,
10
- "top_5000": 0.9621095823063379,
11
- "top_10000": 0.9714254859934246
12
  },
13
- "hapax_count": 289712,
14
- "hapax_ratio": 0.5566182375702708,
15
- "total_documents": 613373
16
  }
17
  }
 
1
  {
2
  "language": "ce",
3
+ "vocabulary_size": 238347,
4
  "variant": "full",
5
  "statistics": {
6
+ "type_token_ratio": 0.007822593641512705,
7
  "coverage": {
8
+ "top_100": 0.4165497840379191,
9
+ "top_1000": 0.8303732763968381,
10
+ "top_5000": 0.9637552715646315,
11
+ "top_10000": 0.97350025474454
12
  },
13
+ "hapax_count": 288273,
14
+ "hapax_ratio": 0.5474023014697504,
15
+ "total_documents": 673751
16
  }
17
  }
models/word_markov/ce_markov_ctx1_word.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c3bb6f9d89f6d5115c2926090a43cce4d79d1f587ec3a35363d32b6e7597bef4
3
- size 26676852
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f073287368b50d266950b64f8f41a50996ae94389e99e453e6fd236f83850f3
3
+ size 28332972
models/word_markov/ce_markov_ctx1_word_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 1,
3
  "variant": "word",
4
  "language": "ce",
5
- "unique_contexts": 520111,
6
- "total_transitions": 54215661
7
  }
 
2
  "context_size": 1,
3
  "variant": "word",
4
  "language": "ce",
5
+ "unique_contexts": 526205,
6
+ "total_transitions": 66646632
7
  }
models/word_markov/ce_markov_ctx2_word.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:e50bac15d55d8209d7a94c5b47b626d9ec9efcfbc87ea034378579c8baae771a
3
- size 61192411
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac996dc8869c383c39c0fcdb3395b81e0c327ab8a0a27c2a17c5bebc89555de7
3
+ size 67997578
models/word_markov/ce_markov_ctx2_word_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 2,
3
  "variant": "word",
4
  "language": "ce",
5
- "unique_contexts": 2019671,
6
- "total_transitions": 53602288
7
  }
 
2
  "context_size": 2,
3
  "variant": "word",
4
  "language": "ce",
5
+ "unique_contexts": 2194953,
6
+ "total_transitions": 65972881
7
  }