omarkamali committed on
Commit 72f5007 · verified · 1 Parent(s): 7caef94

Upload all models and assets for av (latest)

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. .gitattributes +1 -0
  2. README.md +232 -197
  3. models/embeddings/aligned/av_128d.bin +3 -0
  4. models/embeddings/aligned/av_128d.meta.json +1 -0
  5. models/embeddings/aligned/av_128d.projection.npy +3 -0
  6. models/embeddings/aligned/av_128d_metadata.json +8 -0
  7. models/embeddings/aligned/av_32d.bin +3 -0
  8. models/embeddings/aligned/av_32d.meta.json +1 -0
  9. models/embeddings/aligned/av_32d.projection.npy +3 -0
  10. models/embeddings/aligned/av_32d_metadata.json +8 -0
  11. models/embeddings/aligned/av_64d.bin +3 -0
  12. models/embeddings/aligned/av_64d.meta.json +1 -0
  13. models/embeddings/aligned/av_64d.projection.npy +3 -0
  14. models/embeddings/aligned/av_64d_metadata.json +8 -0
  15. models/embeddings/monolingual/av_128d.bin +2 -2
  16. models/embeddings/monolingual/av_128d_metadata.json +1 -1
  17. models/embeddings/monolingual/av_32d.bin +2 -2
  18. models/embeddings/monolingual/av_32d_metadata.json +1 -1
  19. models/embeddings/monolingual/av_64d.bin +2 -2
  20. models/embeddings/monolingual/av_64d_metadata.json +1 -1
  21. models/subword_markov/av_markov_ctx1_subword.parquet +2 -2
  22. models/subword_markov/av_markov_ctx1_subword_metadata.json +2 -2
  23. models/subword_markov/av_markov_ctx2_subword.parquet +2 -2
  24. models/subword_markov/av_markov_ctx2_subword_metadata.json +2 -2
  25. models/subword_markov/av_markov_ctx3_subword.parquet +2 -2
  26. models/subword_markov/av_markov_ctx3_subword_metadata.json +2 -2
  27. models/subword_markov/av_markov_ctx4_subword.parquet +2 -2
  28. models/subword_markov/av_markov_ctx4_subword_metadata.json +2 -2
  29. models/subword_ngram/av_2gram_subword.parquet +2 -2
  30. models/subword_ngram/av_2gram_subword_metadata.json +2 -2
  31. models/subword_ngram/av_3gram_subword.parquet +2 -2
  32. models/subword_ngram/av_3gram_subword_metadata.json +2 -2
  33. models/subword_ngram/av_4gram_subword.parquet +2 -2
  34. models/subword_ngram/av_4gram_subword_metadata.json +2 -2
  35. models/subword_ngram/av_5gram_subword.parquet +3 -0
  36. models/subword_ngram/av_5gram_subword_metadata.json +7 -0
  37. models/tokenizer/av_tokenizer_16k.model +2 -2
  38. models/tokenizer/av_tokenizer_16k.vocab +0 -0
  39. models/tokenizer/av_tokenizer_32k.model +2 -2
  40. models/tokenizer/av_tokenizer_32k.vocab +0 -0
  41. models/tokenizer/av_tokenizer_64k.model +2 -2
  42. models/tokenizer/av_tokenizer_64k.vocab +0 -0
  43. models/tokenizer/av_tokenizer_8k.model +2 -2
  44. models/tokenizer/av_tokenizer_8k.vocab +0 -0
  45. models/vocabulary/av_vocabulary.parquet +2 -2
  46. models/vocabulary/av_vocabulary_metadata.json +9 -9
  47. models/word_markov/av_markov_ctx1_word.parquet +2 -2
  48. models/word_markov/av_markov_ctx1_word_metadata.json +2 -2
  49. models/word_markov/av_markov_ctx2_word.parquet +2 -2
  50. models/word_markov/av_markov_ctx2_word_metadata.json +2 -2
.gitattributes CHANGED
@@ -39,3 +39,4 @@ visualizations/position_encoding_comparison.png filter=lfs diff=lfs merge=lfs -t
 visualizations/tsne_sentences.png filter=lfs diff=lfs merge=lfs -text
 visualizations/tsne_words.png filter=lfs diff=lfs merge=lfs -text
 visualizations/zipf_law.png filter=lfs diff=lfs merge=lfs -text
+visualizations/embedding_tsne_multilingual.png filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
 language: av
-language_name: AV
+language_name: Avar
 language_family: caucasian_northeast
 tags:
 - wikilangs
@@ -10,11 +10,21 @@ tags:
 - n-gram
 - markov
 - wikipedia
+- feature-extraction
+- sentence-similarity
+- tokenization
+- n-grams
+- markov-chain
+- text-mining
+- fasttext
+- babelvec
+- vocabulous
+- vocabulary
 - monolingual
 - family-caucasian_northeast
 license: mit
 library_name: wikilangs
-pipeline_tag: feature-extraction
+pipeline_tag: text-generation
 datasets:
 - omarkamali/wikipedia-monthly
 dataset_info:
@@ -23,20 +33,20 @@ dataset_info:
 metrics:
 - name: best_compression_ratio
   type: compression
-  value: 4.697
+  value: 4.685
 - name: best_isotropy
   type: isotropy
-  value: 0.8716
+  value: 0.8604
 - name: vocabulary_size
   type: vocab
   value: 0
 generated: 2026-01-03
 ---
 
-# AV - Wikilangs Models
+# Avar - Wikilangs Models
 ## Comprehensive Research Report & Full Ablation Study
 
-This repository contains NLP models trained and evaluated by Wikilangs, specifically on **AV** Wikipedia data.
+This repository contains NLP models trained and evaluated by Wikilangs, specifically on **Avar** Wikipedia data.
 We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and word embeddings.
 
 ## 📋 Repository Contents
@@ -60,7 +70,7 @@ We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and
 - [3. Markov Chain Evaluation](#3-markov-chain-evaluation)
 - [4. Vocabulary Analysis](#4-vocabulary-analysis)
 - [5. Word Embeddings Evaluation](#5-word-embeddings-evaluation)
-- [6. Morphological Analysis (Experimental)](#6-morphological-analysis)
+- [6. Morphological Analysis (Experimental)](#6--morphological-analysis-experimental)
 - [7. Summary & Recommendations](#7-summary--recommendations)
 - [Metrics Glossary](#appendix-metrics-glossary--interpretation-guide)
 - [Visualizations Index](#visualizations-index)
@@ -80,47 +90,47 @@ We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and
 
 | Vocab Size | Compression | Avg Token Len | UNK Rate | Total Tokens |
 |------------|-------------|---------------|----------|--------------|
-| **8k** | 3.636x | 3.64 | 0.0717% | 252,363 |
-| **16k** | 4.040x | 4.04 | 0.0797% | 227,147 |
-| **32k** | 4.391x | 4.40 | 0.0866% | 208,961 |
-| **64k** | 4.697x 🏆 | 4.70 | 0.0927% | 195,348 |
+| **8k** | 3.628x | 3.63 | 0.0828% | 245,293 |
+| **16k** | 4.030x | 4.03 | 0.0919% | 220,825 |
+| **32k** | 4.383x | 4.39 | 0.1000% | 203,018 |
+| **64k** | 4.685x 🏆 | 4.69 | 0.1069% | 189,944 |
 
 ### Tokenization Examples
 
 Below are sample sentences tokenized with each vocabulary size:
 
-**Sample 1:** `Хъипчахъ () гъорлъе уна жибго Хъипчахъ росу. Гьеб росулъ гьабула . росаби`
+**Sample 1:** `19-абилеб Октябр грегорианияб календаралда рекъон къо (високоснияб соналъ св...`
 
 | Vocab | Tokens | Count |
 |-------|--------|-------|
-| 8k | `▁хъ ип ч ахъ ▁() ▁гъорлъе ▁уна ▁жибго ▁хъ ип ... (+9 more)` | 19 |
-| 16k | `▁хъ ип ч ахъ ▁() ▁гъорлъе ▁уна ▁жибго ▁хъ ип ... (+9 more)` | 19 |
-| 32k | `▁хъипчахъ ▁() ▁гъорлъе ▁уна ▁жибго ▁хъипчахъ ▁росу . ▁гьеб ▁росулъ ... (+3 more)` | 13 |
-| 64k | `▁хъипчахъ ▁() ▁гъорлъе ▁уна ▁жибго ▁хъипчахъ ▁росу . ▁гьеб ▁росулъ ... (+3 more)` | 13 |
+| 8k | `▁ 1 9 - абилеб ▁октябр ▁— ▁грегорианияб ▁календаралда ▁рекъон ... (+18 more)` | 28 |
+| 16k | `▁ 1 9 - абилеб ▁октябр ▁— ▁грегорианияб ▁календаралда ▁рекъон ... (+18 more)` | 28 |
+| 32k | `▁ 1 9 - абилеб ▁октябр ▁— ▁грегорианияб ▁календаралда ▁рекъон ... (+18 more)` | 28 |
+| 64k | `▁ 1 9 - абилеб ▁октябр ▁— ▁грегорианияб ▁календаралда ▁рекъон ... (+18 more)` | 28 |
 
-**Sample 2:** `26-абилеб июль грегорианияб календаралда рекъон къо (високоснияб соналъ свер...`
+**Sample 2:** `Пинкь яги ГьанамагӀ (латиназул мацӀалда bulla; Bullae) гӀадамасул лага-черх. л...`
 
 | Vocab | Tokens | Count |
 |-------|--------|-------|
-| 8k | `▁ 2 6 - абилеб ▁июль ▁— ▁грегорианияб ▁календаралда ▁рекъон ... (+19 more)` | 29 |
-| 16k | `▁ 2 6 - абилеб ▁июль ▁— ▁грегорианияб ▁календаралда ▁рекъон ... (+19 more)` | 29 |
-| 32k | `▁ 2 6 - абилеб ▁июль ▁— ▁грегорианияб ▁календаралда ▁рекъон ... (+19 more)` | 29 |
-| 64k | `▁ 2 6 - абилеб ▁июль ▁— ▁грегорианияб ▁календаралда ▁рекъон ... (+19 more)` | 29 |
+| 8k | `▁п ин кь ▁яги ▁гьан ам агӏ ▁( латиназул ▁мацӏалда ... (+18 more)` | 28 |
+| 16k | `▁пин кь ▁яги ▁гьан амагӏ ▁( латиназул ▁мацӏалда ▁b ul ... (+15 more)` | 25 |
+| 32k | `▁пин кь ▁яги ▁гьан амагӏ ▁( латиназул ▁мацӏалда ▁b ul ... (+14 more)` | 24 |
+| 64k | `▁пинкь ▁яги ▁гьанамагӏ ▁( латиназул ▁мацӏалда ▁b ul la ; ... (+11 more)` | 21 |
 
-**Sample 3:** `() ккола Билкан районалда гъорлъе унеб росу. росаби`
+**Sample 3:** `22-абилеб Октябр грегорианияб календаралда рекъон къо (високоснияб соналъ — св...`
 
 | Vocab | Tokens | Count |
 |-------|--------|-------|
-| 8k | `▁() ▁ккола ▁билкан ▁районалда ▁гъорлъе ▁унеб ▁росу . ▁росаби` | 9 |
-| 16k | `▁() ▁ккола ▁билкан ▁районалда ▁гъорлъе ▁унеб ▁росу . ▁росаби` | 9 |
-| 32k | `▁() ▁ккола ▁билкан ▁районалда ▁гъорлъе ▁унеб ▁росу . ▁росаби` | 9 |
-| 64k | `▁() ▁ккола ▁билкан ▁районалда ▁гъорлъе ▁унеб ▁росу . ▁росаби` | 9 |
+| 8k | `▁ 2 2 - абилеб ▁октябр ▁— ▁грегорианияб ▁календаралда ▁рекъон ... (+18 more)` | 28 |
+| 16k | `▁ 2 2 - абилеб ▁октябр ▁— ▁грегорианияб ▁календаралда ▁рекъон ... (+18 more)` | 28 |
+| 32k | `▁ 2 2 - абилеб ▁октябр ▁— ▁грегорианияб ▁календаралда ▁рекъон ... (+18 more)` | 28 |
+| 64k | `▁ 2 2 - абилеб ▁октябр ▁— ▁грегорианияб ▁календаралда ▁рекъон ... (+18 more)` | 28 |
 
 
 ### Key Findings
 
-- **Best Compression:** 64k achieves 4.697x compression
-- **Lowest UNK Rate:** 8k with 0.0717% unknown tokens
+- **Best Compression:** 64k achieves 4.685x compression
+- **Lowest UNK Rate:** 8k with 0.0828% unknown tokens
 - **Trade-off:** Larger vocabularies improve compression but increase model size
 - **Recommendation:** 32k vocabulary provides optimal balance for production use
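> **Editor's note:** the `.model` tokenizer files in this commit use `▁`-prefixed pieces, which suggests standard SentencePiece models. Below is a minimal sketch of loading one and reproducing the compression figure, assuming compression is measured as characters per token; it is an illustration, not part of the diff.

```python
# Sketch only: assumes the .model files are standard SentencePiece models
# and that "compression" above means characters per token.
import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="models/tokenizer/av_tokenizer_32k.model")

text = "Пинкь яги ГьанамагӀ (латиназул мацӀалда bulla; Bullae)"
pieces = sp.encode(text, out_type=str)
print(pieces)                   # e.g. ['▁пин', 'кь', '▁яги', ...]
print(len(text) / len(pieces))  # characters per token, i.e. the compression ratio
```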
@@ -137,12 +147,14 @@ Below are sample sentences tokenized with each vocabulary size:
 
 | N-gram | Variant | Perplexity | Entropy | Unique N-grams | Top-100 Coverage | Top-1000 Coverage |
 |--------|---------|------------|---------|----------------|------------------|-------------------|
-| **2-gram** | Word | 3,247 | 11.66 | 6,413 | 22.5% | 54.2% |
-| **2-gram** | Subword | 428 🏆 | 8.74 | 4,133 | 57.8% | 96.7% |
-| **3-gram** | Word | 2,834 | 11.47 | 6,427 | 26.0% | 57.0% |
-| **3-gram** | Subword | 3,424 | 11.74 | 28,949 | 23.6% | 62.9% |
-| **4-gram** | Word | 8,629 | 13.07 | 17,392 | 16.9% | 37.2% |
-| **4-gram** | Subword | 15,875 | 13.95 | 119,337 | 12.4% | 36.8% |
+| **2-gram** | Word | 3,089 | 11.59 | 6,523 | 23.7% | 56.2% |
+| **2-gram** | Subword | 424 🏆 | 8.73 | 4,120 | 58.0% | 96.7% |
+| **3-gram** | Word | 2,775 | 11.44 | 6,745 | 26.4% | 58.9% |
+| **3-gram** | Subword | 3,361 | 11.71 | 28,903 | 23.9% | 63.4% |
+| **4-gram** | Word | 8,260 | 13.01 | 18,126 | 17.8% | 39.8% |
+| **4-gram** | Subword | 15,393 | 13.91 | 119,191 | 12.7% | 37.5% |
+| **5-gram** | Word | 7,813 | 12.93 | 15,673 | 16.8% | 39.4% |
+| **5-gram** | Subword | 38,531 | 15.23 | 222,134 | 8.4% | 26.5% |
 
 ### Top 5 N-grams by Size
@@ -150,68 +162,88 @@ Below are sample sentences tokenized with each vocabulary size:
 **2-grams (Word):**
 
 | Rank | N-gram | Count |
 |------|--------|-------|
-| 1 | `росу буго` | 509 |
-| 2 | `лъугьа бахъинал` | 496 |
-| 3 | `география росу` | 461 |
-| 4 | `цо цо` | 455 |
-| 5 | `of the` | 441 |
+| 1 | `росу буго` | 710 |
+| 2 | `география росу` | 660 |
+| 3 | `мухъалъул росаби` | 578 |
+| 4 | `буго мухъалъул` | 530 |
+| 5 | `мухъалъул росу` | 523 |
 
 **3-grams (Word):**
 
 | Rank | N-gram | Count |
 |------|--------|-------|
-| 1 | `география росу буго` | 448 |
-| 2 | `лъугьа бахъинал гьаруна` | 368 |
-| 3 | `бахъинал гьаруна хвана` | 358 |
-| 4 | `байрамал лъугьа бахъинал` | 353 |
-| 5 | `гьаруна хвана ишараби` | 352 |
+| 1 | `география росу буго` | 645 |
+| 2 | `росу буго мухъалъул` | 523 |
+| 3 | `лъугьа бахъинал гьаруна` | 368 |
+| 4 | `бахъинал гьаруна хвана` | 358 |
+| 5 | `байрамал лъугьа бахъинал` | 353 |
 
 **4-grams (Word):**
 
 | Rank | N-gram | Count |
 |------|--------|-------|
-| 1 | `лъугьа бахъинал гьаруна хвана` | 358 |
-| 2 | `байрамал лъугьа бахъинал гьаруна` | 352 |
-| 3 | `къо байрамал лъугьа бахъинал` | 351 |
-| 4 | `бахъинал гьаруна хвана ишараби` | 349 |
-| 5 | `демография ккола моноэтникияб авар` | 329 |
+| 1 | `география росу буго мухъалъул` | 513 |
+| 2 | `лъугьа бахъинал гьаруна хвана` | 358 |
+| 3 | `байрамал лъугьа бахъинал гьаруна` | 352 |
+| 4 | `къо байрамал лъугьа бахъинал` | 351 |
+| 5 | `бахъинал гьаруна хвана ишараби` | 349 |
+
+**5-grams (Word):**
+
+| Rank | N-gram | Count |
+|------|--------|-------|
+| 1 | `къо байрамал лъугьа бахъинал гьаруна` | 350 |
+| 2 | `лъугьа бахъинал гьаруна хвана ишараби` | 349 |
+| 3 | `байрамал лъугьа бахъинал гьаруна хвана` | 348 |
+| 4 | `демография ккола моноэтникияб авар росулъун` | 305 |
+| 5 | `география росу буго мухъалъул марказ` | 279 |
 
 **2-grams (Subword):**
 
 | Rank | N-gram | Count |
 |------|--------|-------|
-| 1 | `а л` | 82,724 |
-| 2 | `л _` | 63,062 |
-| 3 | `л ъ` | 52,236 |
-| 4 | `а _` | 52,185 |
-| 5 | `у л` | 49,900 |
+| 1 | `а л` | 85,368 |
+| 2 | `л _` | 64,955 |
+| 3 | `л ъ` | 53,561 |
+| 4 | `а _` | 52,853 |
+| 5 | `у л` | 50,828 |
 
 **3-grams (Subword):**
 
 | Rank | N-gram | Count |
 |------|--------|-------|
-| 1 | `у л _` | 33,240 |
-| 2 | `л ъ у` | 30,603 |
-| 3 | `ъ у л` | 25,387 |
-| 4 | `а л ъ` | 23,574 |
-| 5 | `_ г ь` | 22,295 |
+| 1 | `у л _` | 34,266 |
+| 2 | `л ъ у` | 31,682 |
+| 3 | `ъ у л` | 26,429 |
+| 4 | `а л ъ` | 24,583 |
+| 5 | `_ г ь` | 22,014 |
 
 **4-grams (Subword):**
 
 | Rank | N-gram | Count |
 |------|--------|-------|
-| 1 | `л ъ у л` | 23,988 |
-| 2 | `ъ у л _` | 21,518 |
-| 3 | `а л ъ у` | 16,083 |
-| 4 | `а л д а` | 11,383 |
-| 5 | `_ г ь е` | 11,094 |
+| 1 | `л ъ у л` | 25,035 |
+| 2 | `ъ у л _` | 22,571 |
+| 3 | `а л ъ у` | 16,980 |
+| 4 | `а л д а` | 11,684 |
+| 5 | `_ г ь е` | 10,931 |
+
+**5-grams (Subword):**
+
+| Rank | N-gram | Count |
+|------|--------|-------|
+| 1 | `л ъ у л _` | 22,224 |
+| 2 | `а л ъ у л` | 15,591 |
+| 3 | `я л ъ у л` | 7,776 |
+| 4 | `а л д а _` | 7,381 |
+| 5 | `_ б у г о` | 5,843 |
 
 
 ### Key Findings
 
-- **Best Perplexity:** 2-gram (subword) with 428
+- **Best Perplexity:** 2-gram (subword) with 424
 - **Entropy Trend:** Decreases with larger n-grams (more predictable)
-- **Coverage:** Top-1000 patterns cover ~37% of corpus
+- **Coverage:** Top-1000 patterns cover ~26% of corpus
 - **Recommendation:** 4-gram or 5-gram for best predictive performance
 
 ---
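> **Editor's note:** the perplexity and entropy columns above are consistent with perplexity = 2^entropy (e.g. 2^8.73 ≈ 424 for the subword 2-gram). A minimal sketch of recomputing both from one of the count tables; the parquet column names `ngram` and `count` are assumptions, not a documented schema.

```python
# Sketch only: the parquet schema ("ngram", "count") is an assumption.
import numpy as np
import pandas as pd

df = pd.read_parquet("models/subword_ngram/av_2gram_subword.parquet")
p = df["count"] / df["count"].sum()   # maximum-likelihood n-gram probabilities
entropy = -(p * np.log2(p)).sum()     # Shannon entropy in bits
print(entropy, 2 ** entropy)          # perplexity = 2^entropy
```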
@@ -227,14 +259,14 @@ Below are sample sentences tokenized with each vocabulary size:
 
 | Context | Variant | Avg Entropy | Perplexity | Branching Factor | Unique Contexts | Predictability |
 |---------|---------|-------------|------------|------------------|-----------------|----------------|
-| **1** | Word | 0.6602 | 1.580 | 3.57 | 91,234 | 34.0% |
-| **1** | Subword | 1.1781 | 2.263 | 9.32 | 1,145 | 0.0% |
-| **2** | Word | 0.1256 | 1.091 | 1.21 | 324,656 | 87.4% |
-| **2** | Subword | 0.9990 | 1.999 | 5.68 | 10,664 | 0.1% |
-| **3** | Word | 0.0281 | 1.020 | 1.04 | 392,645 | 97.2% |
-| **3** | Subword | 0.7935 | 1.733 | 3.66 | 60,534 | 20.6% |
-| **4** | Word | 0.0114 🏆 | 1.008 | 1.02 | 406,500 | 98.9% |
-| **4** | Subword | 0.5614 | 1.476 | 2.33 | 221,628 | 43.9% |
+| **1** | Word | 0.6594 | 1.579 | 3.57 | 90,954 | 34.1% |
+| **1** | Subword | 1.1677 | 2.247 | 9.26 | 1,148 | 0.0% |
+| **2** | Word | 0.1264 | 1.092 | 1.22 | 323,475 | 87.4% |
+| **2** | Subword | 0.9998 | 2.000 | 5.69 | 10,625 | 0.0% |
+| **3** | Word | 0.0288 | 1.020 | 1.04 | 392,122 | 97.1% |
+| **3** | Subword | 0.7938 | 1.734 | 3.67 | 60,414 | 20.6% |
+| **4** | Word | 0.0121 🏆 | 1.008 | 1.02 | 406,770 | 98.8% |
+| **4** | Subword | 0.5607 | 1.475 | 2.33 | 221,366 | 43.9% |
 
 ### Generated Text Samples (Word-based)
@@ -242,27 +274,27 @@ Below are text samples generated from each word-based Markov chain model:
 
 **Context Size 1:**
 
-1. `ва бищун хирияб рокьуе рецц гьабун росулӏ историкияб кьучӏги x гіасру ккола гъуниб округалъул цӏигӏу...`
-2. `буго гьединго гьолокьги бекьизабун буго шумеразул ги къасимехалъ бачӏингун чагӏазда макьаби контрола...`
-3. `ккола гьижрияб соналъул 29 август цояб гьелъул буго гьанже батизе бегьула 1 гуржистаналъул бищун бор...`
+1. `ва испан фонология цогидал туркиял мацӏаз чанго шагьрияб гӏумру ��шавалда хурхарал феодализм социум с...`
+2. `буго республикалъул рутул мухъ буго шартіияб рикікіеналдалъун гьабураб бищун це б грузинский алфавит...`
+3. `бугеб муниципалияб гӏуцӏи гъорлӏе рачуна чӏужуялда хурхарал цогидал киналго хвана ишараби мугъчӏваял...`
 
 **Context Size 2:**
 
-1. `росу буго лъарагӏлъиялда хасавхъала мухъалда хасавхъалаялдаса 24 км ялъ жанубиябгин бакъбаккудехун а...`
-2. `лъугьа бахъинал гьаруна хвана ишараби мугъчӏваял гь балагье хіужаби иццал адабият гіурус маціалда бу...`
-3. `география росу буго лъарагӏлъиялда дибирилросу мухъалда дибирилросуялдаса 10 км ялъ шималиябгин бакъ...`
+1. `росу буго мухъалъул марказ лъаратӏаса 22 км лъ жанубияб бакъбаккудехун ралъдал гьурматӏаса 968 метра...`
+2. `география росу буго мухъалъул марказ лъаратӏаса 0 5 41 9 12 гуржиял 617 401 253 10 0`
+3. `буго мухъалъул центер уркарахъалдаса бакътӏерхьудехун демография референсал мухъалъул росаби мухъ ро...`
 
 **Context Size 3:**
 
-1. `география росу буго мухъалъул марказ хӏебдаса 15 километралъ бакъбаккудехун халкъ мугъчӏваял регӏела...`
-2. `лъугьа бахъинал гьаруна хвана ишараби мугъчӏваял гь балагье трактат адабият тайпаби изданиял`
-3. `байрамал лъугьа бахъинал гьаруна хвана ишараби мугъчӏваял гь балагье трактат адабият тайпаби издания...`
+1. `география росу буго мухъалъул марказ лъаратӏаса 22 км алъ демография ккола моноэтникияб авар росулъу...`
+2. `росу буго мухъалъул центер уркарахъалдаса жанубияб бакътӏерхьудехун ралъдал гьурматӏаса борхалъи буг...`
+3. `лъугьа бахъинал гьаруна хвана ишараби мугъчӏваял гь балагье трактат адабият тайпаби изданиял`
 
 **Context Size 4:**
 
-1. `байрамал лъугьа бахъинал гьаруна хвана ишараби мугъчӏваял гь балагье`
-2. `къо байрамал лъугьа бахъинал гьаруна хвана ишараби мугъчӏваял гь балагье`
-3. `лъугьа бахъинал гьаруна хвана ишараби мугъчӏваял гь балагье`
+1. `география росу буго мухъалъул марказ лъаратӏаса 5 км алъ шималалиябгин бакъбаккудехун аваргӏоралъул ...`
+2. `байрамал лъугьа бахъинал гьаруна хвана ишараби мугъчӏваял гь балагье`
+3. `къо байрамал лъугьа бахъинал гьаруна хвана ишараби мугъчӏваял гь балагье`
 
 
 ### Generated Text Samples (Subword-based)
@@ -271,34 +303,34 @@ Below are text samples generated from each subword-based Markov chain model:
 
 **Context Size 1:**
 
-1. `_iv–_гъугіо_usth`
-2. `абиза_и._д_2%_ке`
-3. `л_—_ilissoldan_|`
+1. `_ссва_—_1_вадаре`
+2. `ан._ия_в._тӏавар`
+3. `лдацӏиялъухъуск;`
 
 **Context Size 2:**
 
-1. `алъахъану_кконие_`
-2. `л_адекалабаяракӏ)`
-3. `лъул_кіаялъул_на_`
+1. `алдастияб_6_киябр`
+2. `л_джибацӏаниякеап`
+3. `лъул_бакъго_рахъе`
 
 **Context Size 3:**
 
-1. `ул_къотӏагораний_в`
-2. `лъул_реал_карт_гӏа`
-3. `ъулгун_ар-рип_хъал`
+1. `ул_намен_гьеб_раса`
+2. `лъулго_справенция)`
+3. `ъул_яги_перации_«г`
 
 **Context Size 4:**
 
-1. `лъул_хіалалда_чӏали`
-2. `ъул_большая_и_казбе`
-3. `алъул_руго_9:_мугъч`
+1. `лъул_ассив_гьел_ккв`
+2. `ъул_ківар_география`
+3. `алъулалде._борхалъу`
 
 
 ### Key Findings
 
-- **Best Predictability:** Context-4 (word) with 98.9% predictability
+- **Best Predictability:** Context-4 (word) with 98.8% predictability
 - **Branching Factor:** Decreases with context size (more deterministic)
-- **Memory Trade-off:** Larger contexts require more storage (221,628 contexts)
+- **Memory Trade-off:** Larger contexts require more storage (221,366 contexts)
 - **Recommendation:** Context-3 or Context-4 for text generation
 
 ---
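> **Editor's note:** a minimal sketch of how samples like those above can be drawn from one of the word-level Markov tables. The parquet columns (`context`, `next`, `count`) and the space-joined context encoding are assumptions, not a documented schema.

```python
# Sketch only: the transition-table schema is assumed, not documented.
import pandas as pd

df = pd.read_parquet("models/word_markov/av_markov_ctx2_word.parquet")

def generate(seed, steps=20):
    words = list(seed)
    for _ in range(steps):
        key = " ".join(words[-2:])                  # ctx2 = two-word context
        cand = df[df["context"] == key]
        if cand.empty:
            break
        # sample the next word proportionally to its transition count
        words.append(cand.sample(weights=cand["count"]).iloc[0]["next"])
    return " ".join(words)

print(generate(["росу", "буго"]))
```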
@@ -314,64 +346,64 @@ Below are text samples generated from each subword-based Markov chain model:
 
 | Metric | Value |
 |--------|-------|
-| Vocabulary Size | 34,392 |
-| Total Tokens | 405,867 |
-| Mean Frequency | 11.80 |
+| Vocabulary Size | 34,315 |
+| Total Tokens | 413,611 |
+| Mean Frequency | 12.05 |
 | Median Frequency | 3 |
-| Frequency Std Dev | 73.46 |
+| Frequency Std Dev | 77.17 |
 
 ### Most Common Words
 
 | Rank | Word | Frequency |
 |------|------|-----------|
-| 1 | ва | 7,245 |
-| 2 | буго | 5,074 |
-| 3 | ккола | 2,830 |
-| 4 | бугеб | 2,699 |
-| 5 | гьеб | 2,222 |
-| 6 | росу | 2,175 |
-| 7 | мухъалъул | 2,030 |
-| 8 | цо | 1,833 |
-| 9 | the | 1,815 |
-| 10 | соналъ | 1,799 |
+| 1 | ва | 7,138 |
+| 2 | буго | 5,684 |
+| 3 | бугеб | 2,903 |
+| 4 | ккола | 2,872 |
+| 5 | росу | 2,838 |
+| 6 | мухъалъул | 2,671 |
+| 7 | гьеб | 2,178 |
+| 8 | росдал | 1,902 |
+| 9 | the | 1,812 |
+| 10 | цо | 1,800 |
 
 ### Least Common Words (from vocabulary)
 
 | Rank | Word | Frequency |
 |------|------|-----------|
-| 1 | долтул | 2 |
-| 2 | кӏалалдаса | 2 |
-| 3 | шаргі | 2 |
-| 4 | харитӏун | 2 |
-| 5 | луткунги | 2 |
-| 6 | беглъуда | 2 |
-| 7 | къацӏар | 2 |
-| 8 | мичегь | 2 |
-| 9 | хъурукал | 2 |
-| 10 | мягьле | 2 |
+| 1 | уркутамахьи | 2 |
+| 2 | континуумалде | 2 |
+| 3 | къулецӏмаги | 2 |
+| 4 | гьаркӏасуниб | 2 |
+| 5 | махӏарги | 2 |
+| 6 | пилибхиталъул | 2 |
+| 7 | заповедникалда | 2 |
+| 8 | пилибхит | 2 |
+| 9 | лъалъадул | 2 |
+| 10 | хӏанчӏи | 2 |
 
 ### Zipf's Law Analysis
 
 | Metric | Value |
 |--------|-------|
-| Zipf Coefficient | 0.9506 |
-| R² (Goodness of Fit) | 0.993368 |
+| Zipf Coefficient | 0.9572 |
+| R² (Goodness of Fit) | 0.993745 |
 | Adherence Quality | **excellent** |
 
 ### Coverage Analysis
 
 | Top N Words | Coverage |
 |-------------|----------|
-| Top 100 | 22.5% |
-| Top 1,000 | 50.8% |
-| Top 5,000 | 73.6% |
-| Top 10,000 | 83.3% |
+| Top 100 | 23.1% |
+| Top 1,000 | 51.6% |
+| Top 5,000 | 74.2% |
+| Top 10,000 | 83.6% |
 
 ### Key Findings
 
-- **Zipf Compliance:** R²=0.9934 indicates excellent adherence to Zipf's law
-- **High Frequency Dominance:** Top 100 words cover 22.5% of corpus
-- **Long Tail:** 24,392 words needed for remaining 16.7% coverage
+- **Zipf Compliance:** R²=0.9937 indicates excellent adherence to Zipf's law
+- **High Frequency Dominance:** Top 100 words cover 23.1% of corpus
+- **Long Tail:** 24,315 words needed for remaining 16.4% coverage
 
376
  ---
377
  ## 5. Word Embeddings Evaluation
@@ -387,37 +419,40 @@ Below are text samples generated from each subword-based Markov chain model:
387
 
388
  ### 5.1 Cross-Lingual Alignment
389
 
390
- > *Note: Multilingual alignment visualization not available for this language.*
 
 
391
 
392
 
393
  ### 5.2 Model Comparison
394
 
395
  | Model | Dimension | Isotropy | Semantic Density | Alignment R@1 | Alignment R@10 |
396
  |-------|-----------|----------|------------------|---------------|----------------|
397
- | **mono_32d** | 32 | 0.8716 🏆 | 0.3278 | N/A | N/A |
398
- | **mono_64d** | 64 | 0.7240 | 0.2821 | N/A | N/A |
399
- | **mono_128d** | 128 | 0.2461 | 0.2702 | N/A | N/A |
 
 
 
400
 
401
  ### Key Findings
402
 
403
- - **Best Isotropy:** mono_32d with 0.8716 (more uniform distribution)
404
- - **Semantic Density:** Average pairwise similarity of 0.2934. Lower values indicate better semantic separation.
405
- - **Alignment Quality:** No aligned models evaluated in this run.
406
  - **Recommendation:** 128d aligned for best cross-lingual performance
407
 
408
  ---
409
  ## 6. Morphological Analysis (Experimental)
410
 
411
- > ⚠️ **Warning:** This language shows low morphological productivity. The statistical signals used for this analysis may be noisy or less reliable than for morphologically rich languages.
412
-
413
  This section presents an automated morphological analysis derived from the statistical divergence between word-level and subword-level models. By analyzing where subword predictability spikes and where word-level coverage fails, we can infer linguistic structures without supervised data.
414
 
415
  ### 6.1 Productivity & Complexity
416
 
417
  | Metric | Value | Interpretation | Recommendation |
418
  |--------|-------|----------------|----------------|
419
- | Productivity Index | **0.000** | Low morphological productivity | ⚠️ Likely unreliable |
420
- | Idiomaticity Gap | **-1.000** | Low formulaic content | - |
421
 
422
  ### 6.2 Affix Inventory (Productive Units)
423
 
@@ -426,21 +461,19 @@ These are the most productive prefixes and suffixes identified by sampling the v
 
 #### Productive Prefixes
 | Prefix | Examples |
 |--------|----------|
-| `-гь` | гьамчукъотӏи, гьечӏони, гьаркьалги |
-| `-гӏ` | гӏасру, гӏаракъи, гӏелмуялде |
-| `-ма` | материялъул, машгьадалда, магіарухъ |
+| `-ба` | батӏалъуда, батӏа, бахъинаро |
 
 #### Productive Suffixes
 | Suffix | Examples |
 |--------|----------|
-| `-л` | сабабал, материялъул, рикӏкӏиналъул |
-| `-а` | елена, современника, шагьаралда |
-| `-ул` | материялъул, рикӏкӏиналъул, хӏажиевасул |
-| `-да` | шагьаралда, машгьадалда, флорида |
-| `-ъул` | материялъул, рикӏкӏиналъул, медициналъул |
-| `-лъул` | материялъул, рикӏкӏиналъул, медициналъул |
-| `-ал` | сабабал, кьурахарал, къезавидал |
-| `-лда` | шагьаралда, машгьадалда, борталда |
+| `-л` | субтропикиял, кумикал, риччалел |
+| `-а` | лъараца, тіалъиялда, анатолиялдаса |
+| `-ул` | агьлулъиялъул, кипралъул, урарталъул |
+| `-ъул` | агьлулъиялъул, кипралъул, урарталъул |
+| `-лъул` | агьлулъиялъул, кипралъул, урарталъул |
+| `-да` | тіалъиялда, текстазда, батӏалъуда |
+| `-ал` | кумикал, туарегал, я́сал |
+| `-ги` | тахшагьарлъунги, яги, фортисги |
 
 ### 6.3 Bound Stems (Lexical Roots)
 
448
 
449
  | Stem | Cohesion | Substitutability | Examples |
450
  |------|----------|------------------|----------|
451
- | `алъу` | 1.82x | 100 contexts | алъул, далъун, ралъуе |
452
- | `агьа` | 1.89x | 59 contexts | дагьа, багьа, загьаб |
453
- | `ялъу` | 2.04x | 43 contexts | ялъул, аялъул, ялъуни |
454
- | `ьабу` | 2.16x | 29 contexts | гьабу, гьабун, кьабун |
455
- | `иялъ` | 1.96x | 36 contexts | абиялъе, химиялъ, лъиялъе |
456
- | `иялд` | 1.83x | 35 contexts | сиялда, азиялда, азиялде |
457
- | `анал` | 1.42x | 70 contexts | данал, канал, ханал |
458
- | `ралъ` | 1.49x | 53 contexts | ралъад, ралъуе, хералъ |
459
- | `буге` | 2.00x | 17 contexts | бугел, бугез, бугеб |
460
- | `иста` | 2.02x | 16 contexts | систан, христа, лазистан |
461
- | `лдас` | 2.06x | 15 contexts | лдаса, алдаса, ялдаса |
462
- | `азда` | 1.62x | 32 contexts | мазда, раздан, ишазда |
463
 
464
  ### 6.4 Affix Compatibility (Co-occurrence)
465
 
@@ -467,16 +500,16 @@ This table shows which prefixes and suffixes most frequently co-occur on the sam
 
 | Prefix | Suffix | Frequency | Examples |
 |--------|--------|-----------|----------|
-| `-гь` | `-л` | 44 words | гьавамухъал, гьудулзабазул |
-| `-ма` | `-л` | 40 words | мажлисалъул, маринил |
-| `-гӏ` | `-л` | 38 words | гӏурусазул, гӏалиевалъул |
-| `-ма` | `-а` | 35 words | макъалоялда, малъана |
-| `-гӏ` | `-а` | 29 words | гӏуцӏиялда, гӏодула |
-| `-гь` | `-а` | 28 words | гьада, гьала |
-| `-гӏ` | `-ул` | 25 words | гӏурусазул, гӏалиевалъул |
-| `-гь` | `-ул` | 24 words | гьудулзабазул, гьезул |
-| `-ма` | `-ул` | 21 words | мажлисалъул, мактабалъул |
-| `-ма` | `-да` | 16 words | макъалоялда, макъалаялда |
+| `-ба` | `-л` | 36 words | багьадурасул, бакътӏерхьул |
+| `-ба` | `-а` | 34 words | багъа, батӏалъана |
+| `-ба` | `-ул` | 17 words | багьадурасул, бакътӏерхьул |
+| `-ба` | `-ун` | 16 words | бахчун, бахъбаккудехун |
+| `-ба` | `-да` | 16 words | бащалъуда, балазда |
+| `-ба` | `-ал` | 11 words | бахӏсал, бакъбаккулал |
+| `-ба` | `-ъул` | 8 words | бавариялъул, баталйоналъул |
+| `-ба` | `-лда` | 8 words | бахъиялда, бахшалда |
+| `-ба` | `-ги` | 6 words | бакӏалъулги, бахӏарзабиги |
+| `-ба` | `-лъул` | 6 words | бавариялъул, баталйоналъул |
 
 ### 6.5 Recursive Morpheme Segmentation
 
@@ -484,26 +517,28 @@ Using **Recursive Hierarchical Substitutability**, we decompose complex words in
 
 | Word | Suggested Split | Confidence | Stem |
 |------|-----------------|------------|------|
-| руччабаздаги | **`руччабаз-да-ги`** | 6.0 | `руччабаз` |
-| хронологиялъул | **`хронология-лъул`** | 4.5 | `хронология` |
-| теориялда | **`теория-лда`** | 4.5 | `теория` |
-| къавмазул | **`къавмаз-ул`** | 4.5 | `къавмаз` |
-| къанагӏатал | **`къанагӏат-ал`** | 4.5 | `къанагӏат` |
-| групалъул | **`група-лъул`** | 4.5 | `група` |
-| ракьалъул | **`ракьа-лъул`** | 4.5 | `ракьа` |
-| такрарлъул | **`такрар-лъул`** | 4.5 | `такрар` |
-| алвеолариялги | **`алвеолариял-ги`** | 4.5 | `алвеолариял` |
-| европалъул | **`европа-лъул`** | 4.5 | `европа` |
-| гьабулаго | **`гь-абулаго`** | 4.5 | `абулаго` |
-| рахъалъги | **`рахъалъ-ги`** | 4.5 | `рахъалъ` |
-| пассажирги | **`пассажир-ги`** | 4.5 | `пассажир` |
-| партиялъул | **`партия-лъул`** | 4.5 | `партия` |
-| оппозициялъул | **`оппозиция-лъул`** | 4.5 | `оппозиция` |
+| къуръаналги | **`къуръан-ал-ги`** | 6.0 | `къуръан` |
+| ханасдаги | **`ханас-да-ги`** | 6.0 | `ханас` |
+| элементалги | **`элемент-ал-ги`** | 6.0 | `элемент` |
+| гьелъулги | **`гьел-ъул-ги`** | 6.0 | `гьел` |
+| гьармониялда | **`гьармония-лда`** | 4.5 | `гьармония` |
+| гьолокьги | **`гьолокь-ги`** | 4.5 | `гьолокь` |
+| хьондасебги | **`хьондасеб-ги`** | 4.5 | `хьондасеб` |
+| районазул | **`районаз-ул`** | 4.5 | `районаз` |
+| аскаразда | **`аскараз-да`** | 4.5 | `аскараз` |
+| экономикаги | **`экономика-ги`** | 4.5 | `экономика` |
+| процессазул | **`процессаз-ул`** | 4.5 | `процессаз` |
+| насрудиницаги | **`насрудиница-ги`** | 4.5 | `насрудиница` |
+| бугиланги | **`бугилан-ги`** | 4.5 | `бугилан` |
+| рагьаразул | **`рагьараз-ул`** | 4.5 | `рагьараз` |
+| минскалъул | **`минска-лъул`** | 4.5 | `минска` |
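> **Editor's note:** a toy illustration of suffix-based splitting in the spirit of the table above, using the suffix inventory from section 6.2. It does not reproduce the pipeline's actual Recursive Hierarchical Substitutability scoring.

```python
# Sketch only: greedy longest-suffix stripping, not the pipeline's real algorithm.
SUFFIXES = ["лъул", "ъул", "ул", "да", "ал", "ги", "л", "а"]  # from section 6.2, longest first

def segment(word, min_stem=4):
    parts = []
    while True:
        for suf in SUFFIXES:
            if word.endswith(suf) and len(word) - len(suf) >= min_stem:
                parts.insert(0, suf)          # record the suffix, outermost last
                word = word[: -len(suf)]
                break
        else:
            return word, parts                # no suffix matched: word is the stem

print(segment("къуръаналги"))                 # ('къуръан', ['ал', 'ги'])
```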
  ### 6.6 Linguistic Interpretation
504
 
505
  > **Automated Insight:**
506
- The language AV appears to be more isolating or has a highly fixed vocabulary. Word-level models perform nearly as well as subword models, indicating fewer productive morphological processes.
 
 
507
 
508
  ---
509
  ## 7. Summary & Recommendations
@@ -514,9 +549,9 @@ The language AV appears to be more isolating or has a highly fixed vocabulary. W
514
 
515
  | Component | Recommended | Rationale |
516
  |-----------|-------------|-----------|
517
- | Tokenizer | **64k BPE** | Best compression (4.70x) |
518
- | N-gram | **2-gram** | Lowest perplexity (428) |
519
- | Markov | **Context-4** | Highest predictability (98.9%) |
520
  | Embeddings | **100d** | Balanced semantic capture and isotropy |
521
 
522
 
@@ -730,4 +765,4 @@ MIT License - Free for academic and commercial use.
730
  ---
731
  *Generated by Wikilangs Models Pipeline*
732
 
733
- *Report Date: 2026-01-03 05:23:28*
 
1
  ---
2
  language: av
3
+ language_name: Avar
4
  language_family: caucasian_northeast
5
  tags:
6
  - wikilangs
 
10
  - n-gram
11
  - markov
12
  - wikipedia
13
+ - feature-extraction
14
+ - sentence-similarity
15
+ - tokenization
16
+ - n-grams
17
+ - markov-chain
18
+ - text-mining
19
+ - fasttext
20
+ - babelvec
21
+ - vocabulous
22
+ - vocabulary
23
  - monolingual
24
  - family-caucasian_northeast
25
  license: mit
26
  library_name: wikilangs
27
+ pipeline_tag: text-generation
28
  datasets:
29
  - omarkamali/wikipedia-monthly
30
  dataset_info:
 
33
  metrics:
34
  - name: best_compression_ratio
35
  type: compression
36
+ value: 4.685
37
  - name: best_isotropy
38
  type: isotropy
39
+ value: 0.8604
40
  - name: vocabulary_size
41
  type: vocab
42
  value: 0
43
  generated: 2026-01-03
44
  ---
45
 
46
+ # Avar - Wikilangs Models
47
  ## Comprehensive Research Report & Full Ablation Study
48
 
49
+ This repository contains NLP models trained and evaluated by Wikilangs, specifically on **Avar** Wikipedia data.
50
  We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and word embeddings.
51
 
52
  ## 📋 Repository Contents
 
70
  - [3. Markov Chain Evaluation](#3-markov-chain-evaluation)
71
  - [4. Vocabulary Analysis](#4-vocabulary-analysis)
72
  - [5. Word Embeddings Evaluation](#5-word-embeddings-evaluation)
73
+ - [6. Morphological Analysis (Experimental)](#6--morphological-analysis-experimental)
74
  - [7. Summary & Recommendations](#7-summary--recommendations)
75
  - [Metrics Glossary](#appendix-metrics-glossary--interpretation-guide)
76
  - [Visualizations Index](#visualizations-index)
 
90
 
91
  | Vocab Size | Compression | Avg Token Len | UNK Rate | Total Tokens |
92
  |------------|-------------|---------------|----------|--------------|
93
+ | **8k** | 3.628x | 3.63 | 0.0828% | 245,293 |
94
+ | **16k** | 4.030x | 4.03 | 0.0919% | 220,825 |
95
+ | **32k** | 4.383x | 4.39 | 0.1000% | 203,018 |
96
+ | **64k** | 4.685x 🏆 | 4.69 | 0.1069% | 189,944 |
97
 
98
  ### Tokenization Examples
99
 
100
  Below are sample sentences tokenized with each vocabulary size:
101
 
102
+ **Sample 1:** `19-абилеб Октябр грегорианияб календаралда рекъон къо (високоснияб соналъ св...`
103
 
104
  | Vocab | Tokens | Count |
105
  |-------|--------|-------|
106
+ | 8k | `▁ 1 9 - абилеб ▁октябр ▁— ▁грегорианияб ▁календаралда ▁рекъон ... (+18 more)` | 28 |
107
+ | 16k | `▁ 1 9 - абилеб ▁октябр ▁— ▁грегорианияб ▁календаралда ▁рекъон ... (+18 more)` | 28 |
108
+ | 32k | `▁ 1 9 - абилеб ▁октябр ▁— ▁грегорианияб ▁календаралда ▁рекъон ... (+18 more)` | 28 |
109
+ | 64k | `▁ 1 9 - абилеб ▁октябр ▁— ▁грегорианияб ▁календаралда ▁рекъон ... (+18 more)` | 28 |
110
 
111
+ **Sample 2:** `Пинкь яги ГьанамагӀ (латиназул мацӀалда bulla; Bullae) гӀадамасул лага-черх. л...`
112
 
113
  | Vocab | Tokens | Count |
114
  |-------|--------|-------|
115
+ | 8k | `▁п ин кь ▁яги ▁гьан ам агӏ ▁( латиназул ▁мацӏалда ... (+18 more)` | 28 |
116
+ | 16k | `▁пин кь ▁яги ▁гьан амагӏ ▁( латиназул ▁мацӏалда ▁b ul ... (+15 more)` | 25 |
117
+ | 32k | `▁пин кь ▁яги ▁гьан амагӏ ▁( латиназул ▁мацӏалда ▁b ul ... (+14 more)` | 24 |
118
+ | 64k | `▁пинкь ▁яги ▁гьанамагӏ ▁( латиназул ▁мацӏалда ▁b ul la ; ... (+11 more)` | 21 |
119
 
120
+ **Sample 3:** `22-абилеб Октябр грегорианияб календаралда рекъон къо (високоснияб соналъ — св...`
121
 
122
  | Vocab | Tokens | Count |
123
  |-------|--------|-------|
124
+ | 8k | `▁ 2 2 - абилеб ▁октябр ▁— ▁грегорианияб ▁календаралда ▁рекъон ... (+18 more)` | 28 |
125
+ | 16k | `▁ 2 2 - абилеб ▁октябр ▁— ▁грегорианияб ▁календаралда ▁рекъон ... (+18 more)` | 28 |
126
+ | 32k | `▁ 2 2 - абилеб ▁октябр ▁— ▁грегорианияб ▁календаралда ▁рекъон ... (+18 more)` | 28 |
127
+ | 64k | `▁ 2 2 - абилеб ▁октябр ▁— ▁грегорианияб ▁календаралда ▁рекъон ... (+18 more)` | 28 |
128
 
129
 
130
  ### Key Findings
131
 
132
+ - **Best Compression:** 64k achieves 4.685x compression
133
+ - **Lowest UNK Rate:** 8k with 0.0828% unknown tokens
134
  - **Trade-off:** Larger vocabularies improve compression but increase model size
135
  - **Recommendation:** 32k vocabulary provides optimal balance for production use
136
 
 
147
 
148
  | N-gram | Variant | Perplexity | Entropy | Unique N-grams | Top-100 Coverage | Top-1000 Coverage |
149
  |--------|---------|------------|---------|----------------|------------------|-------------------|
150
+ | **2-gram** | Word | 3,089 | 11.59 | 6,523 | 23.7% | 56.2% |
151
+ | **2-gram** | Subword | 424 🏆 | 8.73 | 4,120 | 58.0% | 96.7% |
152
+ | **3-gram** | Word | 2,775 | 11.44 | 6,745 | 26.4% | 58.9% |
153
+ | **3-gram** | Subword | 3,361 | 11.71 | 28,903 | 23.9% | 63.4% |
154
+ | **4-gram** | Word | 8,260 | 13.01 | 18,126 | 17.8% | 39.8% |
155
+ | **4-gram** | Subword | 15,393 | 13.91 | 119,191 | 12.7% | 37.5% |
156
+ | **5-gram** | Word | 7,813 | 12.93 | 15,673 | 16.8% | 39.4% |
157
+ | **5-gram** | Subword | 38,531 | 15.23 | 222,134 | 8.4% | 26.5% |
158
 
159
  ### Top 5 N-grams by Size
160
 
 
162
 
163
  | Rank | N-gram | Count |
164
  |------|--------|-------|
165
+ | 1 | `росу буго` | 710 |
166
+ | 2 | `география росу` | 660 |
167
+ | 3 | `мухъалъул росаби` | 578 |
168
+ | 4 | `буго мухъалъул` | 530 |
169
+ | 5 | `мухъалъул росу` | 523 |
170
 
171
  **3-grams (Word):**
172
 
173
  | Rank | N-gram | Count |
174
  |------|--------|-------|
175
+ | 1 | `география росу буго` | 645 |
176
+ | 2 | `росу буго мухъалъул` | 523 |
177
+ | 3 | `лъугьа бахъинал гьаруна` | 368 |
178
+ | 4 | `бахъинал гьаруна хвана` | 358 |
179
+ | 5 | `байрамал лъугьа бахъинал` | 353 |
180
 
181
  **4-grams (Word):**
182
 
183
  | Rank | N-gram | Count |
184
  |------|--------|-------|
185
+ | 1 | `география росу буго мухъалъул` | 513 |
186
+ | 2 | `лъугьа бахъинал гьаруна хвана` | 358 |
187
+ | 3 | `байрамал лъугьа бахъинал гьаруна` | 352 |
188
+ | 4 | `къо байрамал лъугьа бахъинал` | 351 |
189
+ | 5 | `бахъинал гьаруна хвана ишараби` | 349 |
190
+
191
+ **5-grams (Word):**
192
+
193
+ | Rank | N-gram | Count |
194
+ |------|--------|-------|
195
+ | 1 | `къо байрамал лъугьа бахъинал гьаруна` | 350 |
196
+ | 2 | `лъугьа бахъинал гьаруна хвана ишараби` | 349 |
197
+ | 3 | `байрамал лъугьа бахъинал гьаруна хвана` | 348 |
198
+ | 4 | `демография ккола моноэтникияб авар росулъун` | 305 |
199
+ | 5 | `география росу буго мухъалъул марказ` | 279 |
200
 
201
  **2-grams (Subword):**
202
 
203
  | Rank | N-gram | Count |
204
  |------|--------|-------|
205
+ | 1 | `а л` | 85,368 |
206
+ | 2 | `л _` | 64,955 |
207
+ | 3 | `л ъ` | 53,561 |
208
+ | 4 | `а _` | 52,853 |
209
+ | 5 | `у л` | 50,828 |
210
 
211
  **3-grams (Subword):**
212
 
213
  | Rank | N-gram | Count |
214
  |------|--------|-------|
215
+ | 1 | `у л _` | 34,266 |
216
+ | 2 | `л ъ у` | 31,682 |
217
+ | 3 | `ъ у л` | 26,429 |
218
+ | 4 | `а л ъ` | 24,583 |
219
+ | 5 | `_ г ь` | 22,014 |
220
 
221
  **4-grams (Subword):**
222
 
223
  | Rank | N-gram | Count |
224
  |------|--------|-------|
225
+ | 1 | `л ъ у л` | 25,035 |
226
+ | 2 | `ъ у л _` | 22,571 |
227
+ | 3 | `а л ъ у` | 16,980 |
228
+ | 4 | `а л д а` | 11,684 |
229
+ | 5 | `_ г ь е` | 10,931 |
230
+
231
+ **5-grams (Subword):**
232
+
233
+ | Rank | N-gram | Count |
234
+ |------|--------|-------|
235
+ | 1 | `л ъ у л _` | 22,224 |
236
+ | 2 | `а л ъ у л` | 15,591 |
237
+ | 3 | `я л ъ у л` | 7,776 |
238
+ | 4 | `а л д а _` | 7,381 |
239
+ | 5 | `_ б у г о` | 5,843 |
240
 
241
 
242
  ### Key Findings
243
 
244
+ - **Best Perplexity:** 2-gram (subword) with 424
245
  - **Entropy Trend:** Decreases with larger n-grams (more predictable)
246
+ - **Coverage:** Top-1000 patterns cover ~26% of corpus
247
  - **Recommendation:** 4-gram or 5-gram for best predictive performance
248
 
249
  ---
 
259
 
260
  | Context | Variant | Avg Entropy | Perplexity | Branching Factor | Unique Contexts | Predictability |
261
  |---------|---------|-------------|------------|------------------|-----------------|----------------|
262
+ | **1** | Word | 0.6594 | 1.579 | 3.57 | 90,954 | 34.1% |
263
+ | **1** | Subword | 1.1677 | 2.247 | 9.26 | 1,148 | 0.0% |
264
+ | **2** | Word | 0.1264 | 1.092 | 1.22 | 323,475 | 87.4% |
265
+ | **2** | Subword | 0.9998 | 2.000 | 5.69 | 10,625 | 0.0% |
266
+ | **3** | Word | 0.0288 | 1.020 | 1.04 | 392,122 | 97.1% |
267
+ | **3** | Subword | 0.7938 | 1.734 | 3.67 | 60,414 | 20.6% |
268
+ | **4** | Word | 0.0121 🏆 | 1.008 | 1.02 | 406,770 | 98.8% |
269
+ | **4** | Subword | 0.5607 | 1.475 | 2.33 | 221,366 | 43.9% |
270
 
271
  ### Generated Text Samples (Word-based)
272
 
 
274
 
275
  **Context Size 1:**
276
 
277
+ 1. `ва испан фонология цогидал туркиял мацӏаз чанго шагьрияб гӏумру ��шавалда хурхарал феодализм социум с...`
278
+ 2. `буго республикалъул рутул мухъ буго шартіияб рикікіеналдалъун гьабураб бищун це б грузинский алфавит...`
279
+ 3. `бугеб муниципалияб гӏуцӏи гъорлӏе рачуна чӏужуялда хурхарал цогидал киналго хвана ишараби мугъчӏваял...`
280
 
281
  **Context Size 2:**
282
 
283
+ 1. `росу буго мухъалъул марказ лъаратӏаса 22 км лъ жанубияб бакъбаккудехун ралъдал гьурматӏаса 968 метра...`
284
+ 2. `география росу буго мухъалъул марказ лъаратӏаса 0 5 41 9 12 гуржиял 617 401 253 10 0`
285
+ 3. `буго мухъалъул центер уркарахъалдаса бакътӏерхьудехун демография референсал мухъалъул росаби мухъ ро...`
286
 
287
  **Context Size 3:**
288
 
289
+ 1. `география росу буго мухъалъул марказ лъаратӏаса 22 км алъ демография ккола моноэтникияб авар росулъу...`
290
+ 2. `росу буго мухъалъул центер уркарахъалдаса жанубияб бакътӏерхьудехун ралъдал гьурматӏаса борхалъи буг...`
291
+ 3. `лъугьа бахъинал гьаруна хвана ишараби мугъчӏваял гь балагье трактат адабият тайпаби изданиял`
292
 
293
  **Context Size 4:**
294
 
295
+ 1. `география росу буго мухъалъул марказ лъаратӏаса 5 км алъ шималалиябгин бакъбаккудехун аваргӏоралъул ...`
296
+ 2. `байрамал лъугьа бахъинал гьаруна хвана ишараби мугъчӏваял гь балагье`
297
+ 3. `къо байрамал лъугьа бахъинал гьаруна хвана ишараби мугъчӏваял гь балагье`
298
 
299
 
300
  ### Generated Text Samples (Subword-based)
 
303
 
304
  **Context Size 1:**
305
 
306
+ 1. `_ссва_—_1_вадаре`
307
+ 2. `ан._ия_в._тӏавар`
308
+ 3. `лдацӏиялъухъуск;`
309
 
310
  **Context Size 2:**
311
 
312
+ 1. `алдастияб_6_киябр`
313
+ 2. `л_джибацӏаниякеап`
314
+ 3. `лъул_бакъго_рахъе`
315
 
316
  **Context Size 3:**
317
 
318
+ 1. `ул_намен_гьеб_раса`
319
+ 2. `лъулго_справенция)`
320
+ 3. `ъул_яги_перации_«г`
321
 
322
  **Context Size 4:**
323
 
324
+ 1. `лъул_ассив_гьел_ккв`
325
+ 2. `ъул_ківар_география`
326
+ 3. `алъулалде._борхалъу`
327
 
328
 
329
  ### Key Findings
330
 
331
+ - **Best Predictability:** Context-4 (word) with 98.8% predictability
332
  - **Branching Factor:** Decreases with context size (more deterministic)
333
+ - **Memory Trade-off:** Larger contexts require more storage (221,366 contexts)
334
  - **Recommendation:** Context-3 or Context-4 for text generation
335
 
336
  ---
 
346
 
347
  | Metric | Value |
348
  |--------|-------|
349
+ | Vocabulary Size | 34,315 |
350
+ | Total Tokens | 413,611 |
351
+ | Mean Frequency | 12.05 |
352
  | Median Frequency | 3 |
353
+ | Frequency Std Dev | 77.17 |
354
 
355
  ### Most Common Words
356
 
357
  | Rank | Word | Frequency |
358
  |------|------|-----------|
359
+ | 1 | ва | 7,138 |
360
+ | 2 | буго | 5,684 |
361
+ | 3 | бугеб | 2,903 |
362
+ | 4 | ккола | 2,872 |
363
+ | 5 | росу | 2,838 |
364
+ | 6 | мухъалъул | 2,671 |
365
+ | 7 | гьеб | 2,178 |
366
+ | 8 | росдал | 1,902 |
367
+ | 9 | the | 1,812 |
368
+ | 10 | цо | 1,800 |
369
 
370
  ### Least Common Words (from vocabulary)
371
 
372
  | Rank | Word | Frequency |
373
  |------|------|-----------|
374
+ | 1 | уркутамахьи | 2 |
375
+ | 2 | континуумалде | 2 |
376
+ | 3 | къулецӏмаги | 2 |
377
+ | 4 | гьаркӏасуниб | 2 |
378
+ | 5 | махӏарги | 2 |
379
+ | 6 | пилибхиталъул | 2 |
380
+ | 7 | заповедникалда | 2 |
381
+ | 8 | пилибхит | 2 |
382
+ | 9 | лъалъадул | 2 |
383
+ | 10 | хӏанчӏи | 2 |
384
 
385
  ### Zipf's Law Analysis
386
 
387
  | Metric | Value |
388
  |--------|-------|
389
+ | Zipf Coefficient | 0.9572 |
390
+ | R² (Goodness of Fit) | 0.993745 |
391
  | Adherence Quality | **excellent** |
392
 
393
  ### Coverage Analysis
394
 
395
  | Top N Words | Coverage |
396
  |-------------|----------|
397
+ | Top 100 | 23.1% |
398
+ | Top 1,000 | 51.6% |
399
+ | Top 5,000 | 74.2% |
400
+ | Top 10,000 | 83.6% |
401
 
402
  ### Key Findings
403
 
404
+ - **Zipf Compliance:** R²=0.9937 indicates excellent adherence to Zipf's law
405
+ - **High Frequency Dominance:** Top 100 words cover 23.1% of corpus
406
+ - **Long Tail:** 24,315 words needed for remaining 16.4% coverage
407
 
408
  ---
409
  ## 5. Word Embeddings Evaluation
 
419
 
420
  ### 5.1 Cross-Lingual Alignment
421
 
422
+ ![Alignment Quality](visualizations/embedding_alignment_quality.png)
423
+
424
+ ![Multilingual t-SNE](visualizations/embedding_tsne_multilingual.png)
425
 
426
 
427
  ### 5.2 Model Comparison
428
 
429
  | Model | Dimension | Isotropy | Semantic Density | Alignment R@1 | Alignment R@10 |
430
  |-------|-----------|----------|------------------|---------------|----------------|
431
+ | **mono_32d** | 32 | 0.8604 | 0.3207 | N/A | N/A |
432
+ | **mono_64d** | 64 | 0.7367 | 0.2711 | N/A | N/A |
433
+ | **mono_128d** | 128 | 0.2721 | 0.2530 | N/A | N/A |
434
+ | **aligned_32d** | 32 | 0.8604 🏆 | 0.3335 | 0.0200 | 0.1400 |
435
+ | **aligned_64d** | 64 | 0.7367 | 0.2791 | 0.0280 | 0.1780 |
436
+ | **aligned_128d** | 128 | 0.2721 | 0.2649 | 0.0820 | 0.2540 |
437
 
438
  ### Key Findings
439
 
440
+ - **Best Isotropy:** aligned_32d with 0.8604 (more uniform distribution)
441
+ - **Semantic Density:** Average pairwise similarity of 0.2870. Lower values indicate better semantic separation.
442
+ - **Alignment Quality:** Aligned models achieve up to 8.2% R@1 in cross-lingual retrieval.
443
  - **Recommendation:** 128d aligned for best cross-lingual performance
444
 
445
  ---
446
  ## 6. Morphological Analysis (Experimental)
447
 
 
 
448
  This section presents an automated morphological analysis derived from the statistical divergence between word-level and subword-level models. By analyzing where subword predictability spikes and where word-level coverage fails, we can infer linguistic structures without supervised data.
449
 
450
  ### 6.1 Productivity & Complexity
451
 
452
  | Metric | Value | Interpretation | Recommendation |
453
  |--------|-------|----------------|----------------|
454
+ | Productivity Index | **5.000** | High morphological productivity | Reliable analysis |
455
+ | Idiomaticity Gap | **0.488** | High formulaic/idiomatic content | - |
456
 
457
  ### 6.2 Affix Inventory (Productive Units)
458
 
 
461
  #### Productive Prefixes
462
  | Prefix | Examples |
463
  |--------|----------|
464
+ | `-ба` | батӏалъуда, батӏа, бахъинаро |
 
 
465
 
466
  #### Productive Suffixes
467
  | Suffix | Examples |
468
  |--------|----------|
469
+ | `-л` | субтропикиял, кумикал, риччалел |
470
+ | `-а` | лъараца, тіалъиялда, анатолиялдаса |
471
+ | `-ул` | агьлулъиялъул, кипралъул, урарталъул |
472
+ | `-ъул` | агьлулъиялъул, кипралъул, урарталъул |
473
+ | `-лъул` | агьлулъиялъул, кипралъул, урарталъул |
474
+ | `-да` | тіалъиялда, текстазда, батӏалъуда |
475
+ | `-ал` | кумикал, туарегал, я́сал |
476
+ | `-ги` | тахшагьарлъунги, яги, фортисги |
477
 
478
  ### 6.3 Bound Stems (Lexical Roots)
479
 
 
481
 
482
  | Stem | Cohesion | Substitutability | Examples |
483
  |------|----------|------------------|----------|
484
+ | `алъу` | 1.88x | 101 contexts | алъул, далъун, малъун |
485
+ | `ялъу` | 2.05x | 41 contexts | ялъул, ялъуни, аялъул |
486
+ | `ьабу` | 2.11x | 29 contexts | гьабу, гьабун, кьабун |
487
+ | `агьа` | 1.75x | 59 contexts | багьа, дагьа, шагьав |
488
+ | `иялъ` | 1.85x | 36 contexts | химиялъ, биялъул, армиялъ |
489
+ | `анал` | 1.48x | 70 contexts | канал, ханал, данал |
490
+ | `иялд` | 1.69x | 36 contexts | сиялда, азиялде, азиялда |
491
+ | `огра` | 1.87x | 22 contexts | географ, фотограф, этнограф |
492
+ | `азда` | 1.67x | 31 contexts | гьазда, ишазда, раздан |
493
+ | `налд` | 1.64x | 31 contexts | иналда, доналд, иналде |
494
+ | `гъор` | 2.15x | 13 contexts | гъорлі, гъорлъ, гъорлӏ |
495
+ | `лдас` | 2.01x | 15 contexts | лдаса, ялдаса, алдаса |
496
 
497
  ### 6.4 Affix Compatibility (Co-occurrence)
498
 
 
500
 
501
  | Prefix | Suffix | Frequency | Examples |
502
  |--------|--------|-----------|----------|
503
+ | `-ба` | `-л` | 36 words | багьадурасул, бакътӏерхьул |
504
+ | `-ба` | `-а` | 34 words | багъа, батӏалъана |
505
+ | `-ба` | `-ул` | 17 words | багьадурасул, бакътӏерхьул |
506
+ | `-ба` | `-ун` | 16 words | бахчун, бахъбаккудехун |
507
+ | `-ба` | `-да` | 16 words | бащалъуда, балазда |
508
+ | `-ба` | `-ал` | 11 words | бахӏсал, бакъбаккулал |
509
+ | `-ба` | `-ъул` | 8 words | бавариялъул, баталйоналъул |
510
+ | `-ба` | `-лда` | 8 words | бахъиялда, бахшалда |
511
+ | `-ба` | `-ги` | 6 words | бакӏалъулги, бахӏарзабиги |
512
+ | `-ба` | `-лъул` | 6 words | бавариялъул, баталйоналъул |
513
 
514
  ### 6.5 Recursive Morpheme Segmentation
515
 
 
517
 
518
  | Word | Suggested Split | Confidence | Stem |
519
  |------|-----------------|------------|------|
520
+ | къуръаналги | **`къуръан-ал-ги`** | 6.0 | `къуръан` |
521
+ | ханасдаги | **`ханас-да-ги`** | 6.0 | `ханас` |
522
+ | элементалги | **`элемент-ал-ги`** | 6.0 | `элемент` |
523
+ | гьелъулги | **`гьел-ъул-ги`** | 6.0 | `гьел` |
524
+ | гьармониялда | **`гьармония-лда`** | 4.5 | `гьармония` |
525
+ | гьолокьги | **`гьолокь-ги`** | 4.5 | `гьолокь` |
526
+ | хьондасебги | **`хьондасеб-ги`** | 4.5 | `хьондасеб` |
527
+ | районазул | **`районаз-ул`** | 4.5 | `районаз` |
528
+ | аскаразда | **`аскараз-да`** | 4.5 | `аскараз` |
529
+ | экономикаги | **`экономика-ги`** | 4.5 | `экономика` |
530
+ | процессазул | **`процессаз-ул`** | 4.5 | `процессаз` |
531
+ | насрудиницаги | **`насрудиница-ги`** | 4.5 | `насрудиница` |
532
+ | бугиланги | **`бугилан-ги`** | 4.5 | `бугилан` |
533
+ | рагьаразул | **`рагьараз-ул`** | 4.5 | `рагьараз` |
534
+ | минскалъул | **`минска-лъул`** | 4.5 | `минска` |
535
 
536
  ### 6.6 Linguistic Interpretation
537
 
538
  > **Automated Insight:**
539
+ The language Avar shows high morphological productivity. The subword models are significantly more efficient than word models, suggesting a rich system of affixation or compounding.
540
+
541
+ > **Note on Idiomaticity:** The high Idiomaticity Gap suggests a large number of frequent multi-word expressions or formulaic sequences that are statistically distinct from their component parts.
542
 
543
  ---
544
  ## 7. Summary & Recommendations
 
549
 
550
  | Component | Recommended | Rationale |
551
  |-----------|-------------|-----------|
552
+ | Tokenizer | **64k BPE** | Best compression (4.69x) |
553
+ | N-gram | **2-gram** | Lowest perplexity (424) |
554
+ | Markov | **Context-4** | Highest predictability (98.8%) |
555
  | Embeddings | **100d** | Balanced semantic capture and isotropy |
556
 
557
 
 
765
  ---
766
  *Generated by Wikilangs Models Pipeline*
767
 
768
+ *Report Date: 2026-01-03 18:29:30*
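> **Editor's note:** the remaining changes in this commit are Git LFS pointer updates. A minimal sketch of fetching a single artifact from the Hub instead of cloning the full LFS repository; the `repo_id` is a guess from the owner and language shown above, not confirmed by the diff.

```python
# Sketch only: the repo_id is hypothetical.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="omarkamali/wikilangs-av",          # hypothetical repo id
    filename="models/vocabulary/av_vocabulary.parquet",
)
print(path)
```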
models/embeddings/aligned/av_128d.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52180d54d8a01ed8dc3b1895710307bdf710be093a465547504c72098c065d36
+size 1036200777

models/embeddings/aligned/av_128d.meta.json ADDED
@@ -0,0 +1 @@
+{"lang": "av", "dim": 128, "max_seq_len": 512, "is_aligned": true}

models/embeddings/aligned/av_128d.projection.npy ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af5e663dfe219030fa1896110d65b3a61da08e57239e14eb6c20a27e254e50bd
+size 65664

models/embeddings/aligned/av_128d_metadata.json ADDED
@@ -0,0 +1,8 @@
+{
+  "language": "av",
+  "dimension": 128,
+  "version": "aligned",
+  "hub_language": "en",
+  "seed_vocab_size": 1918,
+  "vocab_size": 11646
+}
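> **Editor's note:** the three-line blocks above are Git LFS pointers (a `version` line, an `oid sha256:` digest, and a `size`), not the payloads themselves; `git lfs pull` fetches the real files. A minimal sketch of verifying a fetched file against its pointer.

```python
# Sketch only: checks a downloaded payload against the pointer's oid and size.
import hashlib
from pathlib import Path

def verify(payload_path, oid, size):
    data = Path(payload_path).read_bytes()
    assert len(data) == size, "size mismatch"
    assert hashlib.sha256(data).hexdigest() == oid, "oid mismatch"

verify("models/embeddings/aligned/av_128d.projection.npy",
       "af5e663dfe219030fa1896110d65b3a61da08e57239e14eb6c20a27e254e50bd",
       65664)
```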
models/embeddings/aligned/av_32d.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8251e7217d1b123fe6bbdd58c049f85f4fc0cb3e8fbb502325d478f85fd750a6
+size 259256649

models/embeddings/aligned/av_32d.meta.json ADDED
@@ -0,0 +1 @@
+{"lang": "av", "dim": 32, "max_seq_len": 512, "is_aligned": true}

models/embeddings/aligned/av_32d.projection.npy ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82c4c5c405118ebedb62e164f95bf087157687447049e734ffa92982f10b987b
+size 4224

models/embeddings/aligned/av_32d_metadata.json ADDED
@@ -0,0 +1,8 @@
+{
+  "language": "av",
+  "dimension": 32,
+  "version": "aligned",
+  "hub_language": "en",
+  "seed_vocab_size": 1918,
+  "vocab_size": 11646
+}

models/embeddings/aligned/av_64d.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ecf1ac33cdc6b30c9842ac01a360012c70da565e13e7a104b878aa86e312449
+size 518238025

models/embeddings/aligned/av_64d.meta.json ADDED
@@ -0,0 +1 @@
+{"lang": "av", "dim": 64, "max_seq_len": 512, "is_aligned": true}

models/embeddings/aligned/av_64d.projection.npy ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2010d3e67a96f1d9214b69d0ddea791f43f4604b27e2a6ccf657a97032095dd6
+size 16512

models/embeddings/aligned/av_64d_metadata.json ADDED
@@ -0,0 +1,8 @@
+{
+  "language": "av",
+  "dimension": 64,
+  "version": "aligned",
+  "hub_language": "en",
+  "seed_vocab_size": 1918,
+  "vocab_size": 11646
+}
models/embeddings/monolingual/av_128d.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:086998639c3328d1f88a224eb653bef49f8aa011b5880d7feb85792cbe742361
-size 1036208926
+oid sha256:52180d54d8a01ed8dc3b1895710307bdf710be093a465547504c72098c065d36
+size 1036200777

models/embeddings/monolingual/av_128d_metadata.json CHANGED
@@ -11,5 +11,5 @@
     "encoding_method": "rope",
     "dim": 128
   },
-  "vocab_size": 11654
+  "vocab_size": 11646
 }

models/embeddings/monolingual/av_32d.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6290250d27a46d72b90ede94292690c45a0a3f14bdbf05ab7aa5d07aa2093541
-size 259258654
+oid sha256:8251e7217d1b123fe6bbdd58c049f85f4fc0cb3e8fbb502325d478f85fd750a6
+size 259256649

models/embeddings/monolingual/av_32d_metadata.json CHANGED
@@ -11,5 +11,5 @@
     "encoding_method": "rope",
     "dim": 32
   },
-  "vocab_size": 11654
+  "vocab_size": 11646
 }

models/embeddings/monolingual/av_64d.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5b709ff729efabdd809f80746b620dba08607a5b3377d6237ee6f2434e1eb3c2
-size 518242078
+oid sha256:9ecf1ac33cdc6b30c9842ac01a360012c70da565e13e7a104b878aa86e312449
+size 518238025

models/embeddings/monolingual/av_64d_metadata.json CHANGED
@@ -11,5 +11,5 @@
     "encoding_method": "rope",
     "dim": 64
   },
-  "vocab_size": 11654
+  "vocab_size": 11646
 }
models/subword_markov/av_markov_ctx1_subword.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4517ac9631ea8dbfee38f5e0a123dfdfec510e3a9c1219d351bc1fd509b60c17
-size 81084
+oid sha256:38a5fc9cc98043e5e42a3d1b11dc89ca11fc329935cadbb52961882b9b7b83e9
+size 81204

models/subword_markov/av_markov_ctx1_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "context_size": 1,
   "variant": "subword",
   "language": "av",
-  "unique_contexts": 1145,
-  "total_transitions": 3671343
+  "unique_contexts": 1148,
+  "total_transitions": 3736506
 }

models/subword_markov/av_markov_ctx2_subword.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2851c37f629eaaf5084dca27968d6cf91fd8843aa330d489a4440f208d3e60cd
-size 486043
+oid sha256:c720711e9e69392708fe5e71b1b2a63522d60a2455f116510f96f263cce0bf14
+size 478235

models/subword_markov/av_markov_ctx2_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "context_size": 2,
   "variant": "subword",
   "language": "av",
-  "unique_contexts": 10664,
-  "total_transitions": 3667770
+  "unique_contexts": 10625,
+  "total_transitions": 3732688
 }

models/subword_markov/av_markov_ctx3_subword.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e93984ebab9588d00463857f800a79e61319598925a11d5785c24e04809162f3
-size 1681476
+oid sha256:5e23c4390424fb194a144d49a7a4d106ee4815414d1f02b382d16cdce335c3a2
+size 1619923

models/subword_markov/av_markov_ctx3_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "context_size": 3,
   "variant": "subword",
   "language": "av",
-  "unique_contexts": 60534,
-  "total_transitions": 3664197
+  "unique_contexts": 60414,
+  "total_transitions": 3728870
 }

models/subword_markov/av_markov_ctx4_subword.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:df78890cb55850cde1e4fbcdb06710bcbf3c0a04113c2fb5a81c53eb29e9a8bc
-size 4643917
+oid sha256:307469cf2a68ecb4dce27966c3ba2bb8cf0af5f6fdd08b5f1f21c93064675ab3
+size 4652863

models/subword_markov/av_markov_ctx4_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "context_size": 4,
   "variant": "subword",
   "language": "av",
-  "unique_contexts": 221628,
-  "total_transitions": 3660624
+  "unique_contexts": 221366,
+  "total_transitions": 3725052
 }
models/subword_ngram/av_2gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:933b0de1b63cb20550e97fa2419bacf817141c74827bc4e101e5d76c7780509c
-size 54611
+oid sha256:878073a5d2c637b1f85fafae2ba4be9157d6c82edb5088bf8dd9cb33ab1a58c8
+size 54488

models/subword_ngram/av_2gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "n": 2,
   "variant": "subword",
   "language": "av",
-  "unique_ngrams": 4133,
-  "total_ngrams": 3671343
+  "unique_ngrams": 4120,
+  "total_ngrams": 3736506
 }

models/subword_ngram/av_3gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ca32b51e264e1da8fb8ad605682f565e4a0fd97590d147b080946a6cf8993da2
-size 370254
+oid sha256:d6bc323576e241439ae55b2f57ce8af013b971d5e98dff0e748e2504fa6374f4
+size 371643

models/subword_ngram/av_3gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "n": 3,
   "variant": "subword",
   "language": "av",
-  "unique_ngrams": 28949,
-  "total_ngrams": 3667770
+  "unique_ngrams": 28903,
+  "total_ngrams": 3732688
 }

models/subword_ngram/av_4gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1511e4aa7f03fe2637243fa72bb121ccae146a0024ee7a81f233a708cc2778bb
-size 1469426
+oid sha256:a16219f855b243bc4265361726c9322bd40b68c3f3ec8c492a2fe9d88d9b9c54
+size 1466503

models/subword_ngram/av_4gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "n": 4,
   "variant": "subword",
   "language": "av",
-  "unique_ngrams": 119337,
-  "total_ngrams": 3664197
+  "unique_ngrams": 119191,
+  "total_ngrams": 3728870
 }

models/subword_ngram/av_5gram_subword.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56df9057219471fe915c809035a735c463eaa460b99a4d6af6abdbb9b6618763
+size 2908784

models/subword_ngram/av_5gram_subword_metadata.json ADDED
@@ -0,0 +1,7 @@
+{
+  "n": 5,
+  "variant": "subword",
+  "language": "av",
+  "unique_ngrams": 222134,
+  "total_ngrams": 3725052
+}
models/tokenizer/av_tokenizer_16k.model CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:95f05447a801c104acaac9e4a45ef1ade9a750529b7be0b69f4ee702b40bdd0f
-size 579855
+oid sha256:c084d64ab2c2f58d5bc9f1f2dc70eace9aeadaeadfd45a28841cae6bf2ef9e53
+size 581313

models/tokenizer/av_tokenizer_16k.vocab CHANGED
The diff for this file is too large to render.

models/tokenizer/av_tokenizer_32k.model CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bcc76ebd732e7de0cc3f1d1a7445d6e9812fc5861347bfbdb764a34ab378093a
-size 943739
+oid sha256:c6ff9e1bc49cd0cb18d9360212ae9e8cfe73a0a21e867e0912b1c87f6ad7b19a
+size 946330

models/tokenizer/av_tokenizer_32k.vocab CHANGED
The diff for this file is too large to render.

models/tokenizer/av_tokenizer_64k.model CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e25c37c305a3f71acc7fde1f68cea5d1b999e9cc76e1a9ae7e946535b348e7eb
-size 1709450
+oid sha256:dfa371e8e0d4e42c77bb7b2966ee37072f3b5cd2c0535d51bb585bd288e7eb4d
+size 1710003

models/tokenizer/av_tokenizer_64k.vocab CHANGED
The diff for this file is too large to render.

models/tokenizer/av_tokenizer_8k.model CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:69224dbe8521456dbe0800e5c74893d56ecd3d316ce5785f5ea5922ff2f1af24
-size 404399
+oid sha256:d34afc9d7d1c89ae47b12ffc26333fc4419d35ed30764eaf2e9938e4cce0dc23
+size 405256

models/tokenizer/av_tokenizer_8k.vocab CHANGED
The diff for this file is too large to render.
models/vocabulary/av_vocabulary.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a7310d663e46f21c8df63d5030b41ef164690182783e77aaf70f0bb2efff7bb0
-size 661413
+oid sha256:67880bc9e008144c86d9ad7b24a767201ed2594683e944ac426bacb90f203963
+size 659034

models/vocabulary/av_vocabulary_metadata.json CHANGED
@@ -1,17 +1,17 @@
 {
   "language": "av",
-  "vocabulary_size": 34392,
+  "vocabulary_size": 34315,
   "variant": "full",
   "statistics": {
-    "type_token_ratio": 0.1973921591928009,
+    "type_token_ratio": 0.19363404248081859,
     "coverage": {
-      "top_100": 0.19702269707347111,
-      "top_1000": 0.4456231702442555,
-      "top_5000": 0.6456771851739821,
-      "top_10000": 0.7303661131936867
+      "top_100": 0.20352823373591822,
+      "top_1000": 0.45408257631644405,
+      "top_5000": 0.652138603715743,
+      "top_10000": 0.7354887675205207
     },
-    "hapax_count": 56968,
-    "hapax_ratio": 0.623555166374781,
-    "total_documents": 3573
+    "hapax_count": 56766,
+    "hapax_ratio": 0.6232474390926758,
+    "total_documents": 3818
   }
 }
models/word_markov/av_markov_ctx1_word.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e06648f80de88e4993ac542ab602cc9aac6b01b06f56ff96c14658afaf6be279
-size 4391092
+oid sha256:000e753d5507ed19d49b2e77d3c852a9e226efade45d833420c7ab7bff43425b
+size 4379098

models/word_markov/av_markov_ctx1_word_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "context_size": 1,
   "variant": "word",
   "language": "av",
-  "unique_contexts": 91234,
-  "total_transitions": 459262
+  "unique_contexts": 90954,
+  "total_transitions": 466559
 }

models/word_markov/av_markov_ctx2_word.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c68afd2ecd43feece23866755466060024bed9599ffd5b4fb7f0eebba9e44b7c
-size 9606282
+oid sha256:748594fa3dfa2640edc5c4d6d16fa6fc935d127222889082f084b4c57286d0fc
+size 9577859

models/word_markov/av_markov_ctx2_word_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "context_size": 2,
   "variant": "word",
   "language": "av",
-  "unique_contexts": 324656,
-  "total_transitions": 455689
+  "unique_contexts": 323475,
+  "total_transitions": 462741
 }