omarkamali committed on
Commit bd17a4a · verified · 1 Parent(s): 7ca7ea7

Upload all models and assets for arz (latest)

This view is limited to 50 files because the commit contains too many changes.

Files changed (50)
  1. .gitattributes +1 -0
  2. README.md +212 -171
  3. models/embeddings/aligned/arz_128d.bin +3 -0
  4. models/embeddings/aligned/arz_128d.meta.json +1 -0
  5. models/embeddings/aligned/arz_128d.projection.npy +3 -0
  6. models/embeddings/aligned/arz_128d_metadata.json +8 -0
  7. models/embeddings/aligned/arz_32d.bin +3 -0
  8. models/embeddings/aligned/arz_32d.meta.json +1 -0
  9. models/embeddings/aligned/arz_32d.projection.npy +3 -0
  10. models/embeddings/aligned/arz_32d_metadata.json +8 -0
  11. models/embeddings/aligned/arz_64d.bin +3 -0
  12. models/embeddings/aligned/arz_64d.meta.json +1 -0
  13. models/embeddings/aligned/arz_64d.projection.npy +3 -0
  14. models/embeddings/aligned/arz_64d_metadata.json +8 -0
  15. models/embeddings/monolingual/arz_128d.bin +2 -2
  16. models/embeddings/monolingual/arz_128d_metadata.json +1 -1
  17. models/embeddings/monolingual/arz_32d.bin +2 -2
  18. models/embeddings/monolingual/arz_32d_metadata.json +1 -1
  19. models/embeddings/monolingual/arz_64d.bin +2 -2
  20. models/embeddings/monolingual/arz_64d_metadata.json +1 -1
  21. models/subword_markov/arz_markov_ctx1_subword.parquet +2 -2
  22. models/subword_markov/arz_markov_ctx1_subword_metadata.json +2 -2
  23. models/subword_markov/arz_markov_ctx2_subword.parquet +2 -2
  24. models/subword_markov/arz_markov_ctx2_subword_metadata.json +2 -2
  25. models/subword_markov/arz_markov_ctx3_subword.parquet +2 -2
  26. models/subword_markov/arz_markov_ctx3_subword_metadata.json +2 -2
  27. models/subword_markov/arz_markov_ctx4_subword.parquet +2 -2
  28. models/subword_markov/arz_markov_ctx4_subword_metadata.json +2 -2
  29. models/subword_ngram/arz_2gram_subword.parquet +2 -2
  30. models/subword_ngram/arz_2gram_subword_metadata.json +2 -2
  31. models/subword_ngram/arz_3gram_subword.parquet +2 -2
  32. models/subword_ngram/arz_3gram_subword_metadata.json +2 -2
  33. models/subword_ngram/arz_4gram_subword.parquet +2 -2
  34. models/subword_ngram/arz_4gram_subword_metadata.json +2 -2
  35. models/subword_ngram/arz_5gram_subword.parquet +3 -0
  36. models/subword_ngram/arz_5gram_subword_metadata.json +7 -0
  37. models/tokenizer/arz_tokenizer_16k.model +2 -2
  38. models/tokenizer/arz_tokenizer_16k.vocab +0 -0
  39. models/tokenizer/arz_tokenizer_32k.model +2 -2
  40. models/tokenizer/arz_tokenizer_32k.vocab +0 -0
  41. models/tokenizer/arz_tokenizer_64k.model +2 -2
  42. models/tokenizer/arz_tokenizer_64k.vocab +0 -0
  43. models/tokenizer/arz_tokenizer_8k.model +2 -2
  44. models/tokenizer/arz_tokenizer_8k.vocab +0 -0
  45. models/vocabulary/arz_vocabulary.parquet +2 -2
  46. models/vocabulary/arz_vocabulary_metadata.json +9 -9
  47. models/word_markov/arz_markov_ctx1_word.parquet +2 -2
  48. models/word_markov/arz_markov_ctx1_word_metadata.json +2 -2
  49. models/word_markov/arz_markov_ctx2_word.parquet +2 -2
  50. models/word_markov/arz_markov_ctx2_word_metadata.json +2 -2
.gitattributes CHANGED
@@ -39,3 +39,4 @@ visualizations/position_encoding_comparison.png filter=lfs diff=lfs merge=lfs -t
39
  visualizations/tsne_sentences.png filter=lfs diff=lfs merge=lfs -text
40
  visualizations/tsne_words.png filter=lfs diff=lfs merge=lfs -text
41
  visualizations/zipf_law.png filter=lfs diff=lfs merge=lfs -text
 
 
39
  visualizations/tsne_sentences.png filter=lfs diff=lfs merge=lfs -text
40
  visualizations/tsne_words.png filter=lfs diff=lfs merge=lfs -text
41
  visualizations/zipf_law.png filter=lfs diff=lfs merge=lfs -text
42
+ visualizations/embedding_tsne_multilingual.png filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -10,11 +10,21 @@ tags:
10
  - n-gram
11
  - markov
12
  - wikipedia
13
  - monolingual
14
  - family-arabic
15
  license: mit
16
  library_name: wikilangs
17
- pipeline_tag: feature-extraction
18
  datasets:
19
  - omarkamali/wikipedia-monthly
20
  dataset_info:
@@ -23,10 +33,10 @@ dataset_info:
23
  metrics:
24
  - name: best_compression_ratio
25
  type: compression
26
- value: 3.905
27
  - name: best_isotropy
28
  type: isotropy
29
- value: 0.7897
30
  - name: vocabulary_size
31
  type: vocab
32
  value: 0
@@ -60,7 +70,7 @@ We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and
60
  - [3. Markov Chain Evaluation](#3-markov-chain-evaluation)
61
  - [4. Vocabulary Analysis](#4-vocabulary-analysis)
62
  - [5. Word Embeddings Evaluation](#5-word-embeddings-evaluation)
63
- - [6. Morphological Analysis (Experimental)](#6-morphological-analysis)
64
  - [7. Summary & Recommendations](#7-summary--recommendations)
65
  - [Metrics Glossary](#appendix-metrics-glossary--interpretation-guide)
66
  - [Visualizations Index](#visualizations-index)
@@ -80,47 +90,47 @@ We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and
80
 
81
  | Vocab Size | Compression | Avg Token Len | UNK Rate | Total Tokens |
82
  |------------|-------------|---------------|----------|--------------|
83
- | **8k** | 2.876x | 2.88 | 0.8210% | 1,709,035 |
84
- | **16k** | 3.215x | 3.22 | 0.9180% | 1,528,463 |
85
- | **32k** | 3.559x | 3.56 | 1.0163% | 1,380,735 |
86
- | **64k** | 3.905x 🏆 | 3.91 | 1.1149% | 1,258,558 |
87
 
88
  ### Tokenization Examples
89
 
90
  Below are sample sentences tokenized with each vocabulary size:
91
 
92
- **Sample 1:** `تاملكوت هوا دوار فى المغرب. المكان تاملكوت موجود فى منطقه اداريه اسمها تماسين. س...`
93
 
94
  | Vocab | Tokens | Count |
95
  |-------|--------|-------|
96
- | 8k | `▁تام لك وت ▁هوا ▁دوار ▁فى ▁المغرب . ▁المك ان ... (+24 more)` | 34 |
97
- | 16k | `▁تام لك وت ▁هوا ▁دوار ▁فى ▁المغرب . ▁المك ان ... (+24 more)` | 34 |
98
- | 32k | `▁تام لك وت ▁هوا ▁دوار ▁فى ▁المغرب . ▁المكان ▁تام ... (+23 more)` | 33 |
99
- | 64k | `▁تام لك وت ▁هوا ▁دوار ▁فى ▁المغرب . ▁المكان ▁تام ... (+23 more)` | 33 |
100
 
101
- **Sample 2:** `جيريمى ديفيدسون مخرج افلام من امريكا. حياته جيريمى ديفيدسون من مواليد يوم 24 ديس...`
102
 
103
  | Vocab | Tokens | Count |
104
  |-------|--------|-------|
105
- | 8k | `▁جير يمى ▁ديفيد سون ▁مخرج ▁افلام ▁من ▁امريكا . ▁حياته ... (+23 more)` | 33 |
106
- | 16k | `▁جيريمى ▁ديفيد سون ▁مخرج ▁افلام ▁من ▁امريكا . ▁حياته ▁جيريمى ... (+21 more)` | 31 |
107
- | 32k | `▁جيريمى ▁ديفيدسون ▁مخرج ▁افلام ▁من ▁امريكا . ▁حياته ▁جيريمى ▁ديفيدسون ... (+19 more)` | 29 |
108
- | 64k | `▁جيريمى ▁ديفيدسون ▁مخرج ▁افلام ▁من ▁امريكا . ▁حياته ▁جيريمى ▁ديفيدسون ... (+19 more)` | 29 |
109
 
110
- **Sample 3:** `ابهينايا ممثله من الهند. حياتها ابهينايا من مواليد يوم 13 نوفمبر سنة فى كارناتاك...`
111
 
112
  | Vocab | Tokens | Count |
113
  |-------|--------|-------|
114
- | 8k | `▁اب ه ينا يا ▁ممثله ▁من ▁الهند . ▁حياتها ▁اب ... (+28 more)` | 38 |
115
- | 16k | `▁اب ه ينا يا ▁ممثله ▁من ▁الهند . ▁حياتها ▁اب ... (+27 more)` | 37 |
116
- | 32k | `▁ابه ينا يا ▁ممثله ▁من ▁الهند . ▁حياتها ▁ابه ينا ... (+25 more)` | 35 |
117
- | 64k | `▁ابه ينا يا ▁ممثله ▁من ▁الهند . ▁حياتها ▁ابه ينا ... (+24 more)` | 34 |
118
 
119
 
120
  ### Key Findings
121
 
122
- - **Best Compression:** 64k achieves 3.905x compression
123
- - **Lowest UNK Rate:** 8k with 0.8210% unknown tokens
124
  - **Trade-off:** Larger vocabularies improve compression but increase model size
125
  - **Recommendation:** 32k vocabulary provides optimal balance for production use
126
 
@@ -137,12 +147,14 @@ Below are sample sentences tokenized with each vocabulary size:
137
 
138
  | N-gram | Variant | Perplexity | Entropy | Unique N-grams | Top-100 Coverage | Top-1000 Coverage |
139
  |--------|---------|------------|---------|----------------|------------------|-------------------|
140
- | **2-gram** | Word | 5,793 | 12.50 | 1,073,861 | 30.2% | 66.4% |
141
- | **2-gram** | Subword | 316 🏆 | 8.30 | 15,451 | 62.6% | 98.6% |
142
- | **3-gram** | Word | 8,299 | 13.02 | 1,682,809 | 28.5% | 62.7% |
143
- | **3-gram** | Subword | 2,021 | 10.98 | 129,923 | 30.1% | 74.0% |
144
- | **4-gram** | Word | 12,842 | 13.65 | 3,054,922 | 27.3% | 59.4% |
145
- | **4-gram** | Subword | 7,215 | 12.82 | 788,718 | 19.6% | 56.9% |
 
 
146
 
147
  ### Top 5 N-grams by Size
148
 
@@ -150,21 +162,21 @@ Below are sample sentences tokenized with each vocabulary size:
150
 
151
  | Rank | N-gram | Count |
152
  |------|--------|-------|
153
- | 1 | `لينكات برانيه` | 1,293,684 |
154
- | 2 | `برانيه مصادر` | 1,167,581 |
155
- | 3 | `من مواليد` | 829,322 |
156
- | 4 | `مواليد يوم` | 809,177 |
157
  | 5 | `الاستوا السماوى` | 668,876 |
158
 
159
  **3-grams (Word):**
160
 
161
  | Rank | N-gram | Count |
162
  |------|--------|-------|
163
- | 1 | `لينكات برانيه مصادر` | 1,164,952 |
164
- | 2 | `من مواليد يوم` | 809,029 |
165
  | 3 | `خط الاستوا السماوى` | 630,228 |
166
- | 4 | `الدايره الساعيه لجرم` | 445,892 |
167
- | 5 | `الساعيه لجرم سماوى` | 445,892 |
168
 
169
  **4-grams (Word):**
170
 
@@ -172,46 +184,66 @@ Below are sample sentences tokenized with each vocabulary size:
172
  |------|--------|-------|
173
  | 1 | `الدايره الساعيه لجرم سماوى` | 445,892 |
174
  | 2 | `السماوى تكون قيمة بعده` | 445,860 |
175
- | 3 | `خط الاستوا السماوى تكون` | 445,860 |
176
- | 4 | `الاستوا السماوى تكون قيمة` | 445,860 |
177
- | 5 | `لينكات برانيه مصادر من` | 320,727 |
178
 
179
  **2-grams (Subword):**
180
 
181
  | Rank | N-gram | Count |
182
  |------|--------|-------|
183
- | 1 | `_ ا` | 31,144,333 |
184
- | 2 | `ا ل` | 30,224,243 |
185
- | 3 | `ه _` | 17,180,633 |
186
- | 4 | `_ م` | 13,559,836 |
187
- | 5 | `ى _` | 11,805,719 |
188
 
189
  **3-grams (Subword):**
190
 
191
  | Rank | N-gram | Count |
192
  |------|--------|-------|
193
- | 1 | `_ ا ل` | 25,116,125 |
194
- | 2 | `ي ه _` | 6,396,587 |
195
- | 3 | `ه _ ا` | 6,346,797 |
196
- | 4 | `ا ل م` | 5,946,692 |
197
- | 5 | `_ م ن` | 4,537,386 |
198
 
199
  **4-grams (Subword):**
200
 
201
  | Rank | N-gram | Count |
202
  |------|--------|-------|
203
- | 1 | `_ ا ل` | 5,297,759 |
204
- | 2 | `_ ا ل م` | 5,200,038 |
205
- | 3 | `_ ف ى _` | 4,251,301 |
206
- | 4 | `_ م ن _` | 3,906,606 |
207
- | 5 | `_ ا ل ا` | 3,578,656 |
208
 
209
 
210
  ### Key Findings
211
 
212
- - **Best Perplexity:** 2-gram (subword) with 316
213
  - **Entropy Trend:** Joint n-gram entropy grows with n; conditional (per-token) entropy falls as context lengthens (see the Markov results below)
214
- - **Coverage:** Top-1000 patterns cover ~57% of corpus
215
  - **Recommendation:** 4-gram or 5-gram for best predictive performance
216
 
217
  ---
@@ -227,14 +259,14 @@ Below are sample sentences tokenized with each vocabulary size:
227
 
228
  | Context | Variant | Avg Entropy | Perplexity | Branching Factor | Unique Contexts | Predictability |
229
  |---------|---------|-------------|------------|------------------|-----------------|----------------|
230
- | **1** | Word | 1.2217 | 2.332 | 9.13 | 1,353,062 | 0.0% |
231
- | **1** | Subword | 1.0533 | 2.075 | 8.28 | 5,726 | 0.0% |
232
- | **2** | Word | 0.3648 | 1.288 | 1.91 | 12,336,484 | 63.5% |
233
- | **2** | Subword | 0.7848 | 1.723 | 5.54 | 47,379 | 21.5% |
234
- | **3** | Word | 0.1139 | 1.082 | 1.28 | 23,517,673 | 88.6% |
235
- | **3** | Subword | 0.7673 | 1.702 | 4.73 | 262,420 | 23.3% |
236
- | **4** | Word | 0.0625 🏆 | 1.044 | 1.17 | 29,894,419 | 93.7% |
237
- | **4** | Subword | 0.7433 | 1.674 | 3.81 | 1,241,425 | 25.7% |
238
 
239
  ### Generated Text Samples (Word-based)
240
 
@@ -242,27 +274,27 @@ Below are text samples generated from each word-based Markov chain model:
242
 
243
  **Context Size 1:**
244
 
245
- 1. `فى العالم حسب المساحه لستة اكبر بحيرات اوروبا لينكات مصادر من مملكه ايطاليا حياته الرياضيه بيلعب`
246
- 2. `من مواليد يوم 16 يناير سنة فى ذا ماتشيس بتقدم الانواع الفنيه كانت دى لوبو من`
247
- 3. `و بتنقاس بالانزياح الاحمر المطلع المستقيم ممكن يتقاس بقوس دايره الاستواء السماويه من الجرى و نادى`
248
 
249
  **Context Size 2:**
250
 
251
- 1. `لينكات برانيه مصادر عجل ناريه من المانيا حياته اليكساندر انتونوڤيتش ريزونى اليكساندر انستروثير اليكس...`
252
- 2. `برانيه مصادر كوره قدم من الميكسيك حياته اڤير كاباليرو اڤيرالدو فيريرا لاعب كورة قدم من اليابان حياته`
253
- 3. `من مواليد يوم 19 اغسطس لسا عايشين فى استانبول لينكات برانيه مصادر هوكى الجليد من امريكا حياته`
254
 
255
  **Context Size 3:**
256
 
257
- 1. `لينكات برانيه مصادر سكان سكان فى ايران المكان ادم درهسى عليا adam darrehsi ye olya هيا تجمع سكان`
258
- 2. `من مواليد يوم 7 ديسمبر فى مونتفيدو الحياه الرياضيه بيلعب فى مركز مُدَافِع و لعب مع فريق ريال`
259
- 3. `خط الاستوا السماوى تكون قيمة بعده بالموجب و لو النجم جنوب خط الاستوا السماوى لو كان النجم شمال`
260
 
261
  **Context Size 4:**
262
 
263
  1. `الدايره الساعيه لجرم سماوى و الدايره الساعيه لنقطة الاعتدال الربيعى المطلع المستقيم ممكن يتقاس بقوس ...`
264
- 2. `الاستوا السماوى تكون قيمة بعده بالسالب مصادر كوكبه`
265
- 3. `السماوى تكون قيمة بعده بالسالب مصادر 2ماس كوكبه`
266
 
267
 
268
  ### Generated Text Samples (Subword-based)
@@ -271,34 +303,34 @@ Below are text samples generated from each subword-based Markov chain model:
271
 
272
  **Context Size 1:**
273
 
274
- 1. `_مرا_ارو_لسيه_س_`
275
- 2. `الدريه_اثر_كالال`
276
- 3. `لخطونالمطة_جو_عب`
277
 
278
  **Context Size 2:**
279
 
280
- 1. `_الكريتالسمات_فى_`
281
- 2. `اليه_عاعيه_مطلحجم`
282
- 3. `ه_بقه_ليكا_بقوى_ا`
283
 
284
  **Context Size 3:**
285
 
286
- 1. `_المستقيم_محمد_بيس`
287
- 2. `يه_مصادر_كورة_قدم_`
288
- 3. `ه_العقبت_برات_السم`
289
 
290
  **Context Size 4:**
291
 
292
- 1. `_السماوى_مع_فريق_ن`
293
- 2. `_المكافئ_الفلك._الم`
294
- 3. `_فى_باردوه_مصادر_اس`
295
 
296
 
297
  ### Key Findings
298
 
299
- - **Best Predictability:** Context-4 (word) with 93.7% predictability
300
  - **Branching Factor:** Decreases with context size (more deterministic)
301
- - **Memory Trade-off:** Larger contexts require more storage (1,241,425 contexts)
302
  - **Recommendation:** Context-3 or Context-4 for text generation
303
 
304
  ---
@@ -314,48 +346,48 @@ Below are text samples generated from each subword-based Markov chain model:
314
 
315
  | Metric | Value |
316
  |--------|-------|
317
- | Vocabulary Size | 856,070 |
318
- | Total Tokens | 116,711,182 |
319
- | Mean Frequency | 136.33 |
320
  | Median Frequency | 4 |
321
- | Frequency Std Dev | 9391.59 |
322
 
323
  ### Most Common Words
324
 
325
  | Rank | Word | Frequency |
326
  |------|------|-----------|
327
- | 1 | فى | 4,414,661 |
328
- | 2 | من | 3,909,776 |
329
- | 3 | و | 3,512,508 |
330
- | 4 | مصادر | 1,612,463 |
331
- | 5 | لينكات | 1,359,404 |
332
- | 6 | برانيه | 1,298,834 |
333
- | 7 | هيا | 1,062,266 |
334
- | 8 | اللى | 965,103 |
335
- | 9 | يوم | 853,034 |
336
- | 10 | مواليد | 836,295 |
337
 
338
  ### Least Common Words (from vocabulary)
339
 
340
  | Rank | Word | Frequency |
341
  |------|------|-----------|
342
- | 1 | algeriens | 2 |
343
- | 2 | وبتينا | 2 |
344
- | 3 | روتلُف | 2 |
345
- | 4 | bouabdellah | 2 |
346
- | 5 | الخُضرة | 2 |
347
- | 6 | impressionisms | 2 |
348
- | 7 | assyriaca | 2 |
349
- | 8 | جروكبيديا | 2 |
350
- | 9 | grokipedia | 2 |
351
- | 10 | grok | 2 |
352
 
353
  ### Zipf's Law Analysis
354
 
355
  | Metric | Value |
356
  |--------|-------|
357
- | Zipf Coefficient | 1.2602 |
358
- | R² (Goodness of Fit) | 0.994644 |
359
  | Adherence Quality | **excellent** |
360
 
361
  ### Coverage Analysis
@@ -363,15 +395,15 @@ Below are text samples generated from each subword-based Markov chain model:
363
  | Top N Words | Coverage |
364
  |-------------|----------|
365
  | Top 100 | 46.0% |
366
- | Top 1,000 | 76.7% |
367
- | Top 5,000 | 85.9% |
368
- | Top 10,000 | 89.0% |
369
 
370
  ### Key Findings
371
 
372
- - **Zipf Compliance:** R²=0.9946 indicates excellent adherence to Zipf's law
373
  - **High Frequency Dominance:** Top 100 words cover 46.0% of corpus
374
- - **Long Tail:** 846,070 words needed for remaining 11.0% coverage
375
 
376
  ---
377
  ## 5. Word Embeddings Evaluation
@@ -387,37 +419,40 @@ Below are text samples generated from each subword-based Markov chain model:
387
 
388
  ### 5.1 Cross-Lingual Alignment
389
 
390
- > *Note: Multilingual alignment visualization not available for this language.*
 
 
391
 
392
 
393
  ### 5.2 Model Comparison
394
 
395
  | Model | Dimension | Isotropy | Semantic Density | Alignment R@1 | Alignment R@10 |
396
  |-------|-----------|----------|------------------|---------------|----------------|
397
- | **mono_32d** | 32 | 0.7897 🏆 | 0.3482 | N/A | N/A |
398
- | **mono_64d** | 64 | 0.7690 | 0.2976 | N/A | N/A |
399
- | **mono_128d** | 128 | 0.7177 | 0.2526 | N/A | N/A |
 
 
 
400
 
401
  ### Key Findings
402
 
403
- - **Best Isotropy:** mono_32d with 0.7897 (more uniform distribution)
404
- - **Semantic Density:** Average pairwise similarity of 0.2995. Lower values indicate better semantic separation.
405
- - **Alignment Quality:** No aligned models evaluated in this run.
406
  - **Recommendation:** 128d aligned for best cross-lingual performance
407
 
408
  ---
409
  ## 6. Morphological Analysis (Experimental)
410
 
411
- > ⚠️ **Warning:** This language shows low morphological productivity. The statistical signals used for this analysis may be noisy or less reliable than for morphologically rich languages.
412
-
413
  This section presents an automated morphological analysis derived from the statistical divergence between word-level and subword-level models. By analyzing where subword predictability spikes and where word-level coverage fails, we can infer linguistic structures without supervised data.
414
 
415
  ### 6.1 Productivity & Complexity
416
 
417
  | Metric | Value | Interpretation | Recommendation |
418
  |--------|-------|----------------|----------------|
419
- | Productivity Index | **0.000** | Low morphological productivity | ⚠️ Likely unreliable |
420
- | Idiomaticity Gap | **-1.000** | Low formulaic content | - |
421
 
422
  ### 6.2 Affix Inventory (Productive Units)
423
 
@@ -426,13 +461,15 @@ These are the most productive prefixes and suffixes identified by sampling the v
426
  #### Productive Prefixes
427
  | Prefix | Examples |
428
  |--------|----------|
429
- | `-ال` | الخوذ, المندوبين, الدمرداشيه |
 
430
 
431
  #### Productive Suffixes
432
  | Suffix | Examples |
433
  |--------|----------|
434
- | `-ين` | كلوكيرين, بيرجرين, المندوبين |
435
- | `-ان` | مالڤان, ملازمان, پايرلمان |
 
436
 
437
  ### 6.3 Bound Stems (Lexical Roots)
438
 
@@ -440,18 +477,18 @@ Bound stems are high-frequency subword units that are semantically cohesive but
440
 
441
  | Stem | Cohesion | Substitutability | Examples |
442
  |------|----------|------------------|----------|
443
- | `العا` | 1.85x | 296 contexts | العام, العاج, العال |
444
- | `المج` | 1.79x | 267 contexts | المجد, المجر, المجئ |
445
- | `انزي` | 1.95x | 165 contexts | انزيا, انزيت, انزيد |
446
- | `الشع` | 2.11x | 103 contexts | الشعب, الشعف, الشعز |
447
- | `ياته` | 2.11x | 96 contexts | عياته, آياته, حياته |
448
- | `الاع` | 2.00x | 107 contexts | الاعور, الاعتر, الاعدا |
449
- | `مستق` | 2.01x | 80 contexts | مستقل, مستقر, مستقله |
450
- | `الاح` | 1.79x | 110 contexts | الاحد, صالاحى, الاحرش |
451
- | `لموج` | 2.13x | 48 contexts | لموجة, الموج, الموجب |
452
- | `لمجر` | 1.85x | 71 contexts | لمجره, المجر, لمجرة |
453
- | `لساع` | 2.34x | 28 contexts | لساعة, الساعة, لساعات |
454
- | `مريك` | 1.69x | 102 contexts | لمريك, مريكا, مريكن |
455
 
456
  ### 6.4 Affix Compatibility (Co-occurrence)
457
 
@@ -459,8 +496,12 @@ This table shows which prefixes and suffixes most frequently co-occur on the sam
459
 
460
  | Prefix | Suffix | Frequency | Examples |
461
  |--------|--------|-----------|----------|
462
- | `-ال` | `-ين` | 47 words | الصديقين, الحدوديين |
463
- | `-ال` | `-ان` | 11 words | الأخوان, الترامان |
 
 
 
 
464
 
465
  ### 6.5 Recursive Morpheme Segmentation
466
 
@@ -468,26 +509,26 @@ Using **Recursive Hierarchical Substitutability**, we decompose complex words in
468
 
469
  | Word | Suggested Split | Confidence | Stem |
470
  |------|-----------------|------------|------|
471
- | السريانيين | **`ال-سرياني-ين`** | 6.0 | `سرياني` |
472
- | كانتيلينين | **`كانتيل-ين-ين`** | 6.0 | `كانتيل` |
473
- | الجينومية | **`ال-جينومية`** | 4.5 | `جينومية` |
474
- | البرمجيات | **`ال-برمجيات`** | 4.5 | `برمجيات` |
475
- | الاستعلامات | **`ال-استعلامات`** | 4.5 | `استعلامات` |
476
- | بيجلاندسفچوردين | **`بيجلاندسفچورد-ين`** | 4.5 | `بيجلاندسفچورد` |
477
- | السينابون | **`ال-سينابون`** | 4.5 | `سينابون` |
478
- | الديمقراطي | **`ال-ديمقراطي`** | 4.5 | `ديمقراطي` |
479
- | الانبعاثية | **`ال-انبعاثية`** | 4.5 | `انبعاثية` |
480
- | الميتانيه | **`ال-ميتانيه`** | 4.5 | `ميتانيه` |
481
- | الطويحينه | **`ال-طويحينه`** | 4.5 | `طويحينه` |
482
- | الصابونجى | **`ال-صابونجى`** | 4.5 | `صابونجى` |
483
- | البنغاليه | **`ال-بنغاليه`** | 4.5 | `بنغاليه` |
484
- | المتحدثون | **`ال-متحدثون`** | 4.5 | `متحدثون` |
485
- | ستشميدلين | **`ستشميدل-ين`** | 4.5 | `ستشميدل` |
486
 
487
  ### 6.6 Linguistic Interpretation
488
 
489
  > **Automated Insight:**
490
- The language Egyptian Arabic appears to be more isolating or has a highly fixed vocabulary. Word-level models perform nearly as well as subword models, indicating fewer productive morphological processes.
491
 
492
  ---
493
  ## 7. Summary & Recommendations
@@ -498,9 +539,9 @@ The language Egyptian Arabic appears to be more isolating or has a highly fixed
498
 
499
  | Component | Recommended | Rationale |
500
  |-----------|-------------|-----------|
501
- | Tokenizer | **64k BPE** | Best compression (3.91x) |
502
- | N-gram | **2-gram** | Lowest perplexity (316) |
503
- | Markov | **Context-4** | Highest predictability (93.7%) |
504
  | Embeddings | **100d** | Balanced semantic capture and isotropy |
505
 
506
 
@@ -714,4 +755,4 @@ MIT License - Free for academic and commercial use.
714
  ---
715
  *Generated by Wikilangs Models Pipeline*
716
 
717
- *Report Date: 2026-01-03 07:45:31*
 
10
  - n-gram
11
  - markov
12
  - wikipedia
13
+ - feature-extraction
14
+ - sentence-similarity
15
+ - tokenization
16
+ - n-grams
17
+ - markov-chain
18
+ - text-mining
19
+ - fasttext
20
+ - babelvec
21
+ - vocabulous
22
+ - vocabulary
23
  - monolingual
24
  - family-arabic
25
  license: mit
26
  library_name: wikilangs
27
+ pipeline_tag: text-generation
28
  datasets:
29
  - omarkamali/wikipedia-monthly
30
  dataset_info:
 
33
  metrics:
34
  - name: best_compression_ratio
35
  type: compression
36
+ value: 3.899
37
  - name: best_isotropy
38
  type: isotropy
39
+ value: 0.7938
40
  - name: vocabulary_size
41
  type: vocab
42
  value: 0
 
70
  - [3. Markov Chain Evaluation](#3-markov-chain-evaluation)
71
  - [4. Vocabulary Analysis](#4-vocabulary-analysis)
72
  - [5. Word Embeddings Evaluation](#5-word-embeddings-evaluation)
73
+ - [6. Morphological Analysis (Experimental)](#6--morphological-analysis-experimental)
74
  - [7. Summary & Recommendations](#7-summary--recommendations)
75
  - [Metrics Glossary](#appendix-metrics-glossary--interpretation-guide)
76
  - [Visualizations Index](#visualizations-index)
 
90
 
91
  | Vocab Size | Compression | Avg Token Len | UNK Rate | Total Tokens |
92
  |------------|-------------|---------------|----------|--------------|
93
+ | **8k** | 2.872x | 2.87 | 0.8437% | 1,716,209 |
94
+ | **16k** | 3.211x | 3.21 | 0.9431% | 1,535,351 |
95
+ | **32k** | 3.553x | 3.55 | 1.0437% | 1,387,311 |
96
+ | **64k** | 3.899x 🏆 | 3.90 | 1.1453% | 1,264,296 |
97
 
98
  ### Tokenization Examples
99
 
100
  Below are sample sentences tokenized with each vocabulary size:
101
 
102
+ **Sample 1:** `سينافريدى ( الاسم العلمى: Synaphridae ) هوا فصيله من العنكبيات بيتبع عنكبوت. لين...`
103
 
104
  | Vocab | Tokens | Count |
105
  |-------|--------|-------|
106
+ | 8k | `▁سين اف ريد ى ▁( ▁الاسم ▁العلم ى : ▁s ... (+29 more)` | 39 |
107
+ | 16k | `▁سين اف ريدى ▁( ▁الاسم ▁العلمى : ▁s yn ap ... (+24 more)` | 34 |
108
+ | 32k | `▁سين اف ريدى ▁( ▁الاسم ▁العلمى : ▁syn ap h ... (+22 more)` | 32 |
109
+ | 64k | `▁سين اف ريدى ▁( ▁الاسم ▁العلمى : ▁syn aph rida ... (+20 more)` | 30 |
110
 
111
+ **Sample 2:** `اينديرا باچت لاعبه شطرنج من سلوفينيا و كازاخستان. حياتها اينديرا باچت من مواليد ...`
112
 
113
  | Vocab | Tokens | Count |
114
  |-------|--------|-------|
115
+ | 8k | `▁ايند يرا ▁با چ ت ▁لاعبه ▁شطرنج ▁من ▁سلوفينيا ▁و ... (+24 more)` | 34 |
116
+ | 16k | `▁ايند يرا ▁با چ ت ▁لاعبه ▁شطرنج ▁من ▁سلوفينيا ▁و ... (+24 more)` | 34 |
117
+ | 32k | `▁ايند يرا ▁باچ ت ▁لاعبه ▁شطرنج ▁من ▁سلوفينيا ▁و ▁كازاخستان ... (+22 more)` | 32 |
118
+ | 64k | `▁ايند يرا ▁باچ ت ▁لاعبه ▁شطرنج ▁من ▁سلوفينيا ▁و ▁كازاخستان ... (+22 more)` | 32 |
119
 
120
+ **Sample 3:** `مفطورة الخنازير ( الاسم العلمى: Mycoplasma suis ) هوا نوع من بدائيات النوى بيتبع...`
121
 
122
  | Vocab | Tokens | Count |
123
  |-------|--------|-------|
124
+ | 8k | `▁مف ط ورة ▁الخ نا زير ▁( ▁الاسم ▁العلم ى ... (+32 more)` | 42 |
125
+ | 16k | `▁مف ط ورة ▁الخ نا زير ▁( ▁الاسم ▁العلمى : ... (+30 more)` | 40 |
126
+ | 32k | `▁مف ط ورة ▁الخ نا زير ▁( ▁الاسم ▁العلمى : ... (+30 more)` | 40 |
127
+ | 64k | `▁مف ط ورة ▁الخ نا زير ▁( ▁الاسم ▁العلمى : ... (+29 more)` | 39 |
128
 
129
 
130
  ### Key Findings
131
 
132
+ - **Best Compression:** 64k achieves 3.899x compression
133
+ - **Lowest UNK Rate:** 8k with 0.8437% unknown tokens
134
  - **Trade-off:** Larger vocabularies improve compression but increase model size
135
  - **Recommendation:** 32k vocabulary provides optimal balance for production use
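
For readers who want to sanity-check the compression numbers above, a minimal sketch follows. It assumes the `.model` files under `models/tokenizer/` are standard SentencePiece models (the `▁` word-boundary marker in the examples suggests this) and that compression is measured as characters per token; treat both as assumptions rather than documented behaviour.

```python
# Minimal sketch (assumptions: SentencePiece .model file, compression = chars / tokens).
import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="models/tokenizer/arz_tokenizer_32k.model")

text = "تاملكوت هوا دوار فى المغرب."          # any Egyptian Arabic sample sentence
pieces = sp.encode(text, out_type=str)        # subword pieces, e.g. ['▁تام', 'لك', ...]

compression = len(text) / len(pieces)         # characters per token
print(pieces)
print(f"tokens: {len(pieces)}, compression: {compression:.2f}x")
```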
136
 
 
147
 
148
  | N-gram | Variant | Perplexity | Entropy | Unique N-grams | Top-100 Coverage | Top-1000 Coverage |
149
  |--------|---------|------------|---------|----------------|------------------|-------------------|
150
+ | **2-gram** | Word | 5,833 | 12.51 | 1,079,967 | 30.2% | 66.4% |
151
+ | **2-gram** | Subword | 317 🏆 | 8.31 | 15,559 | 62.6% | 98.6% |
152
+ | **3-gram** | Word | 8,334 | 13.02 | 1,690,048 | 28.5% | 62.7% |
153
+ | **3-gram** | Subword | 2,031 | 10.99 | 130,688 | 30.0% | 73.9% |
154
+ | **4-gram** | Word | 12,878 | 13.65 | 3,065,781 | 27.3% | 59.4% |
155
+ | **4-gram** | Subword | 7,269 | 12.83 | 793,433 | 19.5% | 56.8% |
156
+ | **5-gram** | Word | 13,448 | 13.72 | 3,166,704 | 28.9% | 59.2% |
157
+ | **5-gram** | Subword | 18,103 | 14.14 | 2,865,423 | 14.0% | 48.6% |
158
 
159
  ### Top 5 N-grams by Size
160
 
 
162
 
163
  | Rank | N-gram | Count |
164
  |------|--------|-------|
165
+ | 1 | `لينكات برانيه` | 1,294,219 |
166
+ | 2 | `برانيه مصادر` | 1,167,266 |
167
+ | 3 | `من مواليد` | 829,316 |
168
+ | 4 | `مواليد يوم` | 809,154 |
169
  | 5 | `الاستوا السماوى` | 668,876 |
170
 
171
  **3-grams (Word):**
172
 
173
  | Rank | N-gram | Count |
174
  |------|--------|-------|
175
+ | 1 | `لينكات برانيه مصادر` | 1,164,637 |
176
+ | 2 | `من مواليد يوم` | 809,006 |
177
  | 3 | `خط الاستوا السماوى` | 630,228 |
178
+ | 4 | `الساعيه لجرم سماوى` | 445,892 |
179
+ | 5 | `الدايره الساعيه لجرم` | 445,892 |
180
 
181
  **4-grams (Word):**
182
 
 
184
  |------|--------|-------|
185
  | 1 | `الدايره الساعيه لجرم سماوى` | 445,892 |
186
  | 2 | `السماوى تكون قيمة بعده` | 445,860 |
187
+ | 3 | `الاستوا السماوى تكون قيمة` | 445,860 |
188
+ | 4 | `خط الاستوا السماوى تكون` | 445,860 |
189
+ | 5 | `لينكات برانيه مصادر من` | 320,790 |
190
+
191
+ **5-grams (Word):**
192
+
193
+ | Rank | N-gram | Count |
194
+ |------|--------|-------|
195
+ | 1 | `خط الاستوا السماوى تكون قيمة` | 445,860 |
196
+ | 2 | `الاستوا السماوى تكون قيمة بعده` | 445,860 |
197
+ | 3 | `لستة اكبر بحيرات العالم حسب` | 255,463 |
198
+ | 4 | `السماويه اللى المجره جزء منها` | 222,981 |
199
+ | 5 | `صوره و هيا مجال الكره` | 222,975 |
200
 
201
  **2-grams (Subword):**
202
 
203
  | Rank | N-gram | Count |
204
  |------|--------|-------|
205
+ | 1 | `_ ا` | 31,094,853 |
206
+ | 2 | `ا ل` | 30,178,157 |
207
+ | 3 | `ه _` | 17,208,514 |
208
+ | 4 | `_ م` | 13,583,995 |
209
+ | 5 | `ى _` | 11,832,103 |
210
 
211
  **3-grams (Subword):**
212
 
213
  | Rank | N-gram | Count |
214
  |------|--------|-------|
215
+ | 1 | `_ ا ل` | 25,055,980 |
216
+ | 2 | `ي ه _` | 6,400,461 |
217
+ | 3 | `ه _ ا` | 6,229,523 |
218
+ | 4 | `ا ل م` | 5,957,557 |
219
+ | 5 | `_ م ن` | 4,545,069 |
220
 
221
  **4-grams (Subword):**
222
 
223
  | Rank | N-gram | Count |
224
  |------|--------|-------|
225
+ | 1 | `_ ا ل م` | 5,209,448 |
226
+ | 2 | `_ ا ل` | 5,178,964 |
227
+ | 3 | `_ ف ى _` | 4,259,956 |
228
+ | 4 | `_ م ن _` | 3,913,053 |
229
+ | 5 | `_ ا ل ا` | 3,581,934 |
230
+
231
+ **5-grams (Subword):**
232
+
233
+ | Rank | N-gram | Count |
234
+ |------|--------|-------|
235
+ | 1 | `_ م ن _ ا` | 1,823,528 |
236
+ | 2 | `ر ه _ ا ل` | 1,712,451 |
237
+ | 3 | `م ص ا د ر` | 1,614,472 |
238
+ | 4 | `_ م ص ا د` | 1,612,850 |
239
+ | 5 | `_ ل ي ن ك` | 1,400,053 |
240
 
241
 
242
  ### Key Findings
243
 
244
+ - **Best Perplexity:** 2-gram (subword) with 317
245
  - **Entropy Trend:** Joint n-gram entropy grows with n; conditional (per-token) entropy falls as context lengthens (see the Markov results below)
246
+ - **Coverage:** Top-1000 patterns cover ~49% of corpus
247
  - **Recommendation:** 4-gram or 5-gram for best predictive performance
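
The perplexity and entropy columns above can be reproduced from the released count tables: entropy is the Shannon entropy of the n-gram frequency distribution and perplexity is 2^entropy (2^8.31 ≈ 317 for the 2-gram subword model). A sketch is shown below; it assumes each parquet stores one row per n-gram with `ngram` and `count` columns, which is an assumption about the schema.

```python
# Sketch: entropy / perplexity of an n-gram distribution from its count table.
# Assumes columns "ngram" and "count"; the real parquet schema may differ.
import numpy as np
import pandas as pd

df = pd.read_parquet("models/subword_ngram/arz_2gram_subword.parquet")

counts = df["count"].to_numpy(dtype=np.float64)
probs = counts / counts.sum()

entropy = -(probs * np.log2(probs)).sum()     # bits per n-gram
perplexity = 2.0 ** entropy                   # ≈ 317 in the table above if the schema assumption holds

top1000 = df.nlargest(1000, "count")["count"].sum() / counts.sum()
print(f"entropy={entropy:.2f} bits  perplexity={perplexity:,.0f}  top-1000 coverage={top1000:.1%}")
```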
248
 
249
  ---
 
259
 
260
  | Context | Variant | Avg Entropy | Perplexity | Branching Factor | Unique Contexts | Predictability |
261
  |---------|---------|-------------|------------|------------------|-----------------|----------------|
262
+ | **1** | Word | 1.2202 | 2.330 | 9.16 | 1,361,925 | 0.0% |
263
+ | **1** | Subword | 1.0545 | 2.077 | 8.26 | 5,787 | 0.0% |
264
+ | **2** | Word | 0.3640 | 1.287 | 1.91 | 12,454,727 | 63.6% |
265
+ | **2** | Subword | 0.7835 | 1.721 | 5.53 | 47,806 | 21.7% |
266
+ | **3** | Word | 0.1137 | 1.082 | 1.27 | 23,730,854 | 88.6% |
267
+ | **3** | Subword | 0.7666 | 1.701 | 4.73 | 264,404 | 23.3% |
268
+ | **4** | Word | 0.0623 🏆 | 1.044 | 1.17 | 30,143,409 | 93.8% |
269
+ | **4** | Subword | 0.7433 | 1.674 | 3.81 | 1,249,901 | 25.7% |
270
 
271
  ### Generated Text Samples (Word-based)
272
 
 
274
 
275
  **Context Size 1:**
276
 
277
+ 1. `فى مرصد لويل للتدوير عن تشغيلها willer trains wales police beats and diocesan links milwaukee holy`
278
+ 2. `من امستردام 16 اكتوبر فى مركز الكواكب الصغيره مصادر من النجوم اللى جايه لينا من البرتغال`
279
+ 3. `و بكده عملية فى الحزب الديمقراطى المسيحى اشتغل فى ابوت توريبيو الكوليا مساحتها 4 سبتمبر سنة`
280
 
281
  **Context Size 2:**
282
 
283
+ 1. `لينكات برانيه مصادر اليمن يمنيه`
284
+ 2. `برانيه مصادر صدرى من المملكه المتحده عضو برلمان المملكه المتحده حياته نيل ماثيوز ميك ديسبوروج ريس تش...`
285
+ 3. `من مواليد يوم 12 يونيه فى لوس انجليس اغانى اغانى نيو ويڤ جوايز لينكات برانيه مصادر من`
286
 
287
  **Context Size 3:**
288
 
289
+ 1. `لينكات برانيه مصادر من النرويج فى جامعة كوبينهاجين و جامعة جوتينجن و جامعة زيورخ و المعهد الفدرالى ا...`
290
+ 2. `من مواليد يوم 3 يناير فى تارنوف مات فى 16 يناير الحياه العمليه كان عضو فى academic division`
291
+ 3. `خط الاستوا السماوى تكون قيمة بعده بالسالب مصادر مايور 2ماس`
292
 
293
  **Context Size 4:**
294
 
295
  1. `الدايره الساعيه لجرم سماوى و الدايره الساعيه لنقطة الاعتدال الربيعى المطلع المستقيم ممكن يتقاس بقوس ...`
296
+ 2. `الاستوا السماوى تكون قيمة بعده بالموجب و لو النجم جنوب خط الاستوا السماوى تكون قيمة بعده بالموجب و ل...`
297
+ 3. `السماوى تكون قيمة بعده بالسالب مصادر مايور 2ماس`
298
 
299
 
300
  ### Generated Text Samples (Subword-based)
 
303
 
304
  **Context Size 1:**
305
 
306
+ 1. `_اعاده_اكلمطقص_ا`
307
+ 2. `انجنجويناتيلودره`
308
+ 3. `_اوانيره._حيا_ا`
309
 
310
  **Context Size 2:**
311
 
312
+ 1. `_الحارض_فراكريفيا`
313
+ 2. `النظمى_نقطه_الشعا`
314
+ 3. `ه_الربيس_الداد_(+`
315
 
316
  **Context Size 3:**
317
 
318
+ 1. `_الاكتوردشت_كندا._`
319
+ 2. `يه_لجرم_الحرة._نظا`
320
+ 3. `ه_المقرا_جبات_فى_م`
321
 
322
  **Context Size 4:**
323
 
324
+ 1. `_المتحده_فضاء_منها.`
325
+ 2. `_السكان_سكان_فى_كو`
326
+ 3. `_فى_مركز_مُدَافِع,_و_ه`
327
 
328
 
329
  ### Key Findings
330
 
331
+ - **Best Predictability:** Context-4 (word) with 93.8% predictability
332
  - **Branching Factor:** Decreases with context size (more deterministic)
333
+ - **Memory Trade-off:** Larger contexts require more storage (1,249,901 contexts)
334
  - **Recommendation:** Context-3 or Context-4 for text generation
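
The word-based samples above come from weighted sampling over the stored transition tables. A minimal sketch of that procedure is given below; the column names `context`, `next_token` and `count` are assumptions about the parquet layout, and the seed bigram is simply the most frequent one from section 2.

```python
# Sketch: sample text from the context-2 word Markov chain.
# Assumes columns "context" (space-joined words), "next_token" and "count"; schema may differ.
import random
import pandas as pd

df = pd.read_parquet("models/word_markov/arz_markov_ctx2_word.parquet")

# Index transitions by context for fast lookup during generation.
table = {ctx: (grp["next_token"].tolist(), grp["count"].tolist())
         for ctx, grp in df.groupby("context")}

def generate(seed, length=20, ctx_size=2):
    words = list(seed)
    for _ in range(length):
        ctx = " ".join(words[-ctx_size:])
        if ctx not in table:
            break                                  # unseen context: stop generating
        nxt, weights = table[ctx]
        words.append(random.choices(nxt, weights=weights, k=1)[0])
    return " ".join(words)

print(generate(["من", "مواليد"]))                  # seed = top bigram from section 2
```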
335
 
336
  ---
 
346
 
347
  | Metric | Value |
348
  |--------|-------|
349
+ | Vocabulary Size | 859,607 |
350
+ | Total Tokens | 116,985,057 |
351
+ | Mean Frequency | 136.09 |
352
  | Median Frequency | 4 |
353
+ | Frequency Std Dev | 9386.65 |
354
 
355
  ### Most Common Words
356
 
357
  | Rank | Word | Frequency |
358
  |------|------|-----------|
359
+ | 1 | فى | 4,423,347 |
360
+ | 2 | من | 3,916,260 |
361
+ | 3 | و | 3,516,072 |
362
+ | 4 | مصادر | 1,612,738 |
363
+ | 5 | لينكات | 1,359,751 |
364
+ | 6 | برانيه | 1,299,373 |
365
+ | 7 | هيا | 1,062,774 |
366
+ | 8 | اللى | 967,317 |
367
+ | 9 | يوم | 853,586 |
368
+ | 10 | مواليد | 836,389 |
369
 
370
  ### Least Common Words (from vocabulary)
371
 
372
  | Rank | Word | Frequency |
373
  |------|------|-----------|
374
+ | 1 | ثاكراي | 2 |
375
+ | 2 | تشوهاتها | 2 |
376
+ | 3 | جبائر | 2 |
377
+ | 4 | jesuss | 2 |
378
+ | 5 | وأران | 2 |
379
+ | 6 | مرثير | 2 |
380
+ | 7 | راثماينز | 2 |
381
+ | 8 | غرانغغورمان | 2 |
382
+ | 9 | grangegorman | 2 |
383
+ | 10 | ditsu | 2 |
384
 
385
  ### Zipf's Law Analysis
386
 
387
  | Metric | Value |
388
  |--------|-------|
389
+ | Zipf Coefficient | 1.2584 |
390
+ | R² (Goodness of Fit) | 0.994685 |
391
  | Adherence Quality | **excellent** |
392
 
393
  ### Coverage Analysis
 
395
  | Top N Words | Coverage |
396
  |-------------|----------|
397
  | Top 100 | 46.0% |
398
+ | Top 1,000 | 76.5% |
399
+ | Top 5,000 | 85.8% |
400
+ | Top 10,000 | 88.9% |
401
 
402
  ### Key Findings
403
 
404
+ - **Zipf Compliance:** R²=0.9947 indicates excellent adherence to Zipf's law
405
  - **High Frequency Dominance:** Top 100 words cover 46.0% of corpus
406
+ - **Long Tail:** 849,607 words needed for remaining 11.1% coverage
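
The Zipf coefficient and R² above can be re-estimated with a log-log least-squares fit over the released vocabulary; the sketch below assumes the parquet exposes a `frequency` column, which may not match the actual column name.

```python
# Sketch: re-estimate the Zipf coefficient s from the vocabulary table.
# Zipf's law: frequency ≈ C / rank^s, i.e. log f = log C - s * log r.
# Assumes a "frequency" column; the real column name may differ.
import numpy as np
import pandas as pd

df = pd.read_parquet("models/vocabulary/arz_vocabulary.parquet")
freq = np.sort(df["frequency"].to_numpy(dtype=np.float64))[::-1]
rank = np.arange(1, len(freq) + 1)

slope, intercept = np.polyfit(np.log(rank), np.log(freq), 1)
pred = intercept + slope * np.log(rank)
resid = np.log(freq) - pred
r2 = 1 - (resid ** 2).sum() / ((np.log(freq) - np.log(freq).mean()) ** 2).sum()

print(f"Zipf coefficient s ≈ {-slope:.4f}, R² ≈ {r2:.6f}")
```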
407
 
408
  ---
409
  ## 5. Word Embeddings Evaluation
 
419
 
420
  ### 5.1 Cross-Lingual Alignment
421
 
422
+ ![Alignment Quality](visualizations/embedding_alignment_quality.png)
423
+
424
+ ![Multilingual t-SNE](visualizations/embedding_tsne_multilingual.png)
425
 
426
 
427
  ### 5.2 Model Comparison
428
 
429
  | Model | Dimension | Isotropy | Semantic Density | Alignment R@1 | Alignment R@10 |
430
  |-------|-----------|----------|------------------|---------------|----------------|
431
+ | **mono_32d** | 32 | 0.7938 | 0.3446 | N/A | N/A |
432
+ | **mono_64d** | 64 | 0.7682 | 0.2977 | N/A | N/A |
433
+ | **mono_128d** | 128 | 0.7168 | 0.2564 | N/A | N/A |
434
+ | **aligned_32d** | 32 | 0.7938 🏆 | 0.3389 | 0.1080 | 0.4340 |
435
+ | **aligned_64d** | 64 | 0.7682 | 0.3004 | 0.2180 | 0.6240 |
436
+ | **aligned_128d** | 128 | 0.7168 | 0.2666 | 0.3440 | 0.7120 |
437
 
438
  ### Key Findings
439
 
440
+ - **Best Isotropy:** aligned_32d with 0.7938 (more uniform distribution)
441
+ - **Semantic Density:** Average pairwise similarity of 0.3008. Lower values indicate better semantic separation.
442
+ - **Alignment Quality:** Aligned models achieve up to 34.4% R@1 in cross-lingual retrieval.
443
  - **Recommendation:** 128d aligned for best cross-lingual performance
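
To use the aligned vectors for cross-lingual retrieval, the monolingual vector is mapped into the shared hub space with the accompanying `.projection.npy`. The sketch below assumes the `.bin` files are fastText binaries (the repo tags list `fasttext`) and that the projection is applied as a right matrix product; check the wikilangs documentation before relying on either assumption.

```python
# Sketch: map a word vector into the cross-lingual (hub) space.
# Assumptions: the .bin is a fastText binary and the 128x128 projection is applied as vec @ W.
import numpy as np
import fasttext

model = fasttext.load_model("models/embeddings/aligned/arz_128d.bin")
projection = np.load("models/embeddings/aligned/arz_128d.projection.npy")

vec = model.get_word_vector("مصر")            # 128-d monolingual vector
aligned = vec @ projection                    # vector comparable across languages

print(aligned.shape, float(np.linalg.norm(aligned)))
```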
444
 
445
  ---
446
  ## 6. Morphological Analysis (Experimental)
447
 
 
 
448
  This section presents an automated morphological analysis derived from the statistical divergence between word-level and subword-level models. By analyzing where subword predictability spikes and where word-level coverage fails, we can infer linguistic structures without supervised data.
449
 
450
  ### 6.1 Productivity & Complexity
451
 
452
  | Metric | Value | Interpretation | Recommendation |
453
  |--------|-------|----------------|----------------|
454
+ | Productivity Index | **5.000** | High morphological productivity | Reliable analysis |
455
+ | Idiomaticity Gap | **0.218** | High formulaic/idiomatic content | - |
456
 
457
  ### 6.2 Affix Inventory (Productive Units)
458
 
 
461
  #### Productive Prefixes
462
  | Prefix | Examples |
463
  |--------|----------|
464
+ | `-ال` | الطبقه, التحاقه, التنسيق |
465
+ | `-وا` | واعتراف, وازواجها, والسحالى |
466
 
467
  #### Productive Suffixes
468
  | Suffix | Examples |
469
  |--------|----------|
470
+ | `-ين` | ڤيكيلين, لالغليمين, كورجتچارنين |
471
+ | `-ان` | فالسارتان, نيوبان, تيزمان |
472
+ | `-ون` | اندريلتون, ازانون, السيويون |
473
 
474
  ### 6.3 Bound Stems (Lexical Roots)
475
 
 
477
 
478
  | Stem | Cohesion | Substitutability | Examples |
479
  |------|----------|------------------|----------|
480
+ | `المج` | 1.77x | 271 contexts | المجن, المجد, المجل |
481
+ | `ياته` | 2.08x | 97 contexts | بياته, آياته, عياته |
482
+ | `الشع` | 2.04x | 104 contexts | الشعف, الشعر, الشعب |
483
+ | `انزي` | 1.84x | 164 contexts | انزيچ, انزيت, انزيغ |
484
+ | `الاع` | 1.91x | 107 contexts | الاعمل, الاعدا, الاعيب |
485
+ | `لموج` | 2.21x | 48 contexts | لموجة, الموج, الموجة |
486
+ | `الاح` | 1.75x | 110 contexts | الاحد, الاحرد, والاحد |
487
+ | `مستق` | 1.86x | 81 contexts | مستقر, مستقل, ومستقل |
488
+ | `لمجر` | 1.87x | 71 contexts | لمجرى, لمجرم, للمجر |
489
+ | `لساع` | 2.28x | 28 contexts | لساعة, الساعى, لساعته |
490
+ | `لمطل` | 2.23x | 29 contexts | لمطلع, المطل, المطله |
491
+ | `لسما` | 1.60x | 110 contexts | لسماء, للسما, لسماع |
492
 
493
  ### 6.4 Affix Compatibility (Co-occurrence)
494
 
 
496
 
497
  | Prefix | Suffix | Frequency | Examples |
498
  |--------|--------|-----------|----------|
499
+ | `-ال` | `-ين` | 42 words | المسؤولين, الهواريين |
500
+ | `-ال` | `-ون` | 27 words | الغويلفيون, المراديون |
501
+ | `-ال` | `-ان` | 16 words | الشخصان, اليرقان |
502
+ | `-وا` | `-ين` | 6 words | والاصلاحيين, والمخبرين |
503
+ | `-وا` | `-ان` | 4 words | وايزمان, والغثيان |
504
+ | `-وا` | `-ون` | 4 words | واسيون, وايتيلون |
505
 
506
  ### 6.5 Recursive Morpheme Segmentation
507
 
 
509
 
510
  | Word | Suggested Split | Confidence | Stem |
511
  |------|-----------------|------------|------|
512
+ | الرومانيتين | **`ال-رومانيت-ين`** | 6.0 | `رومانيت` |
513
+ | والمنظمين | **`وا-لمنظم-ين`** | 6.0 | `لمنظم` |
514
+ | والخريجون | **`وا-لخريج-ون`** | 6.0 | `لخريج` |
515
+ | اوليمبيين | **`اوليمبي-ين`** | 4.5 | `اوليمبي` |
516
+ | الفينلاندى | **`ال-فينلاندى`** | 4.5 | `فينلاندى` |
517
+ | لوڤتچارنين | **`لوڤتچارن-ين`** | 4.5 | `لوڤتچارن` |
518
+ | الرحمانوف | **`ال-رحمانوف`** | 4.5 | `رحمانوف` |
519
+ | الإرسالية | **`ال-إرسالية`** | 4.5 | `إرسالية` |
520
+ | جيريدهاران | **`جيريدهار-ان`** | 4.5 | `جيريدهار` |
521
+ | البرمائيات | **`ال-برمائيات`** | 4.5 | `برمائيات` |
522
+ | المتبادلة | **`ال-متبادلة`** | 4.5 | `متبادلة` |
523
+ | المستخرجة | **`ال-مستخرجة`** | 4.5 | `مستخرجة` |
524
+ | الباراجواى | **`ال-باراجواى`** | 4.5 | `باراجواى` |
525
+ | الايرلندى | **`ال-ايرلندى`** | 4.5 | `ايرلندى` |
526
+ | التصميمات | **`ال-تصميمات`** | 4.5 | `تصميمات` |
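
The exact Recursive Hierarchical Substitutability procedure is not spelled out in this report, so the sketch below only illustrates the surface idea: peel off the productive affixes from section 6.2 and keep whatever stem remains. It reproduces splits such as `ال-رومانيت-ين`, but it is not the pipeline's algorithm.

```python
# Illustration only: strip the productive affixes listed in section 6.2.
# This is NOT the Recursive Hierarchical Substitutability method used by the pipeline.
PREFIXES = ("ال", "وا")
SUFFIXES = ("ين", "ان", "ون")

def naive_split(word, min_stem=3):
    parts, stem = [], word
    for p in PREFIXES:
        if stem.startswith(p) and len(stem) - len(p) >= min_stem:
            parts.append(p)
            stem = stem[len(p):]
            break
    suffix = None
    for s in SUFFIXES:
        if stem.endswith(s) and len(stem) - len(s) >= min_stem:
            suffix = s
            stem = stem[: -len(s)]
            break
    return "-".join(parts + [stem] + ([suffix] if suffix else []))

print(naive_split("الرومانيتين"))   # -> ال-رومانيت-ين, matching the table above
```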
527
 
528
  ### 6.6 Linguistic Interpretation
529
 
530
  > **Automated Insight:**
531
+ The language Egyptian Arabic shows high morphological productivity. The subword models are significantly more efficient than word models, suggesting a rich system of affixation or compounding.
532
 
533
  ---
534
  ## 7. Summary & Recommendations
 
539
 
540
  | Component | Recommended | Rationale |
541
  |-----------|-------------|-----------|
542
+ | Tokenizer | **64k BPE** | Best compression (3.90x) |
543
+ | N-gram | **2-gram** | Lowest perplexity (317) |
544
+ | Markov | **Context-4** | Highest predictability (93.8%) |
545
  | Embeddings | **100d** | Balanced semantic capture and isotropy |
546
 
547
 
 
755
  ---
756
  *Generated by Wikilangs Models Pipeline*
757
 
758
+ *Report Date: 2026-01-03 20:14:21*
models/embeddings/aligned/arz_128d.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a93d1b1d200a873b6e9a9096c7b18c4397ac9a643ca73002034e62b536fd9f20
3
+ size 1529650607
models/embeddings/aligned/arz_128d.meta.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"lang": "arz", "dim": 128, "max_seq_len": 512, "is_aligned": true}
models/embeddings/aligned/arz_128d.projection.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:45855ffa01ef1ccad3191df9709fde0e11b3fef8b54f24f7bb3fc0f139ab0ab4
3
+ size 65664
models/embeddings/aligned/arz_128d_metadata.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "language": "arz",
3
+ "dimension": 128,
4
+ "version": "aligned",
5
+ "hub_language": "en",
6
+ "seed_vocab_size": 51508,
7
+ "vocab_size": 483420
8
+ }
models/embeddings/aligned/arz_32d.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c6256df8926d38898c2a7314db52af2add7a2924515e162e6bf2132f5dcc152
3
+ size 390384047
models/embeddings/aligned/arz_32d.meta.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"lang": "arz", "dim": 32, "max_seq_len": 512, "is_aligned": true}
models/embeddings/aligned/arz_32d.projection.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:879d846167a92c16b6141f9bf5e57809288cfd40ec199b718f6fe5433fddc2da
3
+ size 4224
models/embeddings/aligned/arz_32d_metadata.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "language": "arz",
3
+ "dimension": 32,
4
+ "version": "aligned",
5
+ "hub_language": "en",
6
+ "seed_vocab_size": 51508,
7
+ "vocab_size": 483420
8
+ }
models/embeddings/aligned/arz_64d.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bc863c082135ba1da04b2bd1dc6317a645458adf2de617806c8169b70f01318d
3
+ size 770139567
models/embeddings/aligned/arz_64d.meta.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"lang": "arz", "dim": 64, "max_seq_len": 512, "is_aligned": true}
models/embeddings/aligned/arz_64d.projection.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e4a7cbecfb3aca876da085b9eca7016b1a274c8e52c96adf1a43793d6d9ffff7
3
+ size 16512
models/embeddings/aligned/arz_64d_metadata.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "language": "arz",
3
+ "dimension": 64,
4
+ "version": "aligned",
5
+ "hub_language": "en",
6
+ "seed_vocab_size": 51508,
7
+ "vocab_size": 483420
8
+ }
models/embeddings/monolingual/arz_128d.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:5bdb36e12bc1a678fd5c157ad32e02341de1eca60c4bc2b5ef93fe49b61bd555
3
- size 1527330535
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a93d1b1d200a873b6e9a9096c7b18c4397ac9a643ca73002034e62b536fd9f20
3
+ size 1529650607
models/embeddings/monolingual/arz_128d_metadata.json CHANGED
@@ -11,5 +11,5 @@
11
  "encoding_method": "rope",
12
  "dim": 128
13
  },
14
- "vocab_size": 481203
15
  }
 
11
  "encoding_method": "rope",
12
  "dim": 128
13
  },
14
+ "vocab_size": 483420
15
  }
models/embeddings/monolingual/arz_32d.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:30582a755835303bdd9d0ea4ad7243b43b631aa397c3e567b21c8d4a5c4449d3
3
- size 389766631
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c6256df8926d38898c2a7314db52af2add7a2924515e162e6bf2132f5dcc152
3
+ size 390384047
models/embeddings/monolingual/arz_32d_metadata.json CHANGED
@@ -11,5 +11,5 @@
11
  "encoding_method": "rope",
12
  "dim": 32
13
  },
14
- "vocab_size": 481203
15
  }
 
11
  "encoding_method": "rope",
12
  "dim": 32
13
  },
14
+ "vocab_size": 483420
15
  }
models/embeddings/monolingual/arz_64d.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:5068a8b810efa630cdf0314c353a4495557bcd65811e48356936ecd7a645b35c
3
- size 768954599
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bc863c082135ba1da04b2bd1dc6317a645458adf2de617806c8169b70f01318d
3
+ size 770139567
models/embeddings/monolingual/arz_64d_metadata.json CHANGED
@@ -11,5 +11,5 @@
11
  "encoding_method": "rope",
12
  "dim": 64
13
  },
14
- "vocab_size": 481203
15
  }
 
11
  "encoding_method": "rope",
12
  "dim": 64
13
  },
14
+ "vocab_size": 483420
15
  }
models/subword_markov/arz_markov_ctx1_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:93b9ef162389968b455ca3710639cb0faa0e67079e19324a4f3d232dc220101e
3
- size 347493
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d88aa401545a7cb868030c3e704a39018094110969e595475748f34cfa468195
3
+ size 351656
models/subword_markov/arz_markov_ctx1_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 1,
3
  "variant": "subword",
4
  "language": "arz",
5
- "unique_contexts": 5726,
6
- "total_transitions": 693777470
7
  }
 
2
  "context_size": 1,
3
  "variant": "subword",
4
  "language": "arz",
5
+ "unique_contexts": 5787,
6
+ "total_transitions": 695246839
7
  }
models/subword_markov/arz_markov_ctx2_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:98db3946a7c34d13376e437f6401ae1d31aa408e141da100b0793536f9682ba1
3
- size 2267694
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ef4b04112512f9152b8d2a8aead3dae96220811aada1e938a3a2fd3032aa1151
3
+ size 2279775
models/subword_markov/arz_markov_ctx2_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 2,
3
  "variant": "subword",
4
  "language": "arz",
5
- "unique_contexts": 47379,
6
- "total_transitions": 692148775
7
  }
 
2
  "context_size": 2,
3
  "variant": "subword",
4
  "language": "arz",
5
+ "unique_contexts": 47806,
6
+ "total_transitions": 693617577
7
  }
models/subword_markov/arz_markov_ctx3_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:791a2be4e464c30d58ac081746362b42037f50dd8d0c4b552c3b0e68c2518dea
3
- size 10864076
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c74fb24b2618a95b7dcdf3dfe34859bd85a929b291258c2ce22882405f437ee8
3
+ size 10965216
models/subword_markov/arz_markov_ctx3_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 3,
3
  "variant": "subword",
4
  "language": "arz",
5
- "unique_contexts": 262420,
6
- "total_transitions": 690520080
7
  }
 
2
  "context_size": 3,
3
  "variant": "subword",
4
  "language": "arz",
5
+ "unique_contexts": 264404,
6
+ "total_transitions": 691988315
7
  }
models/subword_markov/arz_markov_ctx4_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f9412f5ef232c0c613c068829b486f969ed98f144b0956cfba48ca7651d697cc
3
- size 40934252
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d9949ab9e4f3a093304bfe0dffdbb24f12be13664e07076bc4388ea6a8b3e2e1
3
+ size 41091997
models/subword_markov/arz_markov_ctx4_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 4,
3
  "variant": "subword",
4
  "language": "arz",
5
- "unique_contexts": 1241425,
6
- "total_transitions": 688891385
7
  }
 
2
  "context_size": 4,
3
  "variant": "subword",
4
  "language": "arz",
5
+ "unique_contexts": 1249901,
6
+ "total_transitions": 690359053
7
  }
models/subword_ngram/arz_2gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:54b1e94823bf6eda78e610a31f50839e3eb6945296345628a51c2775bbc535b5
3
- size 225336
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5800d1d849b8e18cc45db646a0757e75ac10bd88fb659b78004d6cebf8166c07
3
+ size 226684
models/subword_ngram/arz_2gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "n": 2,
3
  "variant": "subword",
4
  "language": "arz",
5
- "unique_ngrams": 15451,
6
- "total_ngrams": 693777470
7
  }
 
2
  "n": 2,
3
  "variant": "subword",
4
  "language": "arz",
5
+ "unique_ngrams": 15559,
6
+ "total_ngrams": 695246839
7
  }
models/subword_ngram/arz_3gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:6241036ff50c386080b4d36854fdc224c1c5099af471b3862e1464cd644b63ae
3
- size 1697106
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad77c5c7d64974ac45c5a464440d8672c08a593d927e741aa02bc7aa9c8b3068
3
+ size 1710417
models/subword_ngram/arz_3gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "n": 3,
3
  "variant": "subword",
4
  "language": "arz",
5
- "unique_ngrams": 129923,
6
- "total_ngrams": 692148775
7
  }
 
2
  "n": 3,
3
  "variant": "subword",
4
  "language": "arz",
5
+ "unique_ngrams": 130688,
6
+ "total_ngrams": 693617577
7
  }
models/subword_ngram/arz_4gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c000b6a963ccc11a1bb0feebe6d49196497cbb6667e247ab3df4764a4e91003c
3
- size 10220298
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:785394e1c4ea1d480da60e9f5a6a5f1c79f89c12ac018998bb84854d099919ff
3
+ size 10290736
models/subword_ngram/arz_4gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "n": 4,
3
  "variant": "subword",
4
  "language": "arz",
5
- "unique_ngrams": 788718,
6
- "total_ngrams": 690520080
7
  }
 
2
  "n": 4,
3
  "variant": "subword",
4
  "language": "arz",
5
+ "unique_ngrams": 793433,
6
+ "total_ngrams": 691988315
7
  }
models/subword_ngram/arz_5gram_subword.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:027d1bef375f12bc153fff490b146974f8a468599aea359db02d4447fd9dedbb
3
+ size 39354928
models/subword_ngram/arz_5gram_subword_metadata.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "n": 5,
3
+ "variant": "subword",
4
+ "language": "arz",
5
+ "unique_ngrams": 2865423,
6
+ "total_ngrams": 690359053
7
+ }
models/tokenizer/arz_tokenizer_16k.model CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:8182c7627a2b642bc8213e1f478fb8483ae43e1decdfb9d36a8b81c0b1e5db70
3
- size 553522
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c7fe49582ea5c0bd25a74c4bd56aa7935cb61efc4d2207d8478b9f55c1deb88e
3
+ size 553566
models/tokenizer/arz_tokenizer_16k.vocab CHANGED
The diff for this file is too large to render. See raw diff
 
models/tokenizer/arz_tokenizer_32k.model CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3f58411587d93e3605bc0bfb493f2adb152e457978754608d4bfa1098bbe3484
3
- size 874271
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:65590445b44ef400b2d4f5156f78a4370c48fef1256a27ea8cd6dc9331f5494f
3
+ size 874340
models/tokenizer/arz_tokenizer_32k.vocab CHANGED
The diff for this file is too large to render. See raw diff
 
models/tokenizer/arz_tokenizer_64k.model CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b7caa9ae0cc952f588395f385251d423792d50e832f759c19f62d2debfa53c97
3
- size 1535709
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:624e67c5ca7f66135be05eb14503d5c8a3bba01e8de29e986a29eb35a8738253
3
+ size 1535714
models/tokenizer/arz_tokenizer_64k.vocab CHANGED
The diff for this file is too large to render. See raw diff
 
models/tokenizer/arz_tokenizer_8k.model CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:75cf05a2c2e3d6160ef1a57d3e6c1105fcab185e64a3ffd3fdab63dacc685a1f
3
- size 396360
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:61a540a4e4713c4e1bef88d1433bc6ccd1d63553a9ef85c3114448773ca9f029
3
+ size 396191
models/tokenizer/arz_tokenizer_8k.vocab CHANGED
The diff for this file is too large to render. See raw diff
 
models/vocabulary/arz_vocabulary.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f49f18512e3b41813735d519aef4205264437e74599a3808b6c8fbea97f3bb17
3
- size 12321602
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1aa9c6e17d25fc4c2264c9a06b8ad84b9a303e652725edfac492f8aeacab09e3
3
+ size 12376425
models/vocabulary/arz_vocabulary_metadata.json CHANGED
@@ -1,17 +1,17 @@
1
  {
2
  "language": "arz",
3
- "vocabulary_size": 856070,
4
  "variant": "full",
5
  "statistics": {
6
- "type_token_ratio": 0.011546453807845636,
7
  "coverage": {
8
- "top_100": 0.4579160902506231,
9
- "top_1000": 0.7632735433913325,
10
- "top_5000": 0.8553371329341142,
11
- "top_10000": 0.885855315521865
12
  },
13
- "hapax_count": 497272,
14
- "hapax_ratio": 0.36744001146790684,
15
- "total_documents": 1628695
16
  }
17
  }
 
1
  {
2
  "language": "arz",
3
+ "vocabulary_size": 859607,
4
  "variant": "full",
5
  "statistics": {
6
+ "type_token_ratio": 0.011594418548720495,
7
  "coverage": {
8
+ "top_100": 0.45771756046088624,
9
+ "top_1000": 0.762176545686491,
10
+ "top_5000": 0.8545014147912448,
11
+ "top_10000": 0.8851748938277777
12
  },
13
+ "hapax_count": 502594,
14
+ "hapax_ratio": 0.3689572977849818,
15
+ "total_documents": 1629262
16
  }
17
  }
models/word_markov/arz_markov_ctx1_word.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:27fac99b38072bd41d19f9620ff6d00511cdf0cb2c23718cc24825c3425b0c81
3
- size 125535197
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3ccbdd7f412f35796c8d761dd8df63b88ffbaae4d67baaced253ef9f0a25fa0d
3
+ size 126951261
models/word_markov/arz_markov_ctx1_word_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 1,
3
  "variant": "word",
4
  "language": "arz",
5
- "unique_contexts": 1353062,
6
- "total_transitions": 115579759
7
  }
 
2
  "context_size": 1,
3
  "variant": "word",
4
  "language": "arz",
5
+ "unique_contexts": 1361925,
6
+ "total_transitions": 115858389
7
  }
models/word_markov/arz_markov_ctx2_word.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:9256f52202a6653f8df7a39b233dce04b0731a6961f54294e451f0ff631a642f
3
- size 413546149
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:205fa0c25e52a899e1ca07f401999e2e9d1bfb2c14765ab1e5a8ef141f5c442c
3
+ size 418545596
models/word_markov/arz_markov_ctx2_word_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 2,
3
  "variant": "word",
4
  "language": "arz",
5
- "unique_contexts": 12336484,
6
- "total_transitions": 113951064
7
  }
 
2
  "context_size": 2,
3
  "variant": "word",
4
  "language": "arz",
5
+ "unique_contexts": 12454727,
6
+ "total_transitions": 114229127
7
  }