omarkamali committed
Commit b5a7140 · verified · 1 Parent(s): e015fc1

Upload all models and assets for br (latest)

This view is limited to 50 files because it contains too many changes. See raw diff.

Files changed (50)
  1. .gitattributes +1 -0
  2. README.md +209 -175
  3. models/embeddings/aligned/br_128d.bin +3 -0
  4. models/embeddings/aligned/br_128d.meta.json +1 -0
  5. models/embeddings/aligned/br_128d.projection.npy +3 -0
  6. models/embeddings/aligned/br_128d_metadata.json +8 -0
  7. models/embeddings/aligned/br_32d.bin +3 -0
  8. models/embeddings/aligned/br_32d.meta.json +1 -0
  9. models/embeddings/aligned/br_32d.projection.npy +3 -0
  10. models/embeddings/aligned/br_32d_metadata.json +8 -0
  11. models/embeddings/aligned/br_64d.bin +3 -0
  12. models/embeddings/aligned/br_64d.meta.json +1 -0
  13. models/embeddings/aligned/br_64d.projection.npy +3 -0
  14. models/embeddings/aligned/br_64d_metadata.json +8 -0
  15. models/embeddings/monolingual/br_128d.bin +2 -2
  16. models/embeddings/monolingual/br_128d_metadata.json +1 -1
  17. models/embeddings/monolingual/br_32d.bin +2 -2
  18. models/embeddings/monolingual/br_32d_metadata.json +1 -1
  19. models/embeddings/monolingual/br_64d.bin +2 -2
  20. models/embeddings/monolingual/br_64d_metadata.json +1 -1
  21. models/subword_markov/br_markov_ctx1_subword.parquet +2 -2
  22. models/subword_markov/br_markov_ctx1_subword_metadata.json +2 -2
  23. models/subword_markov/br_markov_ctx2_subword.parquet +2 -2
  24. models/subword_markov/br_markov_ctx2_subword_metadata.json +2 -2
  25. models/subword_markov/br_markov_ctx3_subword.parquet +2 -2
  26. models/subword_markov/br_markov_ctx3_subword_metadata.json +2 -2
  27. models/subword_markov/br_markov_ctx4_subword.parquet +2 -2
  28. models/subword_markov/br_markov_ctx4_subword_metadata.json +2 -2
  29. models/subword_ngram/br_2gram_subword.parquet +2 -2
  30. models/subword_ngram/br_2gram_subword_metadata.json +2 -2
  31. models/subword_ngram/br_3gram_subword.parquet +2 -2
  32. models/subword_ngram/br_3gram_subword_metadata.json +2 -2
  33. models/subword_ngram/br_4gram_subword.parquet +2 -2
  34. models/subword_ngram/br_4gram_subword_metadata.json +2 -2
  35. models/subword_ngram/br_5gram_subword.parquet +3 -0
  36. models/subword_ngram/br_5gram_subword_metadata.json +7 -0
  37. models/tokenizer/br_tokenizer_16k.model +2 -2
  38. models/tokenizer/br_tokenizer_16k.vocab +0 -0
  39. models/tokenizer/br_tokenizer_32k.model +2 -2
  40. models/tokenizer/br_tokenizer_32k.vocab +0 -0
  41. models/tokenizer/br_tokenizer_64k.model +2 -2
  42. models/tokenizer/br_tokenizer_64k.vocab +0 -0
  43. models/tokenizer/br_tokenizer_8k.model +2 -2
  44. models/tokenizer/br_tokenizer_8k.vocab +0 -0
  45. models/vocabulary/br_vocabulary.parquet +2 -2
  46. models/vocabulary/br_vocabulary_metadata.json +9 -9
  47. models/word_markov/br_markov_ctx1_word.parquet +2 -2
  48. models/word_markov/br_markov_ctx1_word_metadata.json +2 -2
  49. models/word_markov/br_markov_ctx2_word.parquet +2 -2
  50. models/word_markov/br_markov_ctx2_word_metadata.json +2 -2
.gitattributes CHANGED
@@ -39,3 +39,4 @@ visualizations/position_encoding_comparison.png filter=lfs diff=lfs merge=lfs -text
 visualizations/tsne_sentences.png filter=lfs diff=lfs merge=lfs -text
 visualizations/tsne_words.png filter=lfs diff=lfs merge=lfs -text
 visualizations/zipf_law.png filter=lfs diff=lfs merge=lfs -text
+visualizations/embedding_tsne_multilingual.png filter=lfs diff=lfs merge=lfs -text
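The added rule routes the new multilingual t-SNE image through Git LFS like the other visualization PNGs. On a local clone, a minimal sketch (not part of this commit) lists every pattern that `.gitattributes` hands to LFS:

```python
# Minimal sketch: print each path pattern that .gitattributes routes to Git LFS.
from pathlib import Path

for line in Path(".gitattributes").read_text(encoding="utf-8").splitlines():
    fields = line.split()
    if "filter=lfs" in fields[1:]:
        print(fields[0])  # e.g. visualizations/embedding_tsne_multilingual.png
```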
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
 language: br
-language_name: BR
+language_name: Breton
 language_family: celtic_brythonic
 tags:
 - wikilangs
@@ -10,11 +10,21 @@ tags:
 - n-gram
 - markov
 - wikipedia
+- feature-extraction
+- sentence-similarity
+- tokenization
+- n-grams
+- markov-chain
+- text-mining
+- fasttext
+- babelvec
+- vocabulous
+- vocabulary
 - monolingual
 - family-celtic_brythonic
 license: mit
 library_name: wikilangs
-pipeline_tag: feature-extraction
+pipeline_tag: text-generation
 datasets:
 - omarkamali/wikipedia-monthly
 dataset_info:
@@ -23,20 +33,20 @@ dataset_info:
 metrics:
 - name: best_compression_ratio
   type: compression
-  value: 3.786
+  value: 3.787
 - name: best_isotropy
   type: isotropy
-  value: 0.8171
+  value: 0.8154
 - name: vocabulary_size
   type: vocab
   value: 0
 generated: 2026-01-03
 ---

-# BR - Wikilangs Models
+# Breton - Wikilangs Models
 ## Comprehensive Research Report & Full Ablation Study

-This repository contains NLP models trained and evaluated by Wikilangs, specifically on **BR** Wikipedia data.
+This repository contains NLP models trained and evaluated by Wikilangs, specifically on **Breton** Wikipedia data.
 We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and word embeddings.

 ## 📋 Repository Contents
@@ -60,7 +70,7 @@ We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and word embeddings.
 - [3. Markov Chain Evaluation](#3-markov-chain-evaluation)
 - [4. Vocabulary Analysis](#4-vocabulary-analysis)
 - [5. Word Embeddings Evaluation](#5-word-embeddings-evaluation)
-- [6. Morphological Analysis (Experimental)](#6-morphological-analysis)
+- [6. Morphological Analysis (Experimental)](#6--morphological-analysis-experimental)
 - [7. Summary & Recommendations](#7-summary--recommendations)
 - [Metrics Glossary](#appendix-metrics-glossary--interpretation-guide)
 - [Visualizations Index](#visualizations-index)
@@ -80,47 +90,47 @@ We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and word embeddings.

 | Vocab Size | Compression | Avg Token Len | UNK Rate | Total Tokens |
 |------------|-------------|---------------|----------|--------------|
-| **8k** | 3.235x | 3.24 | 0.4490% | 793,680 |
-| **16k** | 3.460x | 3.46 | 0.4803% | 742,059 |
-| **32k** | 3.645x | 3.65 | 0.5060% | 704,331 |
-| **64k** | 3.786x 🏆 | 3.79 | 0.5255% | 678,179 |
+| **8k** | 3.238x | 3.24 | 0.4518% | 788,643 |
+| **16k** | 3.463x | 3.46 | 0.4832% | 737,391 |
+| **32k** | 3.647x | 3.65 | 0.5089% | 700,148 |
+| **64k** | 3.787x 🏆 | 3.79 | 0.5284% | 674,255 |

 ### Tokenization Examples

 Below are sample sentences tokenized with each vocabulary size:

-**Sample 1:** `Monsano zo ur gumun italian e proviñs Ancona, er Marche. Marche Proviñs Ancona`
+**Sample 1:** `Concetta Barra a oa ur ganerez hag un aktourez italian ha dreist-holl napolitane...`

 | Vocab | Tokens | Count |
 |-------|--------|-------|
-| 8k | `▁mon s ano ▁zo ▁ur ▁gumun ▁italian ▁e ▁proviñs ▁anc ... (+9 more)` | 19 |
-| 16k | `▁mons ano ▁zo ▁ur ▁gumun ▁italian ▁e ▁proviñs ▁ancona , ... (+6 more)` | 16 |
-| 32k | `▁mons ano ▁zo ▁ur ▁gumun ▁italian ▁e ▁proviñs ▁ancona , ... (+6 more)` | 16 |
-| 64k | `▁mons ano ▁zo ▁ur ▁gumun ▁italian ▁e ▁proviñs ▁ancona , ... (+6 more)` | 16 |
+| 8k | `▁conc etta ▁bar ra ▁a ▁oa ▁ur ▁ganerez ▁hag ▁un ... (+30 more)` | 40 |
+| 16k | `▁conc etta ▁barra ▁a ▁oa ▁ur ▁ganerez ▁hag ▁un ▁aktourez ... (+26 more)` | 36 |
+| 32k | `▁conc etta ▁barra ▁a ▁oa ▁ur ▁ganerez ▁hag ▁un ▁aktourez ... (+26 more)` | 36 |
+| 64k | `▁conc etta ▁barra ▁a ▁oa ▁ur ▁ganerez ▁hag ▁un ▁aktourez ... (+22 more)` | 32 |

-**Sample 2:** `San Asensio zo ur gumun e proviñs La Rioja en Spagn. Rioja`
+**Sample 2:** `Fénis zo ur gumun italian, e rannvro emren Traoñienn Aosta. Notennoù`

 | Vocab | Tokens | Count |
 |-------|--------|-------|
-| 8k | `▁san ▁as ens io ▁zo ▁ur ▁gumun ▁e ▁proviñs ▁la ... (+5 more)` | 15 |
-| 16k | `▁san ▁as ens io ▁zo ▁ur ▁gumun ▁e ▁proviñs ▁la ... (+5 more)` | 15 |
-| 32k | `▁san ▁as ens io ▁zo ▁ur ▁gumun ▁e ▁proviñs ▁la ... (+5 more)` | 15 |
-| 64k | `▁san ▁as ens io ▁zo ▁ur ▁gumun ▁e ▁proviñs ▁la ... (+5 more)` | 15 |
+| 8k | `▁f én is ▁zo ▁ur ▁gumun ▁italian , ▁e ▁rannvro ... (+6 more)` | 16 |
+| 16k | `▁f én is ▁zo ▁ur ▁gumun ▁italian , ▁e ▁rannvro ... (+5 more)` | 15 |
+| 32k | `▁f én is ▁zo ▁ur ▁gumun ▁italian , ▁e ▁rannvro ... (+5 more)` | 15 |
+| 64k | `▁fén is ▁zo ▁ur ▁gumun ▁italian , ▁e ▁rannvro ▁emren ... (+4 more)` | 14 |

-**Sample 3:** `Segusino zo ur gumun e proviñs Treviso e Veneto, en Italia.`
+**Sample 3:** `Cervera del Río Alhama zo ur gumun e kumuniezh emren La Rioja e Spagn.`

 | Vocab | Tokens | Count |
 |-------|--------|-------|
-| 8k | `▁seg us ino ▁zo ▁ur ▁gumun ▁e ▁proviñs ▁trev iso ... (+6 more)` | 16 |
-| 16k | `▁seg us ino ▁zo ▁ur ▁gumun ▁e ▁proviñs ▁treviso ▁e ... (+5 more)` | 15 |
-| 32k | `▁seg us ino ▁zo ▁ur ▁gumun ▁e ▁proviñs ▁treviso ▁e ... (+5 more)` | 15 |
-| 64k | `▁seg us ino ▁zo ▁ur ▁gumun ▁e ▁proviñs ▁treviso ▁e ... (+5 more)` | 15 |
+| 8k | `▁c erv era ▁del ▁río ▁al h ama ▁zo ▁ur ... (+9 more)` | 19 |
+| 16k | `▁cerv era ▁del ▁río ▁al h ama ▁zo ▁ur ▁gumun ... (+8 more)` | 18 |
+| 32k | `▁cerv era ▁del ▁río ▁al h ama ▁zo ▁ur ▁gumun ... (+8 more)` | 18 |
+| 64k | `▁cervera ▁del ▁río ▁alhama ▁zo ▁ur ▁gumun ▁e ▁kumuniezh ▁emren ... (+5 more)` | 15 |

 ### Key Findings

-- **Best Compression:** 64k achieves 3.786x compression
-- **Lowest UNK Rate:** 8k with 0.4490% unknown tokens
+- **Best Compression:** 64k achieves 3.787x compression
+- **Lowest UNK Rate:** 8k with 0.4518% unknown tokens
 - **Trade-off:** Larger vocabularies improve compression but increase model size
 - **Recommendation:** 32k vocabulary provides optimal balance for production use
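The `▁` word-boundary marker in the token samples above follows SentencePiece conventions, so the `.model` files in `models/tokenizer/` are presumably SentencePiece models. A minimal sketch under that assumption:

```python
# Hedged sketch: tokenize one of the report's samples with the 32k model,
# assuming the repo's .model files are standard SentencePiece models.
import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="models/tokenizer/br_tokenizer_32k.model")
text = "Fénis zo ur gumun italian, e rannvro emren Traoñienn Aosta."
pieces = sp.encode(text, out_type=str)
print(pieces, len(pieces))      # pieces and token count, cf. the tables above
print(len(text) / len(pieces))  # characters per token, cf. the Avg Token Len column
```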
 
@@ -137,12 +147,14 @@ Below are sample sentences tokenized with each vocabulary size:

 | N-gram | Variant | Perplexity | Entropy | Unique N-grams | Top-100 Coverage | Top-1000 Coverage |
 |--------|---------|------------|---------|----------------|------------------|-------------------|
-| **2-gram** | Word | 37,349 | 15.19 | 296,192 | 13.7% | 32.0% |
-| **2-gram** | Subword | 294 🏆 | 8.20 | 11,776 | 65.3% | 98.9% |
-| **3-gram** | Word | 128,487 | 16.97 | 570,380 | 5.9% | 19.5% |
-| **3-gram** | Subword | 2,726 | 11.41 | 81,142 | 23.8% | 68.1% |
-| **4-gram** | Word | 279,047 | 18.09 | 973,376 | 4.1% | 14.8% |
-| **4-gram** | Subword | 17,313 | 14.08 | 421,873 | 10.8% | 35.5% |
+| **2-gram** | Word | 37,064 | 15.18 | 295,690 | 13.7% | 32.1% |
+| **2-gram** | Subword | 293 🏆 | 8.19 | 11,777 | 65.4% | 98.9% |
+| **3-gram** | Word | 127,942 | 16.97 | 571,162 | 5.9% | 19.5% |
+| **3-gram** | Subword | 2,712 | 11.41 | 80,865 | 23.9% | 68.2% |
+| **4-gram** | Word | 277,916 | 18.08 | 975,958 | 4.1% | 14.9% |
+| **4-gram** | Subword | 17,204 | 14.07 | 420,279 | 10.8% | 35.6% |
+| **5-gram** | Word | 202,294 | 17.63 | 684,204 | 4.9% | 16.7% |
+| **5-gram** | Subword | 72,650 | 16.15 | 1,308,264 | 6.0% | 21.7% |

 ### Top 5 N-grams by Size

@@ -150,68 +162,88 @@ Below are sample sentences tokenized with each vocabulary size:

 | Rank | N-gram | Count |
 |------|--------|-------|
-| 1 | `e voe` | 59,782 |
-| 2 | `ar c` | 55,139 |
-| 3 | `a viz` | 53,711 |
-| 4 | `e oa` | 52,022 |
-| 5 | `d ar` | 47,935 |
+| 1 | `e voe` | 60,584 |
+| 2 | `ar c` | 55,004 |
+| 3 | `a viz` | 53,947 |
+| 4 | `e oa` | 52,533 |
+| 5 | `d ar` | 48,158 |

 **3-grams (Word):**

 | Rank | N-gram | Count |
 |------|--------|-------|
-| 1 | `zo ur gumun` | 17,678 |
-| 2 | `bro c hall` | 15,638 |
-| 3 | `a zo ur` | 15,315 |
-| 4 | `e oa bet` | 12,845 |
-| 5 | `ur gumun eus` | 8,897 |
+| 1 | `zo ur gumun` | 17,679 |
+| 2 | `bro c hall` | 15,683 |
+| 3 | `a zo ur` | 15,380 |
+| 4 | `e oa bet` | 13,023 |
+| 5 | `ur gumun eus` | 8,893 |

 **4-grams (Word):**

 | Rank | N-gram | Count |
 |------|--------|-------|
-| 1 | `zo ur gumun eus` | 8,261 |
-| 2 | `monumantoù ha traoù heverk` | 5,435 |
+| 1 | `zo ur gumun eus` | 8,258 |
+| 2 | `monumantoù ha traoù heverk` | 5,437 |
 | 3 | `a zo ur gumun` | 5,065 |
-| 4 | `zo ur gumun e` | 4,314 |
-| 5 | `monumant ar re varv` | 3,991 |
+| 4 | `zo ur gumun e` | 4,316 |
+| 5 | `monumant ar re varv` | 3,982 |
+
+**5-grams (Word):**
+
+| Rank | N-gram | Count |
+|------|--------|-------|
+| 1 | `a zo ur gumun eus` | 3,616 |
+| 2 | `ioc world bird list diwar` | 2,760 |
+| 3 | `world bird list diwar benn` | 2,760 |
+| 4 | `roadennoù ioc world bird list` | 2,759 |
+| 5 | `zo ur gumun eus italia` | 2,622 |

 **2-grams (Subword):**

 | Rank | N-gram | Count |
 |------|--------|-------|
-| 1 | `_ a` | 1,901,092 |
-| 2 | `_ e` | 1,675,345 |
-| 3 | `a n` | 1,608,231 |
-| 4 | `e _` | 1,592,896 |
-| 5 | `r _` | 1,428,493 |
+| 1 | `_ a` | 1,908,238 |
+| 2 | `_ e` | 1,681,083 |
+| 3 | `a n` | 1,609,135 |
+| 4 | `e _` | 1,599,725 |
+| 5 | `r _` | 1,429,762 |

 **3-grams (Subword):**

 | Rank | N-gram | Count |
 |------|--------|-------|
-| 1 | `a r _` | 640,562 |
-| 2 | `_ e _` | 639,818 |
-| 3 | `e t _` | 623,800 |
-| 4 | `_ a r` | 555,503 |
-| 5 | `e n n` | 467,995 |
+| 1 | `a r _` | 641,927 |
+| 2 | `_ e _` | 641,853 |
+| 3 | `e t _` | 627,577 |
+| 4 | `_ a r` | 556,810 |
+| 5 | `e n n` | 468,710 |

 **4-grams (Subword):**

 | Rank | N-gram | Count |
 |------|--------|-------|
-| 1 | `_ a r _` | 456,425 |
-| 2 | `_ a n _` | 280,111 |
-| 3 | `a n t _` | 269,237 |
-| 4 | `_ g a n` | 228,714 |
-| 5 | `_ h a _` | 221,977 |
+| 1 | `_ a r _` | 457,578 |
+| 2 | `_ a n _` | 280,457 |
+| 3 | `a n t _` | 268,610 |
+| 4 | `_ g a n` | 228,380 |
+| 5 | `_ h a _` | 223,259 |
+
+**5-grams (Subword):**
+
+| Rank | N-gram | Count |
+|------|--------|-------|
+| 1 | `_ g a n t` | 202,257 |
+| 2 | `g a n t _` | 193,123 |
+| 3 | `_ h a g _` | 134,751 |
+| 4 | `_ e u s _` | 130,235 |
+| 5 | `e t _ e _` | 103,216 |


 ### Key Findings

-- **Best Perplexity:** 2-gram (subword) with 294
+- **Best Perplexity:** 2-gram (subword) with 293
 - **Entropy Trend:** Decreases with larger n-grams (more predictable)
-- **Coverage:** Top-1000 patterns cover ~35% of corpus
+- **Coverage:** Top-1000 patterns cover ~22% of corpus
 - **Recommendation:** 4-gram or 5-gram for best predictive performance

 ---
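In the evaluation table above, Perplexity is exactly `2^Entropy` (2^8.19 ≈ 293 for the 2-gram subword model), so both figures can be recomputed from the shipped count tables. A hedged sketch; the `count` column name is an assumption to check against the parquet schema:

```python
# Hedged sketch: recompute entropy (bits) and perplexity from n-gram counts.
import numpy as np
import pandas as pd

df = pd.read_parquet("models/subword_ngram/br_2gram_subword.parquet")
p = df["count"] / df["count"].sum()       # relative frequency of each n-gram
entropy = float(-(p * np.log2(p)).sum())  # Shannon entropy
print(entropy, 2 ** entropy)              # perplexity = 2^entropy, ≈ 293 here
```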
@@ -227,14 +259,14 @@ Below are sample sentences tokenized with each vocabulary size:

 | Context | Variant | Avg Entropy | Perplexity | Branching Factor | Unique Contexts | Predictability |
 |---------|---------|-------------|------------|------------------|-----------------|----------------|
-| **1** | Word | 0.8900 | 1.853 | 7.60 | 546,189 | 11.0% |
-| **1** | Subword | 0.8953 | 1.860 | 5.85 | 8,402 | 10.5% |
-| **2** | Word | 0.3300 | 1.257 | 2.04 | 4,129,549 | 67.0% |
-| **2** | Subword | 0.6669 | 1.588 | 4.21 | 49,100 | 33.3% |
-| **3** | Word | 0.1561 | 1.114 | 1.34 | 8,377,190 | 84.4% |
-| **3** | Subword | 0.6656 | 1.586 | 3.74 | 206,555 | 33.4% |
-| **4** | Word | 0.0728 🏆 | 1.052 | 1.13 | 11,216,136 | 92.7% |
-| **4** | Subword | 0.6497 | 1.569 | 3.22 | 772,246 | 35.0% |
+| **1** | Word | 0.8873 | 1.850 | 7.57 | 546,965 | 11.3% |
+| **1** | Subword | 0.8951 | 1.860 | 5.84 | 8,419 | 10.5% |
+| **2** | Word | 0.3297 | 1.257 | 2.04 | 4,120,028 | 67.0% |
+| **2** | Subword | 0.6667 | 1.587 | 4.20 | 49,174 | 33.3% |
+| **3** | Word | 0.1564 | 1.115 | 1.35 | 8,357,037 | 84.4% |
+| **3** | Subword | 0.6634 | 1.584 | 3.73 | 206,424 | 33.7% |
+| **4** | Word | 0.0731 🏆 | 1.052 | 1.13 | 11,199,579 | 92.7% |
+| **4** | Subword | 0.6489 | 1.568 | 3.22 | 770,069 | 35.1% |

 ### Generated Text Samples (Word-based)

@@ -242,27 +274,27 @@ Below are text samples generated from each word-based Markov chain model:

 **Context Size 1:**

-1. `e breizh e vennozhioù diskouez an 18 muhelder bihanañ 34 evel caravan palace da vezañ ivez`
-2. `ar zastava e amzer tredeoged betek da vare a eskemm ha pterosaurus petra a ra an`
-3. `a oa un heñvelster gant dean bounce prison gang crime deep grand prix de france bleu`
+1. `e kastell aigneaux kantved merc h kannidi o devoa kemeret hent reter menezioù ezhomm da vont`
+2. `ar boblañs melestradurezh tud ar pif gadget de carnac et seigneur isaac baron met breinet gant`
+3. `a ra eus bro c haokaz ar fedon ar 25vet rujumant troadegiezhfichenn hiniennel memorial genweb egile`

 **Context Size 2:**

-1. `e voe embannet an testennoù klasel e vez da 800 000 den pe gant kraf ar revelezh`
-2. `ar c hembraeg mawr bras tolkien avat en doa bet ur gwadliñvadur e barzh ar c haramel`
-3. `a viz eost a oa ul livour hag un impalaer e voe kumun kernitron al lann e`
+1. `e voe azoet an oferenn rak miret eo bet troet e galleg a 346 pajennad a zeuas`
+2. `ar c haner en deus kumuniezhioù kumunioù beg ar skeul mañ zo levezonet gant friedrich dürrenmatt d`
+3. `a viz eost e departamant il ha gwilen bro roazhon bet ganet d ar mare se e`

 **Context Size 3:**

-1. `zo ur gumun en italia e proviñs cuneo etre kumunioù entracque ha valdieri anezhañ unan eus ugent lev...`
-2. `a zo ur bronneg geotdebrer a vev er meurvor atlantel belle isle distaget bɛl ˈaɪl e saozneg zo`
-3. `bro c hall zo un tiern ag ar morioù a zo e kenver ar sonerezh met evel ul`
+1. `zo ur gumun e spagn e kumuniezh valencia spagn pennadoù kar carlo ii charlez iañ karl iañ carlo`
+2. `a zo ur sammad a stennadur a en em astenn a ra erv kourland eus ledenez sambia lec`
+3. `bro c hall société des amis de benjamin péret pour un second manifeste communiste gant grandizo muni...`

 **Context Size 4:**

-1. `zo ur gumun eus meurgêr palermo e sikilia un 3 400 a dud zo enni o chom anezhi an`
-2. `monumantoù ha traoù heverk iliz katolik saint eustacheclochers de france douaroniezh emdroadur ar bo...`
-3. `a zo ur gumun eus italia e proviñs piacenza e rannvro emilia romagna ha 525 940 a dud o`
+1. `zo ur gumun eus departamant calvados e bro c hall douaroniezh armerzh emdroadur ar boblañs melestrad...`
+2. `monumantoù ha traoù heverk iliz katolik sant albin ners douaroniezh emdroadur ar boblañs cassini hag...`
+3. `a zo ur gumun eus departamant pas de calais bro c hall istor armerzh kompagnunezh mengleuzioù bruay ...`


 ### Generated Text Samples (Subword-based)

@@ -271,34 +303,34 @@ Below are text samples generated from each subword-based Markov chain model:

 **Context Size 1:**

-1. `_t_onur_b_el_gan`
-2. `er_pakoumiat_g_v`
-3. `adoù-stiz_aleuri`
+1. `_cheunoù_wez:_be`
+2. `ere_zharndütren_`
+3. `añs_t.lalel_da_k`

 **Context Size 2:**

-1. `_a_un_erl_ezenner`
-2. `_e_he_c'hhn_/_niz`
-3. `an_ero_liged_;_ev`
+1. `_amm_da_gant_ges_`
+2. `_evez._marezal_pe`
+3. `annoù_art,_pag_ga`

 **Context Size 3:**

-1. `ar_bretek_ilioù,_o`
-2. `_e_pyrrarkva,_leon`
-3. `et_gant_franne,_ga`
+1. `ar_senner._levelet`
+2. `_e_rout_-_bloareku`
+3. `et_en_affarink_d’a`

 **Context Size 4:**

-1. `_ar_spesadoù_war_ar`
-2. `_an_emsavid_fy_nhad`
-3. `ant_an_alamanentiad`
+1. `_ar_solinago,_mab_s`
+2. `_an_ilizoù_sir_krei`
+3. `ant_bet_kemeret_an_`


 ### Key Findings

 - **Best Predictability:** Context-4 (word) with 92.7% predictability
 - **Branching Factor:** Decreases with context size (more deterministic)
-- **Memory Trade-off:** Larger contexts require more storage (772,246 contexts)
+- **Memory Trade-off:** Larger contexts require more storage (770,069 contexts)
 - **Recommendation:** Context-3 or Context-4 for text generation

 ---
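The word-level samples above can be reproduced in spirit by weighted sampling from the shipped transition tables. A hedged sketch, assuming a (`context`, `next`, `count`) column layout in the parquet files:

```python
# Hedged sketch of Markov generation from the context-2 word table.
import random
import pandas as pd

df = pd.read_parquet("models/word_markov/br_markov_ctx2_word.parquet")
out = ["zo", "ur"]                      # seed with a frequent context, cf. the samples
for _ in range(15):
    rows = df[df["context"] == " ".join(out[-2:])]
    if rows.empty:                      # context unseen in training: stop early
        break
    out.append(random.choices(rows["next"].tolist(),
                              weights=rows["count"].tolist())[0])
print(" ".join(out))
```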
@@ -314,64 +346,64 @@ Below are text samples generated from each subword-based Markov chain model:

 | Metric | Value |
 |--------|-------|
-| Vocabulary Size | 242,115 |
-| Total Tokens | 15,327,088 |
-| Mean Frequency | 63.30 |
+| Vocabulary Size | 241,991 |
+| Total Tokens | 15,343,130 |
+| Mean Frequency | 63.40 |
 | Median Frequency | 4 |
-| Frequency Std Dev | 2500.68 |
+| Frequency Std Dev | 2509.84 |

 ### Most Common Words

 | Rank | Word | Frequency |
 |------|------|-----------|
-| 1 | e | 701,948 |
-| 2 | ar | 517,584 |
-| 3 | a | 464,667 |
-| 4 | an | 326,300 |
-| 5 | ha | 228,454 |
-| 6 | gant | 189,759 |
-| 7 | c | 186,830 |
-| 8 | en | 181,309 |
-| 9 | da | 170,732 |
-| 10 | ur | 158,708 |
+| 1 | e | 703,890 |
+| 2 | ar | 518,682 |
+| 3 | a | 468,243 |
+| 4 | an | 326,691 |
+| 5 | ha | 229,662 |
+| 6 | gant | 189,178 |
+| 7 | c | 187,433 |
+| 8 | en | 180,997 |
+| 9 | da | 171,218 |
+| 10 | ur | 158,920 |

 ### Least Common Words (from vocabulary)

 | Rank | Word | Frequency |
 |------|------|-----------|
-| 1 | nfpb | 2 |
-| 2 | konjic | 2 |
-| 3 | formoraich | 2 |
-| 4 | vsn | 2 |
-| 5 | moldavie | 2 |
-| 6 | yankovich | 2 |
-| 7 | gueydon | 2 |
-| 8 | tréhouart | 2 |
-| 9 | bouguen | 2 |
-| 10 | shimosa | 2 |
+| 1 | veyne | 2 |
+| 2 | wga | 2 |
+| 3 | codreanu | 2 |
+| 4 | dumitru | 2 |
+| 5 | maghrebonkoud | 2 |
+| 6 | fidefide | 2 |
+| 7 | ougandachess | 2 |
+| 8 | cytonn | 2 |
+| 9 | malinga | 2 |
+| 10 | ablainville | 2 |

 ### Zipf's Law Analysis

 | Metric | Value |
 |--------|-------|
-| Zipf Coefficient | 1.1106 |
-| R² (Goodness of Fit) | 0.996763 |
+| Zipf Coefficient | 1.1114 |
+| R² (Goodness of Fit) | 0.996756 |
 | Adherence Quality | **excellent** |

 ### Coverage Analysis

 | Top N Words | Coverage |
 |-------------|----------|
-| Top 100 | 41.7% |
+| Top 100 | 41.9% |
 | Top 1,000 | 65.8% |
-| Top 5,000 | 80.4% |
-| Top 10,000 | 85.6% |
+| Top 5,000 | 80.5% |
+| Top 10,000 | 85.7% |

 ### Key Findings

 - **Zipf Compliance:** R²=0.9968 indicates excellent adherence to Zipf's law
-- **High Frequency Dominance:** Top 100 words cover 41.7% of corpus
-- **Long Tail:** 232,115 words needed for remaining 14.4% coverage
+- **High Frequency Dominance:** Top 100 words cover 41.9% of corpus
+- **Long Tail:** 231,991 words needed for remaining 14.3% coverage

 ---
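The Zipf coefficient and R² above come from a straight-line fit of log frequency against log rank over the vocabulary table. A hedged sketch; the `frequency` column name is an assumption:

```python
# Hedged sketch of the Zipf's-law fit reported above.
import numpy as np
import pandas as pd

freq = (pd.read_parquet("models/vocabulary/br_vocabulary.parquet")["frequency"]
        .sort_values(ascending=False)
        .to_numpy(dtype=float))
x = np.log(np.arange(1, len(freq) + 1))  # log rank
y = np.log(freq)                         # log frequency
slope, _ = np.polyfit(x, y, 1)
r2 = np.corrcoef(x, y)[0, 1] ** 2
print(-slope, r2)                        # ≈ 1.11 and ≈ 0.997 per the table above
```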
  ## 5. Word Embeddings Evaluation
@@ -387,37 +419,40 @@ Below are text samples generated from each subword-based Markov chain model:

 ### 5.1 Cross-Lingual Alignment

-> *Note: Multilingual alignment visualization not available for this language.*
+![Alignment Quality](visualizations/embedding_alignment_quality.png)
+
+![Multilingual t-SNE](visualizations/embedding_tsne_multilingual.png)


 ### 5.2 Model Comparison

 | Model | Dimension | Isotropy | Semantic Density | Alignment R@1 | Alignment R@10 |
 |-------|-----------|----------|------------------|---------------|----------------|
-| **mono_32d** | 32 | 0.8120 | 0.3605 | N/A | N/A |
-| **mono_64d** | 64 | 0.8171 🏆 | 0.2761 | N/A | N/A |
-| **mono_128d** | 128 | 0.7922 | 0.2119 | N/A | N/A |
+| **mono_32d** | 32 | 0.8117 | 0.3810 | N/A | N/A |
+| **mono_64d** | 64 | 0.8154 🏆 | 0.2792 | N/A | N/A |
+| **mono_128d** | 128 | 0.8010 | 0.2076 | N/A | N/A |
+| **aligned_32d** | 32 | 0.8117 | 0.3700 | 0.2440 | 0.6460 |
+| **aligned_64d** | 64 | 0.8154 | 0.2752 | 0.3920 | 0.7600 |
+| **aligned_128d** | 128 | 0.8010 | 0.2094 | 0.5340 | 0.8640 |

 ### Key Findings

-- **Best Isotropy:** mono_64d with 0.8171 (more uniform distribution)
-- **Semantic Density:** Average pairwise similarity of 0.2828. Lower values indicate better semantic separation.
-- **Alignment Quality:** No aligned models evaluated in this run.
+- **Best Isotropy:** mono_64d with 0.8154 (more uniform distribution)
+- **Semantic Density:** Average pairwise similarity of 0.2871. Lower values indicate better semantic separation.
+- **Alignment Quality:** Aligned models achieve up to 53.4% R@1 in cross-lingual retrieval.
 - **Recommendation:** 128d aligned for best cross-lingual performance

 ---
 ## 6. Morphological Analysis (Experimental)

-> ⚠️ **Warning:** This language shows low morphological productivity. The statistical signals used for this analysis may be noisy or less reliable than for morphologically rich languages.
-
 This section presents an automated morphological analysis derived from the statistical divergence between word-level and subword-level models. By analyzing where subword predictability spikes and where word-level coverage fails, we can infer linguistic structures without supervised data.

 ### 6.1 Productivity & Complexity

 | Metric | Value | Interpretation | Recommendation |
 |--------|-------|----------------|----------------|
-| Productivity Index | **0.000** | Low morphological productivity | ⚠️ Likely unreliable |
-| Idiomaticity Gap | **-1.000** | Low formulaic content | - |
+| Productivity Index | **5.000** | High morphological productivity | Reliable analysis |
+| Idiomaticity Gap | **-0.232** | Low formulaic content | - |

 ### 6.2 Affix Inventory (Productive Units)

@@ -430,12 +465,11 @@ These are the most productive prefixes and suffixes identified by sampling the v
 #### Productive Suffixes
 | Suffix | Examples |
 |--------|----------|
-| `-s` | mariånas, gilgamès, battalions |
-| `-er` | tufer, hutier, beaver |
-| `-où` | damkanadoù, barrennoù, heitioù |
-| `-es` | tarbes, marcondes, cordes |
-| `-us` | luchinus, menenius, fulmarus |
-| `-en` | weyden, minchen, wageningen |
+| `-s` | wolves, hobbs, cassis |
+| `-où` | gwallzarvoudoù, emstummoù, pellgomzioù |
+| `-us` | tarphonomus, benildus, gigantorhinus |
+| `-er` | hompozer, siger, geschwister |
+| `-es` | wolves, béssèges, fontenailles |

 ### 6.3 Bound Stems (Lexical Roots)

@@ -443,18 +477,18 @@ Bound stems are high-frequency subword units that are semantically cohesive but

 | Stem | Cohesion | Substitutability | Examples |
 |------|----------|------------------|----------|
-| `tion` | 2.51x | 77 contexts | tione, metion, aktion |
-| `emba` | 2.14x | 41 contexts | pemba, emban, bemba |
-| `adoù` | 1.80x | 74 contexts | dadoù, kadoù, zadoù |
-| `nnet` | 1.78x | 70 contexts | annet, bonnet, linnet |
+| `tion` | 2.41x | 78 contexts | tione, eetion, motion |
+| `adoù` | 2.03x | 74 contexts | tadoù, padoù, hadoù |
+| `emba` | 2.26x | 40 contexts | emban, pemba, bemba |
 | `iamm` | 2.35x | 24 contexts | liamm, fiamma, fiamme |
-| `ouar` | 1.48x | 126 contexts | douar, zouar, mouar |
-| `nnad` | 1.52x | 97 contexts | bennad, rannad, hannad |
-| `zhio` | 1.87x | 40 contexts | uzhioù, lezhioù, bezhioù |
-| `zhañ` | 1.91x | 35 contexts | ezhañ, kozhañ, dizhañ |
-| `nnoù` | 1.84x | 39 contexts | tennoù, vannoù, bennoù |
-| `hone` | 1.81x | 40 contexts | honeg, khone, dhone |
-| `reze` | 1.46x | 94 contexts | breze, dreze, rezet |
+| `ouar` | 1.52x | 126 contexts | mouar, zouar, bouar |
+| `nnet` | 1.68x | 71 contexts | annet, rannet, rennet |
+| `nnad` | 1.53x | 98 contexts | mennad, gannad, vennad |
+| `zhañ` | 1.96x | 35 contexts | ezhañ, tizhañ, dizhañ |
+| `reze` | 1.52x | 94 contexts | rezet, dreze, breze |
+| `ntañ` | 1.75x | 51 contexts | antaño, vontañ, wintañ |
+| `nnoù` | 1.87x | 38 contexts | vannoù, gennoù, pennoù |
+| `iwar` | 2.55x | 13 contexts | diwar, ziwar, siward |

 ### 6.4 Affix Compatibility (Co-occurrence)

@@ -469,26 +503,26 @@ Using **Recursive Hierarchical Substitutability**, we decompose complex words in

 | Word | Suggested Split | Confidence | Stem |
 |------|-----------------|------------|------|
-| eildelwennoù | **`eildelwenn-où`** | 4.5 | `eildelwenn` |
-| hejadennoù | **`hejadenn-où`** | 4.5 | `hejadenn` |
-| wissenschaften | **`wissenschaft-en`** | 4.5 | `wissenschaft` |
-| beauvaisen | **`beauvais-en`** | 4.5 | `beauvais` |
-| wellaennoù | **`wellaenn-où`** | 4.5 | `wellaenn` |
-| antoninus | **`antonin-us`** | 4.5 | `antonin` |
-| pluñvennoù | **`pluñvenn-où`** | 4.5 | `pluñvenn` |
-| kementadoù | **`kementad-où`** | 4.5 | `kementad` |
-| compositores | **`compositor-es`** | 4.5 | `compositor` |
-| garidelloù | **`garidell-où`** | 4.5 | `garidell` |
-| hromozomoù | **`hromozom-où`** | 4.5 | `hromozom` |
-| reolennoù | **`reolenn-où`** | 4.5 | `reolenn` |
-| barringer | **`barring-er`** | 4.5 | `barring` |
-| diamantes | **`diamant-es`** | 4.5 | `diamant` |
-| stradivarius | **`stradivari-us`** | 4.5 | `stradivari` |
+| heureuses | **`heure-us-es`** | 6.0 | `heure` |
+| burzhudoù | **`burzhud-où`** | 4.5 | `burzhud` |
+| ziarbennoù | **`ziarbenn-où`** | 4.5 | `ziarbenn` |
+| goudeskridoù | **`goudeskrid-où`** | 4.5 | `goudeskrid` |
+| nijadegoù | **`nijadeg-où`** | 4.5 | `nijadeg` |
+| ziskoulmoù | **`ziskoulm-où`** | 4.5 | `ziskoulm` |
+| dasprenus | **`daspren-us`** | 4.5 | `daspren` |
+| tradutores | **`tradutor-es`** | 4.5 | `tradutor` |
+| drubuilhoù | **`drubuilh-où`** | 4.5 | `drubuilh` |
+| reichsmarkoù | **`reichsmark-où`** | 4.5 | `reichsmark` |
+| variantennoù | **`variantenn-où`** | 4.5 | `variantenn` |
+| livuzennoù | **`livuzenn-où`** | 4.5 | `livuzenn` |
+| kompozadoù | **`kompozad-où`** | 4.5 | `kompozad` |
+| viñsaskelloù | **`viñsaskell-où`** | 4.5 | `viñsaskell` |
+| kellennoù | **`kellenn-où`** | 4.5 | `kellenn` |

 ### 6.6 Linguistic Interpretation

 > **Automated Insight:**
-The language BR appears to be more isolating or has a highly fixed vocabulary. Word-level models perform nearly as well as subword models, indicating fewer productive morphological processes.
+The language Breton shows high morphological productivity. The subword models are significantly more efficient than word models, suggesting a rich system of affixation or compounding.

 ---
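The suggested splits in §6.5 follow a recoverable pattern: a `stem-suffix` split is kept when the stem is independently attested alongside a productive suffix. A toy, hedged illustration — the vocabulary and acceptance rule here are illustrative, not the pipeline's actual scoring:

```python
# Toy sketch in the spirit of the §6.5 suffix splits; constants are illustrative.
SUFFIXES = sorted(["où", "us", "es", "er", "en", "enn"], key=len, reverse=True)
VOCAB = {"burzhud", "burzhudoù", "daspren", "dasprenus", "variantenn", "variantennoù"}

def split(word: str) -> str:
    for suf in SUFFIXES:
        stem = word[: -len(suf)]
        if word.endswith(suf) and stem in VOCAB:
            return f"{stem}-{suf}"  # accept: the stem is attested on its own
    return word                     # no confident split

print(split("burzhudoù"))   # burzhud-où, as in the table above
print(split("dasprenus"))   # daspren-us
```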
  ## 7. Summary & Recommendations
@@ -500,7 +534,7 @@ The language BR appears to be more isolating or has a highly fixed vocabulary. W
 | Component | Recommended | Rationale |
 |-----------|-------------|-----------|
 | Tokenizer | **64k BPE** | Best compression (3.79x) |
-| N-gram | **2-gram** | Lowest perplexity (294) |
+| N-gram | **2-gram** | Lowest perplexity (293) |
 | Markov | **Context-4** | Highest predictability (92.7%) |
 | Embeddings | **100d** | Balanced semantic capture and isotropy |

@@ -715,4 +749,4 @@ MIT License - Free for academic and commercial use.
 ---
 *Generated by Wikilangs Models Pipeline*

-*Report Date: 2026-01-03 08:48:16*
+*Report Date: 2026-01-03 20:37:28*
models/embeddings/aligned/br_128d.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9cf7029aeb871b95976d657332c7979955b3472f5cc997911ea5845326eed098
+size 1181336739

models/embeddings/aligned/br_128d.meta.json ADDED
@@ -0,0 +1 @@
+{"lang": "br", "dim": 128, "max_seq_len": 512, "is_aligned": true}

models/embeddings/aligned/br_128d.projection.npy ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8019b49dd16fa1eaea2d6883a25739f54360b9485c5721e59f5b853a7e93eca8
+size 65664

models/embeddings/aligned/br_128d_metadata.json ADDED
@@ -0,0 +1,8 @@
+{
+  "language": "br",
+  "dimension": 128,
+  "version": "aligned",
+  "hub_language": "en",
+  "seed_vocab_size": 63876,
+  "vocab_size": 151042
+}

models/embeddings/aligned/br_32d.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5a04434da293f96ee1cf3159f4953ae9e70374e388480d3f97da4466cad588f
+size 297336483

models/embeddings/aligned/br_32d.meta.json ADDED
@@ -0,0 +1 @@
+{"lang": "br", "dim": 32, "max_seq_len": 512, "is_aligned": true}

models/embeddings/aligned/br_32d.projection.npy ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c830997beb7d49617d22238749dab4c67739138aadce1e34c8d4ab91871baafd
+size 4224

models/embeddings/aligned/br_32d_metadata.json ADDED
@@ -0,0 +1,8 @@
+{
+  "language": "br",
+  "dimension": 32,
+  "version": "aligned",
+  "hub_language": "en",
+  "seed_vocab_size": 63876,
+  "vocab_size": 151042
+}

models/embeddings/aligned/br_64d.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38a0702c9382196b1aa4715b46199c5a1a04dc269571320f1da9c5b35eed7285
+size 592003235

models/embeddings/aligned/br_64d.meta.json ADDED
@@ -0,0 +1 @@
+{"lang": "br", "dim": 64, "max_seq_len": 512, "is_aligned": true}

models/embeddings/aligned/br_64d.projection.npy ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e234edf9a9b2ba2bdd409b023eaafcb3f40bcaf318174e7fc979aa6fa32277b5
+size 16512

models/embeddings/aligned/br_64d_metadata.json ADDED
@@ -0,0 +1,8 @@
+{
+  "language": "br",
+  "dimension": 64,
+  "version": "aligned",
+  "hub_language": "en",
+  "seed_vocab_size": 63876,
+  "vocab_size": 151042
+}
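The `*.projection.npy` files added above are small square matrices: 65,664 bytes is 128×128 float32 values plus the NumPy header, and the 32d/64d sizes match likewise. This suggests the aligned variants are monolingual vectors passed through a linear map into the shared English-hub space scored in §5.2. A hedged sketch; which side the matrix multiplies on, and the source of the input vector, are assumptions:

```python
# Hedged sketch: map a stand-in Breton vector into the aligned space.
import numpy as np

W = np.load("models/embeddings/aligned/br_128d.projection.npy")  # shape (128, 128)
vec = np.random.randn(128).astype(np.float32)  # placeholder for a real word vector
aligned = vec @ W                              # linear alignment map (side assumed)
aligned /= np.linalg.norm(aligned)             # unit norm for cosine retrieval (R@1/R@10)
print(W.shape, aligned.shape)
```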
models/embeddings/monolingual/br_128d.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2cc2dcf0e5dbf7b0a47f5b7b84203934a4ef4a94c793c7cf148e47bcf6831e94
-size 1181713085
+oid sha256:9cf7029aeb871b95976d657332c7979955b3472f5cc997911ea5845326eed098
+size 1181336739

models/embeddings/monolingual/br_128d_metadata.json CHANGED
@@ -11,5 +11,5 @@
     "encoding_method": "rope",
     "dim": 128
   },
-  "vocab_size": 151402
+  "vocab_size": 151042
 }

models/embeddings/monolingual/br_32d.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:563c972d1b01c4444517fc5d7b7951b8d9f5cf9c450bf8bfa161f4a7504bc953
-size 297436349
+oid sha256:a5a04434da293f96ee1cf3159f4953ae9e70374e388480d3f97da4466cad588f
+size 297336483

models/embeddings/monolingual/br_32d_metadata.json CHANGED
@@ -11,5 +11,5 @@
     "encoding_method": "rope",
     "dim": 32
   },
-  "vocab_size": 151402
+  "vocab_size": 151042
 }

models/embeddings/monolingual/br_64d.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:64947ec6dfc340b15fe1245dfbf27eb885378a9c32be40f616fce71e20a67416
-size 592195261
+oid sha256:38a0702c9382196b1aa4715b46199c5a1a04dc269571320f1da9c5b35eed7285
+size 592003235

models/embeddings/monolingual/br_64d_metadata.json CHANGED
@@ -11,5 +11,5 @@
     "encoding_method": "rope",
     "dim": 64
   },
-  "vocab_size": 151402
+  "vocab_size": 151042
 }
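Every `.bin` and `.parquet` diff in this commit is a Git LFS pointer: a three-line text stub (`version`, `oid`, `size`) whose payload is fetched at checkout. On a clone where a file has not been smudged yet, a minimal sketch reads the fields back:

```python
# Minimal sketch: parse a Git LFS pointer file into its oid and size fields.
from pathlib import Path

def read_pointer(path: str) -> tuple[str, int]:
    fields = dict(line.split(" ", 1)
                  for line in Path(path).read_text().splitlines() if line)
    return fields["oid"], int(fields["size"])

oid, size = read_pointer("models/embeddings/monolingual/br_128d.bin")
print(oid, f"{size / 1e9:.2f} GB")  # ≈ 1.18 GB for the 128d vectors
```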
models/subword_markov/br_markov_ctx1_subword.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:364c87eeed84e49a322b272f9060cbaf46e7cf91d5fc9cf833ebcd76d814171e
-size 371383
+oid sha256:76e6fae95f5a61fb627737e66053e388ea5468f419a8ba3f08957a4a4383bec5
+size 372500

models/subword_markov/br_markov_ctx1_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "context_size": 1,
   "variant": "subword",
   "language": "br",
-  "unique_contexts": 8402,
-  "total_transitions": 88562222
+  "unique_contexts": 8419,
+  "total_transitions": 88614190
 }

models/subword_markov/br_markov_ctx2_subword.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3b2ffa4ddd6ad6edd05335dca480707c3b64c0a14c509081d1d74dfe72edc981
-size 1665166
+oid sha256:3dd84b5d12129dd75fbbc2ec5327bc62e709e34ec5ff585646adf2844a3251ab
+size 1701096

models/subword_markov/br_markov_ctx2_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "context_size": 2,
   "variant": "subword",
   "language": "br",
-  "unique_contexts": 49100,
-  "total_transitions": 88473399
+  "unique_contexts": 49174,
+  "total_transitions": 88524855
 }

models/subword_markov/br_markov_ctx3_subword.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:692d3af21734dce1c8cf7fcea827b781d47ef5c8b25654782ff6145d753309c2
-size 6642409
+oid sha256:ccc7aec735112a7272c9a1b866452f6de1fc15a32bc592b84c7c2954d8565f0d
+size 6611427

models/subword_markov/br_markov_ctx3_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "context_size": 3,
   "variant": "subword",
   "language": "br",
-  "unique_contexts": 206555,
-  "total_transitions": 88384576
+  "unique_contexts": 206424,
+  "total_transitions": 88435520
 }

models/subword_markov/br_markov_ctx4_subword.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:02fe0d42cac9d5c8869d073c8570acc4ae0f693f23327c55de4b987d7ddb8e52
-size 20533909
+oid sha256:9d1c38f7192e08d688c3c4b1b4d47f77efc324b960e025aaf54c352f7c68a3af
+size 20565163

models/subword_markov/br_markov_ctx4_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "context_size": 4,
   "variant": "subword",
   "language": "br",
-  "unique_contexts": 772246,
-  "total_transitions": 88295753
+  "unique_contexts": 770069,
+  "total_transitions": 88346185
 }

models/subword_ngram/br_2gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7aa9a0309201a5bff85bb732039ec09cee34921527090479840454db58c6414f
-size 154793
+oid sha256:6100bec355398114589c1a591ef7cce0aa74534699fa7dcd3bc536e8434f08fd
+size 154502

models/subword_ngram/br_2gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "n": 2,
   "variant": "subword",
   "language": "br",
-  "unique_ngrams": 11776,
-  "total_ngrams": 88562222
+  "unique_ngrams": 11777,
+  "total_ngrams": 88614190
 }

models/subword_ngram/br_3gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3f11838b2e1dd3db7e5060c435f0ed9b2d633720992bd49d8414dc3ebb1947f1
-size 1025779
+oid sha256:23ba302a7d19117f6743e8131a78e057e141e0319cb42d9352ce8baf3d888b02
+size 1028682

models/subword_ngram/br_3gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "n": 3,
   "variant": "subword",
   "language": "br",
-  "unique_ngrams": 81142,
-  "total_ngrams": 88473399
+  "unique_ngrams": 80865,
+  "total_ngrams": 88524855
 }

models/subword_ngram/br_4gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c0662a30ed0f866ba0ef46fba70681b5e219c20b4d5f818869aeaf346ad95cea
-size 4921369
+oid sha256:ed9eaf1dc00b86ec9e962f9ce0fab70ded47d94549dfe615cc34515caa8d2149
+size 4904958

models/subword_ngram/br_4gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "n": 4,
   "variant": "subword",
   "language": "br",
-  "unique_ngrams": 421873,
-  "total_ngrams": 88384576
+  "unique_ngrams": 420279,
+  "total_ngrams": 88435520
 }
models/subword_ngram/br_5gram_subword.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d24c8a32466633aad19b5685f4f96e93beed6daf68754b6913a335d6f3be528
+size 15681978

models/subword_ngram/br_5gram_subword_metadata.json ADDED
@@ -0,0 +1,7 @@
+{
+  "n": 5,
+  "variant": "subword",
+  "language": "br",
+  "unique_ngrams": 1308264,
+  "total_ngrams": 88346185
+}
models/tokenizer/br_tokenizer_16k.model CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3a34b2820290797923a2b18aa49d5154f1b7c7d12b7564cace04d896d97c6e53
-size 502646
+oid sha256:83fb88fe9bf797ec7100535d626d2281591253ac7a7192ec8f714ef1275449bc
+size 502780

models/tokenizer/br_tokenizer_16k.vocab CHANGED
The diff for this file is too large to render. See raw diff.

models/tokenizer/br_tokenizer_32k.model CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:26ea0db3a85a9e9055b43984cf4ac687e795f5825553ccc302a5abc29d9b8273
-size 774769
+oid sha256:911df452c5729a9800392f02d004ab34704c39363199a59b81c398dc85d0ae9d
+size 775074

models/tokenizer/br_tokenizer_32k.vocab CHANGED
The diff for this file is too large to render. See raw diff.

models/tokenizer/br_tokenizer_64k.model CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d3d3d03f79fa86777af32c2e613a308e2b7c627fbf49f5d7a3c97678ebafad13
-size 1335604
+oid sha256:1c4bb37ad9f7da618a62705a639a7e995aa500aac4d54128b97c2e200a0c9286
+size 1336555

models/tokenizer/br_tokenizer_64k.vocab CHANGED
The diff for this file is too large to render. See raw diff.

models/tokenizer/br_tokenizer_8k.model CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b6a1ce4f9267e8a3f8951e0d2dfd0751006c25871dfdcd890cceebf4f01248eb
-size 370491
+oid sha256:416bd093915a7ce9b3d37f55d4e6f19b655bc9cb93ca1ad42e442fda1839cb70
+size 370573
models/tokenizer/br_tokenizer_8k.vocab CHANGED
The diff for this file is too large to render. See raw diff
 
models/vocabulary/br_vocabulary.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5ace7ffc403a7ed53e5646074aa9d3b0acceee4c23eb067355b372c233b0488f
-size 3765893
+oid sha256:9853208ecd8f588061cdc38e2b3046ab1d262a5ec3dfd25bf4a60397033b1b26
+size 3762757
models/vocabulary/br_vocabulary_metadata.json CHANGED
@@ -1,17 +1,17 @@
 {
   "language": "br",
-  "vocabulary_size": 242115,
+  "vocabulary_size": 241991,
   "variant": "full",
   "statistics": {
-    "type_token_ratio": 0.03497837029849385,
+    "type_token_ratio": 0.034990028236873805,
     "coverage": {
-      "top_100": 0.40905545676087623,
-      "top_1000": 0.6447280425360034,
-      "top_5000": 0.7885968080597011,
-      "top_10000": 0.8392496909814169
+      "top_100": 0.41033685445941886,
+      "top_1000": 0.6455537132284644,
+      "top_5000": 0.7892532453361742,
+      "top_10000": 0.8397786344630703
     },
-    "hapax_count": 304658,
-    "hapax_ratio": 0.5571928387100314,
-    "total_documents": 88823
+    "hapax_count": 305557,
+    "hapax_ratio": 0.5580460525835178,
+    "total_documents": 89335
   }
 }
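The coverage block above can be spot-checked against the vocabulary parquet. A hedged sketch; the `frequency` column name is an assumption, and since the report's least common words bottom out at frequency 2, hapaxes may be stored elsewhere, so figures need not match exactly:

```python
# Hedged sketch: recompute vocabulary size and top-N coverage.
import pandas as pd

freq = pd.read_parquet("models/vocabulary/br_vocabulary.parquet")["frequency"]
total = freq.sum()
print("vocabulary_size:", len(freq))
for n in (100, 1000, 5000, 10000):
    print(f"top_{n} coverage:", round(freq.nlargest(n).sum() / total, 4))
```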
models/word_markov/br_markov_ctx1_word.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:be693644d5a0ebbb01f9e30236eea7c454a9d88268b56513e574925b7ff00051
-size 35952141
+oid sha256:de721e8dee95a4e4ce94021f78ae05a169c11550ad6b494d9e6627b8b2c68df2
+size 35934580

models/word_markov/br_markov_ctx1_word_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "context_size": 1,
   "variant": "word",
   "language": "br",
-  "unique_contexts": 546189,
-  "total_transitions": 15542923
+  "unique_contexts": 546965,
+  "total_transitions": 15559352
 }

models/word_markov/br_markov_ctx2_word.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a8fda1c756cb7fc2b4ab6e70c2cd1d67819d416d7e280f2e0548839fb1da8d2f
-size 97354526
+oid sha256:25eda57d96503b9d2a3e57951e6143e326c6fd10fdb26ddde28002172acb1f49
+size 97522977

models/word_markov/br_markov_ctx2_word_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "context_size": 2,
   "variant": "word",
   "language": "br",
-  "unique_contexts": 4129549,
-  "total_transitions": 15454100
+  "unique_contexts": 4120028,
+  "total_transitions": 15470017
 }