Datasets: Add cognate pairs v2 (21.5M pairs) + Phase 8 audit fixes

Files changed:
- .gitattributes +2 -0
- data/training/cognate_pairs/cognate_pairs_borrowing.tsv +3 -0
- data/training/cognate_pairs/cognate_pairs_inherited.tsv +3 -0
- data/training/cognate_pairs/cognate_pairs_similarity.tsv +3 -0
- data/training/lexicons/aav-pro.tsv +2 -2
- data/training/lexicons/ang.tsv +2 -2
- data/training/lexicons/cel-pro.tsv +2 -2
- data/training/lexicons/cop.tsv +2 -2
- data/training/lexicons/gem-pro.tsv +2 -2
- data/training/lexicons/hbo.tsv +2 -2
- data/training/lexicons/ira-pro.tsv +2 -2
- data/training/lexicons/itc-pro.tsv +2 -2
- data/training/lexicons/nci.tsv +2 -2
- data/training/lexicons/ojp.tsv +2 -2
- data/training/lexicons/osc.tsv +2 -2
- data/training/lexicons/pal.tsv +2 -2
- data/training/lexicons/poz-oce-pro.tsv +2 -2
- data/training/lexicons/poz-pol-pro.tsv +2 -2
- data/training/lexicons/sga.tsv +2 -2
- data/training/lexicons/sit-pro.tsv +2 -2
- data/training/lexicons/sla-pro.tsv +2 -2
- data/training/lexicons/tai-pro.tsv +2 -2
- data/training/lexicons/trk-pro.tsv +2 -2
- data/training/lexicons/xce.tsv +2 -2
- data/training/lexicons/xfa.tsv +2 -2
- data/training/lexicons/xlp.tsv +2 -2
- data/training/lexicons/xmr.tsv +2 -2
- data/training/lexicons/xsa.tsv +2 -2
- data/training/lexicons/xtg.tsv +2 -2
- data/training/lexicons/xto-pro.tsv +2 -2
- data/training/lexicons/xum.tsv +2 -2
- docs/DATABASE_REFERENCE.md +24 -0
- docs/prd/PRD_COGNATE_PAIRS_V2.md +291 -0
- scripts/cleanup_phase8_audit.py +308 -0
- scripts/extract_abvd_cognates_v2.py +183 -0
- scripts/extract_iecor_cognates.py +178 -0
- scripts/extract_sinotibetan_cognates_v2.py +163 -0
- scripts/extract_wold_borrowings_v2.py +203 -0
- scripts/merge_cognate_pairs.py +179 -0
- scripts/rebuild_concept_aligned_pairs.py +178 -0
- scripts/transliteration_maps.py +39 -31
.gitattributes (CHANGED)

@@ -58,6 +58,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+data/training/cognate_pairs/cognate_pairs_inherited.tsv filter=lfs diff=lfs merge=lfs -text
+data/training/cognate_pairs/cognate_pairs_similarity.tsv filter=lfs diff=lfs merge=lfs -text
 *.tsv filter=lfs diff=lfs merge=lfs -text
 *.csv filter=lfs diff=lfs merge=lfs -text
 *.pdf filter=lfs diff=lfs merge=lfs -text
data/training/cognate_pairs/cognate_pairs_borrowing.tsv (ADDED)

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f47de196443c307429b5e50fff4fb1888779bed16f7ba8999f8973dd607921b
+size 1888180
data/training/cognate_pairs/cognate_pairs_inherited.tsv (ADDED)

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4432cb8691157d20a8f4cc1d12d78af17004231026a9eca727098e16219dd41d
+size 2019878325
data/training/cognate_pairs/cognate_pairs_similarity.tsv (ADDED)

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e7e7d4afb94c19667282056348d1a16c68ccb3294eab41029a48c5e064beefb
+size 25518124
data/training/lexicons/*.tsv (CHANGED: 27 files, +2 -2 each)

Every lexicon file in `data/training/lexicons/` changed the same way: the previously empty `oid sha256:` and `size` lines of its Git LFS pointer were filled in with the regenerated file's hash and byte size.

| File | New oid sha256 | New size (bytes) |
|------|----------------|------------------|
| aav-pro.tsv | 0183686f45a4fd760834f12dc9aa43192d5097b4fca9c387155825e0e817c768 | 7225 |
| ang.tsv | 24aea81defe7da1e95dbe91c7bcea36778e93e937a6a200c69d9dcf42de270fd | 1347242 |
| cel-pro.tsv | 37fc46930c151952b98d02f130ca67c0a0efadb2529d48d4fcc582cae0f4d20d | 69658 |
| cop.tsv | 2aaf7d44578d0a7ed1f6434422d2126bbbbc4aa33dccc4955c0a75a38275d278 | 612142 |
| gem-pro.tsv | 618759789e58e4516f3ffdd201960f30c1798492c4dfb9b4ce5ce1495f85373f | 248589 |
| hbo.tsv | 2c2464bda766fad8da6788f89044315404a2bd4ccd1eb093f6972a6a270d320f | 557968 |
| ira-pro.tsv | b1d76a8f660d047ec6a29544ddaa222fe1afd959e14c598b868963f1a889454a | 17007 |
| itc-pro.tsv | 7e68d7a535b188e78646aa8b8f5982dcd0b84ae941143f102dd870da48266528 | 31141 |
| nci.tsv | fd92cae9e79cd9e724b8171eee1f68a41e5fcdbef07395a7dadd888b1ef8100c | 206000 |
| ojp.tsv | 015f54211d32d2c29137d4cf8f840c0addef1ddd79bf1f40a258a1c90deaec8e | 187200 |
| osc.tsv | 0cffe0f5c279f10c6b2b702d7efaacf11ec9aad2dd77c47c691ad6fdba606512 | 74946 |
| pal.tsv | da0b4a16657bd24da4e0669b4d99e9dde764405d0bd351e557586225ee2db99e | 11971 |
| poz-oce-pro.tsv | a568728b9955c8e6ff34631cb56569300910042f2c0d955d0b9b9844d8eda423 | 4490 |
| poz-pol-pro.tsv | 4315e3550bd8e02fbe289a5e53b80b5c792850011b2f2fe24531ac6b4c1a0139 | 5685 |
| sga.tsv | 3bacf812064829ac762fdce41061ace3c5665cec672def5e56762659c9a8db7c | 1499335 |
| sit-pro.tsv | c5ecd86233850196585795dc676927574777432254944972f45ff2a2d3194525 | 13750 |
| sla-pro.tsv | 49f555de71ae5d3d3d9fda732c79e0c65998256011d1651f1960932918f603ec | 220655 |
| tai-pro.tsv | 49635a6e5cd28495e8fc0223891d09d9c9fd51efa5d3725220bf18deda0223c6 | 5850 |
| trk-pro.tsv | 9dd4535f040da56a777f0f31eb607b4b3844a1da05007303f3a07bfa6b7a055c | 39695 |
| xce.tsv | 78d326778d7252edc1524ccd22d166dd33833f46a77e48c84e44ff5738033882 | 499 |
| xfa.tsv | b2289515ad93356d0d0b98ded57141265ec686f8a9b90b501488f023b0459a18 | 17403 |
| xlp.tsv | 2f1feba4f6dbafa723c9369366a727949f8777e2983be9191b53380da7298d55 | 14969 |
| xmr.tsv | b8d99c662c6ff07581b94399a72a17b20252cbd946605c270f438143adf21684 | 98791 |
| xsa.tsv | bffb6bc1e97bf8bdc27728790933b6489abb0a6f76432e4f50d4f32675c350d5 | 4939 |
| xtg.tsv | 4134dce231390baee8439c93c0c94f5805e105d415e1e0fa06f6b9903b77ab19 | 9436 |
| xto-pro.tsv | e8554c6e4650cecb255167ad355a8d10412c9d258ac7cec968b2906a8b0ddd7b | 5749 |
| xum.tsv | 090b81cd2b546bcf94d55cfd04dad42d3840d92320717757ab40853509b53f60 | 60237 |
docs/DATABASE_REFERENCE.md (CHANGED)

@@ -125,6 +125,30 @@
 - 3,466,000+ total lexical entries
 - 170,756 ancient language entries (68 languages)
 - 3,296,156 modern language entries (1,113 languages)
+- 21,547,916 cognate/borrowing/similarity pairs
+
+### Cognate Pairs (v2)
+
+Three TSV files in `data/training/cognate_pairs/`, 14-column schema:
+
+```
+Lang_A  Word_A  IPA_A  Lang_B  Word_B  IPA_B  Concept_ID  Relationship  Score  Source  Relation_Detail  Donor_Language  Confidence  Source_Record_ID
+```
+
+| File | Rows | Description |
+|------|------|-------------|
+| `cognate_pairs_inherited.tsv` | 21,298,208 | Expert-classified cognates + concept-aligned pairs (score ≥ 0.5) |
+| `cognate_pairs_borrowing.tsv` | 17,924 | Verified donor→recipient borrowings from WOLD BorrowingTable |
+| `cognate_pairs_similarity.tsv` | 231,784 | Phonetically similar pairs (0.3 ≤ score < 0.5), no overlap with inherited |
+
+**Sources:**
+- ABVD CognateTable (21.6M expert cognate pairs, 1,682 Austronesian languages)
+- IE-CoR CognateTable (412K Indo-European cognate pairs)
+- Sino-Tibetan CognateTable (4.2K pairs, borrowings filtered)
+- WOLD BorrowingTable (17.9K verified donor-recipient pairs)
+- Internal concept-aligned pairs (233K) + similarity pairs (254K)
+
+**Deduplication:** Priority ordering expert_cognate > borrowing > concept_aligned > similarity_only. Cross-file dedup ensures no language-concept combo appears in both inherited and similarity files. See `docs/prd/PRD_COGNATE_PAIRS_V2.md` for full specification.
+
 
 ---
 
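For consumers of the cognate-pair files described above, a minimal reading sketch. This is illustrative usage, not shipped code; it assumes the TSVs keep the 14-column header row written by the extraction scripts, and that the Git LFS objects have been fetched (e.g. with `git lfs pull`):

```python
import csv

# Print the first five borrowing rows with their donor and provenance ID.
path = "data/training/cognate_pairs/cognate_pairs_borrowing.tsv"
with open(path, encoding="utf-8") as f:
    for i, row in enumerate(csv.DictReader(f, delimiter="\t")):
        print(row["Lang_A"], row["Word_A"], "|", row["Relationship"],
              "| donor:", row["Donor_Language"], "| id:", row["Source_Record_ID"])
        if i == 4:
            break
```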
docs/prd/PRD_COGNATE_PAIRS_V2.md (ADDED, 291 lines)

# PRD: Cognate Pairs Dataset v2 — Reconstruction from Verified Sources

**Status:** Draft
**Date:** 2026-03-13
**Priority:** P0 — Cognate pairs are used for validation testing; any hallucinated data invalidates model evaluation.

---

## 1. Problem Statement

A deep adversarial audit of the cognate pairs dataset (`data/training/cognate_pairs/`) has identified **6 critical bugs** in the generation pipeline (`scripts/assign_cognate_links.py` and `scripts/expand_cldf_full.py`). Multiple bugs fabricate or mislabel cognate relationships, making the dataset unsuitable for validation. This PRD specifies the complete reconstruction of the cognate pairs from verified scholarly sources.

### 1.1 Current State

| File | Rows | Size |
|------|------|------|
| `cognate_pairs_inherited.tsv` | 18,257,301 | 1.18 GB |
| `cognate_pairs_borrowing.tsv` | 116,757 | 8.0 MB |
| `cognate_pairs_similarity.tsv` | 170,064 | 16.2 MB |

Current 10-column schema:

```
Lang_A  Word_A  IPA_A  Lang_B  Word_B  IPA_B  Concept_ID  Relationship  Score  Source
```

### 1.2 Bugs Found

#### Bug 1: ABVD `cognates.csv` Never Read (CRITICAL)

**Location:** `expand_cldf_full.py` lines 200-266
**Impact:** All ABVD cognate data comes from the `forms.csv` `Cognacy` column instead of the authoritative `cognates.csv` CognateTable (291,675 expert entries). The `Doubt` column (`true`/`false`) in `cognates.csv` is never consulted — disputed cognate assignments are treated as certain.

#### Bug 2: ABVD Multi-Set Cognacy Truncation

**Location:** `expand_cldf_full.py` line 257: `cog_num = cognacy.split(",")[0].strip()`
**Impact:** Forms belonging to multiple cognate sets (e.g., `"1,64"`) lose all secondary memberships. An estimated 37,587 cognate-set memberships are silently discarded.
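The fix for Bug 2 is mechanical. A minimal sketch (illustrative only; the function name is ours, not the pipeline's):

```python
def cognacy_sets(cognacy: str) -> list[str]:
    """Split an ABVD Cognacy value into ALL cognate-set IDs.

    ABVD encodes multi-set membership as a comma-separated list:
    "1,64" means the form belongs to sets 1 AND 64. The buggy code
    kept only split(",")[0], silently dropping every other set.
    """
    return [part.strip() for part in cognacy.split(",") if part.strip()]


assert cognacy_sets("1,64") == ["1", "64"]  # both memberships preserved
assert cognacy_sets("12") == ["12"]
assert cognacy_sets("") == []
```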
#### Bug 3: WOLD Borrowing Pairs Fabricated (CRITICAL)

**Location:** `assign_cognate_links.py` lines 185-255 (`_extract_borrowings_from_forms`)
**Impact:** The script reads the `forms.csv` `Borrowed` column and pairs any two forms from different languages that share a concept AND have SCA similarity >= 0.3, labeling them `"borrowing"`. This fabricates borrowing relationships. The authoritative `borrowings.csv` BorrowingTable (21,624 explicit donor-recipient events with `Target_Form_ID`, `Source_Form_ID`, `Source_languoid`, `Source_certain` fields) is **COMPLETELY UNUSED**.

Example: a Turkish word and an Arabic word for the same concept are paired as a "borrowing" even if no actual borrowing relationship exists between them.

#### Bug 4: Concept-Aligned Pairs Mislabeled as Inherited

**Location:** `assign_cognate_links.py` lines 154, 160
**Impact:** 488,900+ algorithmically generated concept-aligned pairs (SCA similarity >= 0.5) are written to `cognate_pairs_inherited.tsv` with Relationship = `cognate_inherited`, making them indistinguishable from expert cognates to downstream consumers.

#### Bug 5: Sino-Tibetan Word Field is Concept String

**Location:** `expand_cldf_full.py` line 319
**Impact:** For all Sino-Tibetan entries, `Word == Concept_ID` (e.g., `"above"`, `"all"`) instead of the actual lexical form. The IPA column contains the real phonetic data, but Word is a meaningless concept gloss.

#### Bug 6: 50-Entry Hard Truncation (File-Sort Bias)

**Location:** `assign_cognate_links.py` line 142: `members_sample = members[:50]`
**Impact:** For large families (Austronesian: hundreds of languages), only the first 50 entries in alphabetical ISO order are used. Languages late in the alphabet are systematically excluded.

---

## 2. Design: Reconstructed Pipeline

### 2.1 Iron Law Compliance

> All data enters the dataset through code that reads from external scholarly sources. No hardcoded lexical or cognate data. No AI-generated entries.

Every extraction script:
- Reads from CLDF/TSV source files cloned to `sources/`
- Parses, transforms, and writes to `staging/cognate_pairs/`
- Writes `Source_Record_ID` for full provenance traceability

### 2.2 Extended Schema (14 Columns)

```
Lang_A  Word_A  IPA_A  Lang_B  Word_B  IPA_B  Concept_ID  Relationship  Score  Source  Relation_Detail  Donor_Language  Confidence  Source_Record_ID
```

| Column | Type | Description |
|--------|------|-------------|
| `Lang_A` | str | ISO 639-3 code of language A |
| `Word_A` | str | Orthographic/transliteration form in language A |
| `IPA_A` | str | IPA transcription of Word_A |
| `Lang_B` | str | ISO 639-3 code of language B |
| `Word_B` | str | Orthographic/transliteration form in language B |
| `IPA_B` | str | IPA transcription of Word_B |
| `Concept_ID` | str | Concepticon gloss or equivalent concept identifier |
| `Relationship` | str | One of: `expert_cognate`, `borrowing`, `concept_aligned`, `similarity_only` |
| `Score` | float | SCA-based similarity score (0.0-1.0), rounded to 4 decimal places |
| `Source` | str | Source database identifier (e.g., `abvd`, `wold`, `iecor`, `sinotibetan`, `acd`) |
| `Relation_Detail` | str | Populated ONLY when source provides: `inherited`, `borrowed`, or `-` |
| `Donor_Language` | str | For borrowings only: source language from WOLD `Source_languoid`. `-` otherwise |
| `Confidence` | str | Source-provided certainty: `certain`/`doubtful` (ABVD), `1`-`5` (WOLD), `-` otherwise |
| `Source_Record_ID` | str | Traceable ID: ABVD cognateset ID, WOLD borrowing ID, IE-CoR cognateset ID, etc. |

**Design decisions:**
- No mother-daughter / sister-sister typing — NO source provides this at the pair level.
- `Relation_Detail`, `Donor_Language`, and `Confidence` are `-` when the source doesn't provide them. We never fabricate metadata.
- `Relationship` is script-assigned based on extraction method. `Relation_Detail` is source-provided.
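To make the schema concrete, here is one row written and read back with Python's standard csv module. The words are a real Polynesian cognate pair, but every field value is an invented illustration, not taken from the dataset:

```python
import csv
import io

COLUMNS = [
    "Lang_A", "Word_A", "IPA_A", "Lang_B", "Word_B", "IPA_B",
    "Concept_ID", "Relationship", "Score", "Source",
    "Relation_Detail", "Donor_Language", "Confidence", "Source_Record_ID",
]

# Hypothetical expert-cognate row; metadata a source can't supply stays "-".
row = {
    "Lang_A": "mri", "Word_A": "ika", "IPA_A": "ika",
    "Lang_B": "haw", "Word_B": "iʻa", "IPA_B": "iʔa",
    "Concept_ID": "fish", "Relationship": "expert_cognate",
    "Score": "0.6667", "Source": "abvd",
    "Relation_Detail": "inherited", "Donor_Language": "-",
    "Confidence": "certain", "Source_Record_ID": "example-cogset-id",
}

buf = io.StringIO()
writer = csv.DictWriter(buf, fieldnames=COLUMNS, delimiter="\t", lineterminator="\n")
writer.writeheader()
writer.writerow(row)

parsed = next(csv.DictReader(io.StringIO(buf.getvalue()), delimiter="\t"))
assert parsed["Relationship"] == "expert_cognate"
assert parsed["Donor_Language"] == "-"
```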
### 2.3 Source Inventory

| Source | Repository | Cognate Data | Status |
|--------|------------|--------------|--------|
| ABVD | `sources/abvd/` | `cognates.csv` CognateTable (291,675 entries, 19,356 sets) | Cloned, **unused by current code** |
| WOLD | `sources/wold/` | `borrowings.csv` BorrowingTable (21,624 events) | Cloned, **unused by current code** |
| Sino-Tibetan | `sources/sinotibetan/` | `sinotibetan_dump.tsv` (6,159 entries with COGID) | Cloned, partially used |
| IE-CoR | `sources/iecor/` | `cognates.csv` CognateTable (Indo-European cognates) | **NOT CLONED — must clone** |
| ACD | `sources/acd/` | Cached Austronesian data | Partially cached |
| Internal lexicons | `data/training/lexicons/` | SCA similarity scoring | N/A |

### 2.4 Output Files

The same 3-file split, but with corrected labels:
- `cognate_pairs_inherited.tsv` — expert cognates ONLY (ABVD CognateTable, IE-CoR CognateTable, Sino-Tibetan COGID, ACD)
- `cognate_pairs_borrowing.tsv` — verified borrowings ONLY (WOLD BorrowingTable with explicit donor-recipient pairs)
- `cognate_pairs_similarity.tsv` — concept-aligned pairs (algorithmically generated, clearly labeled)

---

## 3. Implementation Plan

### Phase 1: Source Preparation

**Step 1.1** — Clone IE-CoR:

```bash
cd sources/
git clone https://github.com/lexibank/iecor.git
```

**Step 1.2** — Verify all CLDF source files exist:
- `sources/abvd/cldf/cognates.csv` (291,675 rows)
- `sources/abvd/cldf/forms.csv`
- `sources/abvd/cldf/languages.csv`
- `sources/wold/cldf/borrowings.csv` (21,624 rows)
- `sources/wold/cldf/forms.csv`
- `sources/wold/cldf/languages.csv`
- `sources/sinotibetan/sinotibetan_dump.tsv` (6,159 rows)
- `sources/iecor/cldf/cognates.csv` (NEW)
- `sources/iecor/cldf/forms.csv` (NEW)

### Phase 2: Extraction Scripts

Each script follows the same pattern:
1. Read source CLDF files
2. Parse and validate
3. Generate pairwise cognate entries
4. Write to `staging/cognate_pairs/{source}_pairs.tsv` with the 14-column schema
5. Print statistics and a provenance summary

#### Script 1: `scripts/extract_abvd_cognates_v2.py`

**Source:** `sources/abvd/cldf/cognates.csv` + `forms.csv` + `languages.csv`

**Key fixes:**
- Read `cognates.csv` directly (has `Form_ID`, `Cognateset_ID`, `Doubt`)
- Join to `forms.csv` on `Form_ID` for orthographic form and IPA
- Handle multi-set membership: one form can appear in multiple cognate sets
- Use the `Doubt` column for Confidence: `Doubt=false` → `certain`, `Doubt=true` → `doubtful`
- `Source_Record_ID` = `Cognateset_ID` from `cognates.csv`
- Generate cross-language pairs within each cognate set
- SCA similarity score computed on the fly

#### Script 2: `scripts/extract_wold_borrowings_v2.py`

**Source:** `sources/wold/cldf/borrowings.csv` + `forms.csv` + `languages.csv`

**Key fixes:**
- Read the `borrowings.csv` BorrowingTable for actual donor-recipient pairs (see the sketch after this list)
- Join `Target_Form_ID` and `Source_Form_ID` to `forms.csv` for word/IPA
- Extract `Source_languoid` as `Donor_Language`
- Extract `Source_certain` for Confidence
- `Source_Record_ID` = borrowing `ID` from `borrowings.csv`
- Each row produces exactly ONE pair (not fabricated from concept co-occurrence)
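A sketch of the intended BorrowingTable join, assuming the CLDF column names listed above, with the log-and-skip policy for unresolvable Form_IDs (see §6 Risks):

```python
import csv
from pathlib import Path


def wold_borrowing_pairs(cldf_dir: Path):
    """Yield one (target_form, source_form, donor, certainty, borrowing_id)
    tuple per BorrowingTable row — never synthesised from concept
    co-occurrence. Column names are those this PRD cites, not verified here.
    """
    with open(cldf_dir / "forms.csv", encoding="utf-8") as f:
        forms = {row["ID"]: row for row in csv.DictReader(f)}
    with open(cldf_dir / "borrowings.csv", encoding="utf-8") as f:
        for row in csv.DictReader(f):
            target = forms.get(row["Target_Form_ID"])
            source = forms.get(row.get("Source_Form_ID", ""))
            if target is None or source is None:
                continue  # skip (and, in the real script, log) broken joins
            yield (target, source, row.get("Source_languoid", "-"),
                   row.get("Source_certain", "-"), row["ID"])
```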
#### Script 3: `scripts/extract_sinotibetan_cognates_v2.py`

**Source:** `sources/sinotibetan/sinotibetan_dump.tsv`

**Key fixes:**
- Filter out rows where the `BORROWING` column is non-empty
- Use the `IPA` column for IPA_A/IPA_B (correct)
- Use the `CONCEPT` column for Concept_ID
- Use the actual IPA form as Word (not the concept gloss) — or use `-` and note that Word is not available
- `Source_Record_ID` = `st_{COGID}`

#### Script 4: `scripts/extract_iecor_cognates.py` (NEW)

**Source:** `sources/iecor/cldf/cognates.csv` + `forms.csv`

**Implementation:**
- Read the IE-CoR CognateTable (standard CLDF format)
- Join `Form_ID` to `forms.csv` for word/IPA/language
- Generate cross-language pairs within each cognate set
- `Source_Record_ID` = IE-CoR `Cognateset_ID`

#### Script 5: `scripts/extract_acd_cognates.py` (NEW)

**Source:** `sources/acd/` cached data

**Implementation:**
- Parse ACD (Austronesian Comparative Dictionary) cached HTML/data
- Extract cognate sets with etymon-level grouping
- If the ACD data is insufficiently structured, mark as P2 and skip

#### Script 6: `scripts/rebuild_concept_aligned_pairs.py`

**Source:** `data/training/lexicons/*.tsv` + `data/training/family_map.json`

**Key fixes:**
- Label as `concept_aligned` (NOT `cognate_inherited`)
- Random sampling instead of file-sort truncation for groups > 50 (see the sketch after this list)
- Score threshold: >= 0.5 → `concept_aligned`, 0.3-0.49 → `similarity_only`
- `Source_Record_ID` = `-` (no source record; algorithmically generated)
- `Confidence` = `-` (not from an expert source)
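A sketch of the sampling fix for Bug 6 (the function and parameter names are ours, not the script's):

```python
import random


def sample_members(members: list, cap: int = 50, seed: int = 42) -> list:
    """Unbiased cap for large families: a uniform random sample instead of
    members[:50], which kept only the alphabetically first languages.
    Seeded so rebuilds are reproducible.
    """
    if len(members) <= cap:
        return members
    return random.Random(seed).sample(members, cap)
```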
#### Script 7: `scripts/merge_cognate_pairs.py`

**Merges all staging files into the final output:**
- Deduplicates: if a pair (A, B, concept) appears in both expert and concept-aligned sources, keep the expert entry (see the sketch after this list)
- Priority: expert > borrowing > concept_aligned > similarity
- Writes the 3 output files with the 14-column schema
- Prints final statistics
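A sketch of the deduplication rule, assuming pair identity is order-independent. It is in-memory for brevity; the production merge should stream, per §6 Risks:

```python
PRIORITY = {"expert_cognate": 0, "borrowing": 1,
            "concept_aligned": 2, "similarity_only": 3}


def pair_key(row: dict) -> tuple:
    """Order-independent pair identity: (A, B, concept) == (B, A, concept)."""
    a = (row["Lang_A"], row["Word_A"])
    b = (row["Lang_B"], row["Word_B"])
    first, second = sorted([a, b])
    return (first, second, row["Concept_ID"])


def merge(rows):
    """Keep the highest-priority relationship seen for each pair key."""
    best: dict[tuple, dict] = {}
    for row in rows:
        key = pair_key(row)
        kept = best.get(key)
        if kept is None or PRIORITY[row["Relationship"]] < PRIORITY[kept["Relationship"]]:
            best[key] = row
    return list(best.values())
```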
### Phase 3: Adversarial Audit Protocol

For EACH extraction script, deploy a two-team audit:

**Team A (Extraction):**
- Runs the script
- Produces the staging output
- Reports entry counts and statistics

**Team B (Adversarial Auditor):**
- Samples 20 random output rows
- For each row, traces Source_Record_ID back to the source CSV (see the sketch after this section)
- Verifies that Form_ID, Cognateset_ID, and Language_ID all exist in the source
- Verifies that IPA matches the source data
- Checks for entries in the output that have no source backing
- Checks for duplicate pairs, empty fields, and malformed data
- **VETO** power: if any entry cannot be traced, the entire script output is rejected

**End-to-end cross-validation (after merge):**
- Sample 50 random pairs from each output file (150 total)
- Full provenance trace: output → staging → source CSV → published database
- Verify no concept-aligned pairs appear in `cognate_pairs_inherited.tsv`
- Run count statistics and compare to source totals
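A sketch of one Team B provenance check (names illustrative; the real audit also re-verifies forms, languages, and IPA against the source):

```python
import csv
import random
from pathlib import Path


def trace_sample(pairs_tsv: Path, source_csv: Path,
                 id_column: str = "Cognateset_ID", k: int = 20) -> list[dict]:
    """Sample k output rows and confirm each Source_Record_ID exists in the
    source table. Loads both files eagerly, which is fine for staging slices;
    the multi-million-row merged outputs need reservoir sampling instead.
    A non-empty return triggers the VETO.
    """
    with open(source_csv, encoding="utf-8") as f:
        known_ids = {row[id_column] for row in csv.DictReader(f)}
    with open(pairs_tsv, encoding="utf-8") as f:
        rows = list(csv.DictReader(f, delimiter="\t"))
    sample = random.sample(rows, min(k, len(rows)))
    return [r for r in sample if r["Source_Record_ID"] not in known_ids]
```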
### Phase 4: Deployment

1. Commit the PRD + all scripts + staging data
2. Run the full pipeline: extract → audit → merge → validate
3. Update `docs/DATABASE_REFERENCE.md` with the new cognate pair statistics
4. Push to GitHub
5. Push to HuggingFace via `scripts/push_to_hf.py`

---

## 4. Acceptance Criteria

| Criterion | Metric |
|-----------|--------|
| Zero fabricated pairs | Every pair traceable to a source record via Source_Record_ID |
| Zero mislabeled relationships | Expert cognates in inherited.tsv ONLY; concept-aligned in similarity.tsv |
| WOLD borrowings use BorrowingTable | All borrowing pairs from borrowings.csv, not forms.csv |
| ABVD doubt flags preserved | Confidence column reflects the Doubt field |
| Multi-set membership preserved | Forms in multiple cognate sets generate pairs for ALL sets |
| Sino-Tibetan borrowings excluded | Zero entries with the BORROWING flag in the inherited output |
| IE-CoR coverage | Indo-European expert cognates present in the inherited output |
| Adversarial audit passes | 0/150 sample pairs fail the provenance trace |

---

## 5. Non-Goals

- **Mother-daughter / sister-sister relationship typing:** No source provides this at the pair level. We encode only what sources give us.
- **Etymological chain reconstruction:** No source provides intermediary language chains. Out of scope.
- **Cross-source conflation:** If ABVD and IE-CoR both provide a cognate set for the same forms, both are kept (deduplicated by pair identity, with priority to the more specific source).
- **Replacing existing lexicon files:** This PRD only covers cognate pairs. Lexicon TSVs are not modified.

---

## 6. Risks

| Risk | Mitigation |
|------|------------|
| IE-CoR schema differs from ABVD | Read the actual CLDF headers; adapt column names |
| ACD data too unstructured | Mark as P2; skip if insufficient |
| WOLD borrowings.csv has broken Form_IDs | Join validation: log and skip unresolvable IDs |
| Sino-Tibetan Word column unusable | Use IPA as the primary identifier; Word = `-` |
| Large pair counts overwhelm merge | Stream-process with generators; don't hold all pairs in memory |
scripts/cleanup_phase8_audit.py (ADDED, 308 lines)

```python
#!/usr/bin/env python3
"""Post-audit cleanup for Phase 8 lexicon TSV files.

Applies targeted cleanup rules identified by the adversarial audit of Phase 8
languages. Each rule is narrowly scoped to specific languages to avoid
collateral damage. Rules operate on the IPA and Word columns only.

Run this BEFORE reprocess_ipa.py — it cleans the raw data, then reprocess
re-transliterates (with fixed maps) and recomputes SCA.

Usage:
    python scripts/cleanup_phase8_audit.py [--dry-run] [--language ISO]
"""

from __future__ import annotations

import argparse
import io
import logging
import re
import sys
from pathlib import Path

# Fix Windows encoding
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8")

ROOT = Path(__file__).resolve().parent.parent
LEXICON_DIR = ROOT / "data" / "training" / "lexicons"

logger = logging.getLogger(__name__)

HEADER = "Word\tIPA\tSCA\tSource\tConcept_ID\tCognate_Set_ID\n"

# Phase 8 languages to clean
PHASE8_LANGUAGES = [
    "sla-pro", "trk-pro", "itc-pro", "jpx-pro", "ira-pro",
    "xce", "xsa",
    "alg-pro", "sqj-pro", "aav-pro", "poz-pol-pro",
    "tai-pro", "xto-pro", "poz-oce-pro", "xgn-pro",
    "obm", "xmr",
    "myn-pro", "afa-pro", "xib", "xeb",
    # Also Phase 7 languages flagged by audit
    "xlp",
]

# Cyrillic homoglyphs that look identical to Latin/IPA chars
CYRILLIC_TO_LATIN = {
    "\u0430": "a",  # а → a
    "\u0435": "e",  # е → e
    "\u043e": "o",  # о → o
    "\u0440": "r",  # р → r
    "\u0441": "s",  # с → s
    "\u0443": "u",  # у → u
    "\u0445": "x",  # х → x
    "\u0456": "i",  # і → i
    "\u0410": "A",  # А → A
    "\u0415": "E",  # Е → E
    "\u041e": "O",  # О → O
    "\u0420": "R",  # Р → R
    "\u0421": "S",  # С → S
}

# Structural markers used in Proto-Japonic notation (not phonemic)
STRUCTURAL_MARKERS_RE = re.compile(r"(?<![a-zA-Z\u0250-\u02FF])[OVNEU](?![a-zA-Z\u0250-\u02FF])")


def rule_strip_cyrillic_homoglyphs(ipa: str, iso: str) -> str:
    """Rule 1: Replace Cyrillic homoglyphs in the IPA column (sla-pro)."""
    if iso != "sla-pro":
        return ipa
    for cyrillic, latin in CYRILLIC_TO_LATIN.items():
        ipa = ipa.replace(cyrillic, latin)
    return ipa


def rule_strip_parentheses(ipa: str, iso: str) -> str:
    """Rule 2: Strip parentheses from IPA — (ʃ) → ʃ (trk-pro, sla-pro)."""
    if iso not in ("trk-pro", "sla-pro"):
        return ipa
    return ipa.replace("(", "").replace(")", "")


def rule_strip_structural_markers(ipa: str, iso: str) -> str:
    """Rule 3: Strip single-letter structural markers from IPA (jpx-pro).

    Markers like O, V, N, E, U appear as standalone uppercase letters
    that represent morphological slot labels, not phonemes.
    """
    if iso != "jpx-pro":
        return ipa
    return STRUCTURAL_MARKERS_RE.sub("", ipa)


def rule_strip_ascii_colon(ipa: str, iso: str) -> str:
    """Rule 4: Remove ASCII colons from IPA (alg-pro)."""
    if iso != "alg-pro":
        return ipa
    return ipa.replace(":", "")


def rule_strip_dots(ipa: str, iso: str) -> str:
    """Rule 5: Strip leading/trailing dots from IPA (xmr, tai-pro)."""
    if iso not in ("xmr", "tai-pro"):
        return ipa
    return ipa.strip(".")


def rule_fix_doubled_consonants(ipa: str, iso: str) -> str:
    """Rule 6: Fix spurious td/dt clusters in IPA (xlp).

    Lepontic sometimes shows td/dt from sandhi or scribal errors.
    """
    if iso != "xlp":
        return ipa
    # Conservative no-op: no rewrite pattern has been defined yet that would
    # only hit clearly spurious td/dt (and not valid sequences), so the
    # value currently passes through unchanged.
    return ipa


def rule_lowercase_word(word: str, iso: str) -> str:
    """Rule 7: Normalize uppercase proper names to lowercase (itc-pro)."""
    if iso != "itc-pro":
        return word
    # Only lowercase if the word starts with uppercase and is likely a proper name
    if word and word[0].isupper() and not word.isupper():
        return word.lower()
    return word


def rule_strip_sumerograms(word: str, ipa: str, iso: str):
    """Rule 8: Drop Sumerogram leaks (xeb).

    Sumerograms are uppercase determinatives (e.g., DINGIR, KI, LU₂).
    A word that is fully uppercase (ignoring subscript digits and hyphens)
    is treated as a Sumerogram and skipped.
    Returns a (word, ipa, should_keep) tuple.
    """
    if iso != "xeb":
        return word, ipa, True
    stripped = re.sub(r"[₀₁₂₃₄₅₆₇₈₉\-]", "", word)
    if stripped and stripped.isascii() and stripped.isupper() and len(stripped) > 1:
        # This is a Sumerogram — skip it
        return word, ipa, False
    return word, ipa, True


def rule_final_ascii_g_sweep(ipa: str, iso: str) -> str:
    """Rule 9: Replace any remaining ASCII g (U+0067) with IPA ɡ (U+0261).

    This is a catch-all safety net applied to ALL Phase 8 languages
    (the iso argument is kept only for rule-signature uniformity).
    After map fixes, any ASCII g that persists in IPA is incorrect.
    """
    return ipa.replace("g", "\u0261")


def cleanup_file(iso: str, dry_run: bool = False) -> dict:
    """Apply all cleanup rules to a single TSV file."""
    tsv_path = LEXICON_DIR / f"{iso}.tsv"
    if not tsv_path.exists():
        logger.warning("File not found: %s", tsv_path)
        return {"iso": iso, "status": "not_found"}

    with open(tsv_path, "r", encoding="utf-8") as f:
        lines = f.readlines()

    has_header = lines and lines[0].startswith("Word\t")
    data_lines = lines[1:] if has_header else lines

    entries = []
    total = 0
    cleaned = 0
    removed = 0

    for line in data_lines:
        line = line.rstrip("\n\r")
        if not line.strip():
            continue

        parts = line.split("\t")
        while len(parts) < 6:  # pad short rows out to the 6-column schema
            parts.append("-")

        word = parts[0]
        ipa = parts[1]
        sca = parts[2]
        source = parts[3]
        concept_id = parts[4]
        cognate_set_id = parts[5]

        total += 1
        original_word = word
        original_ipa = ipa

        # Apply Word-column rules
        word = rule_lowercase_word(word, iso)
        word, ipa, keep = rule_strip_sumerograms(word, ipa, iso)
        if not keep:
            removed += 1
            continue

        # Apply IPA-column rules (order matters)
        ipa = rule_strip_cyrillic_homoglyphs(ipa, iso)
        ipa = rule_strip_parentheses(ipa, iso)
        ipa = rule_strip_structural_markers(ipa, iso)
        ipa = rule_strip_ascii_colon(ipa, iso)
        ipa = rule_strip_dots(ipa, iso)
        ipa = rule_fix_doubled_consonants(ipa, iso)
        ipa = rule_final_ascii_g_sweep(ipa, iso)

        # Strip excess whitespace
        ipa = ipa.strip()
        word = word.strip()

        # Skip entries emptied by the rules above
        if not word or not ipa:
            removed += 1
            continue

        if word != original_word or ipa != original_ipa:
            cleaned += 1

        entries.append({
            "word": word,
            "ipa": ipa,
            "sca": sca,
            "source": source,
            "concept_id": concept_id,
            "cognate_set_id": cognate_set_id,
        })

    result = {
        "iso": iso,
        "total": total,
        "kept": len(entries),
        "cleaned": cleaned,
        "removed": removed,
        "status": "dry_run" if dry_run else "written",
    }

    if not dry_run and entries:
        with open(tsv_path, "w", encoding="utf-8") as f:
            f.write(HEADER)
            for e in entries:
                f.write(
                    f"{e['word']}\t{e['ipa']}\t{e['sca']}\t"
                    f"{e['source']}\t{e['concept_id']}\t{e['cognate_set_id']}\n"
                )

    return result


def main():
    parser = argparse.ArgumentParser(description="Phase 8 audit cleanup")
    parser.add_argument("--dry-run", action="store_true",
                        help="Show changes without writing files")
    parser.add_argument("--language", "-l",
                        help="Process only this ISO code")
    args = parser.parse_args()

    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(levelname)s: %(message)s",
        datefmt="%H:%M:%S",
    )

    languages = [args.language] if args.language else PHASE8_LANGUAGES

    mode = "DRY RUN" if args.dry_run else "LIVE"
    print(f"{'=' * 60}")
    print(f"Phase 8 Audit Cleanup ({mode})")
    print(f"Languages: {len(languages)}")
    print(f"{'=' * 60}")
    print()
    print(f"{'ISO':15s} {'Total':>6s} {'Cleaned':>8s} {'Removed':>8s}")
    print("-" * 45)

    results = []
    for iso in languages:
        result = cleanup_file(iso, dry_run=args.dry_run)
        results.append(result)
        if result["status"] == "not_found":
            print(f"{iso:15s} NOT FOUND")
        else:
            print(
                f"{iso:15s} {result['total']:6d} "
                f"{result['cleaned']:8d} "
                f"{result['removed']:8d}"
            )

    print()
    print(f"{'=' * 60}")
    total_entries = sum(r.get("total", 0) for r in results)
    total_cleaned = sum(r.get("cleaned", 0) for r in results)
    total_removed = sum(r.get("removed", 0) for r in results)
    print(f"  Total entries: {total_entries}")
    print(f"  Total cleaned: {total_cleaned}")
    print(f"  Total removed: {total_removed}")
    print(f"{'=' * 60}")


if __name__ == "__main__":
    main()
```
scripts/extract_abvd_cognates_v2.py (ADDED, 183 lines)

```python
#!/usr/bin/env python3
"""Extract ABVD cognate pairs from the authoritative CognateTable.

Reads sources/abvd/cldf/cognates.csv (291K expert entries) instead of the
forms.csv Cognacy column. Fixes: doubt flag leakage, multi-set truncation.

Output: staging/cognate_pairs/abvd_cognate_pairs.tsv (14-column schema)
"""

from __future__ import annotations

import csv
import io
import sys
from collections import defaultdict
from itertools import combinations
from pathlib import Path

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8")

ROOT = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(ROOT / "cognate_pipeline" / "src"))
sys.path.insert(0, str(ROOT / "scripts"))

from cognate_pipeline.normalise.sound_class import ipa_to_sound_class  # noqa: E402

SOURCES_DIR = ROOT / "sources" / "abvd" / "cldf"
STAGING_DIR = ROOT / "staging" / "cognate_pairs"
STAGING_DIR.mkdir(parents=True, exist_ok=True)

HEADER = (
    "Lang_A\tWord_A\tIPA_A\tLang_B\tWord_B\tIPA_B\tConcept_ID\t"
    "Relationship\tScore\tSource\tRelation_Detail\tDonor_Language\t"
    "Confidence\tSource_Record_ID\n"
)


def sca_similarity(ipa_a: str, ipa_b: str) -> float:
    """Compute normalised Levenshtein similarity on SCA sound-class strings."""
    try:
        sca_a = ipa_to_sound_class(ipa_a)
        sca_b = ipa_to_sound_class(ipa_b)
    except Exception:
        return 0.0
    if not sca_a or not sca_b:
        return 0.0
    # Single-row Levenshtein distance
    m, n = len(sca_a), len(sca_b)
    if m == 0 or n == 0:
        return 0.0
    dp = list(range(n + 1))
    for i in range(1, m + 1):
        prev = dp[0]
        dp[0] = i
        for j in range(1, n + 1):
            temp = dp[j]
            if sca_a[i - 1] == sca_b[j - 1]:
                dp[j] = prev
            else:
                dp[j] = 1 + min(prev, dp[j], dp[j - 1])
            prev = temp
    dist = dp[n]
    return round(1.0 - dist / max(m, n), 4)


def form_to_pseudo_ipa(form: str) -> str:
    """Convert an ABVD orthographic form to pseudo-IPA (lowercase, strip parens)."""
    # ABVD forms are orthographic — no true IPA. Basic normalisation only.
    result = form.lower().strip()
    result = result.replace("(", "").replace(")", "")
    return result


def main():
    print("=" * 60)
    print("ABVD Cognate Extraction v2")
    print("=" * 60)

    # Step 1: Read languages.csv → Language_ID → ISO code
    lang_path = SOURCES_DIR / "languages.csv"
    lang_iso = {}
    with open(lang_path, "r", encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for row in reader:
            lid = row["ID"]
            iso = row.get("ISO639P3code", "").strip()
            if iso:
                lang_iso[lid] = iso
    print(f"  Languages with ISO codes: {len(lang_iso)}")

    # Step 2: Read forms.csv → Form_ID → {language, word, ipa, concept}
    forms_path = SOURCES_DIR / "forms.csv"
    forms = {}
    with open(forms_path, "r", encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for row in reader:
            fid = row["ID"]
            lid = str(row["Language_ID"])
            iso = lang_iso.get(lid, "")
            if not iso:
                continue
            form = row.get("Form", row.get("Value", "")).strip()
            if not form:
                continue
            param_id = row.get("Parameter_ID", "").strip()
            # Extract the concept from Parameter_ID (e.g., "1_hand" → "hand")
            concept = param_id.split("_", 1)[1] if "_" in param_id else param_id
            ipa = form_to_pseudo_ipa(form)
            forms[fid] = {
                "iso": iso,
                "word": form,
                "ipa": ipa,
                "concept": concept,
            }
    print(f"  Forms loaded: {len(forms)}")

    # Step 3: Read cognates.csv → group by Cognateset_ID
    cognates_path = SOURCES_DIR / "cognates.csv"
    cogsets: dict[str, list[dict]] = defaultdict(list)
    doubt_count = 0
    total_cognate_rows = 0
    with open(cognates_path, "r", encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for row in reader:
            total_cognate_rows += 1
            form_id = row["Form_ID"]
            cogset_id = row["Cognateset_ID"]
            doubt = row.get("Doubt", "false").strip().lower() == "true"
            if doubt:
                doubt_count += 1
            form_data = forms.get(form_id)
            if form_data is None:
                continue
            cogsets[cogset_id].append({
                **form_data,
                "doubt": doubt,
                "cogset_id": cogset_id,
                "form_id": form_id,
            })
    print(f"  Cognate rows read: {total_cognate_rows}")
    print(f"  Doubtful entries: {doubt_count}")
    print(f"  Cognate sets: {len(cogsets)}")

    # Step 4: Generate cross-language pairs within each cognate set
    output_path = STAGING_DIR / "abvd_cognate_pairs.tsv"
    pair_count = 0
    with open(output_path, "w", encoding="utf-8") as out:
        out.write(HEADER)
        for cogset_id, members in cogsets.items():
            # Deduplicate members by (iso, word) — ABVD maps multiple
            # Language_IDs to the same ISO code with identical forms
            seen_members: set[tuple[str, str]] = set()
            deduped: list[dict] = []
            for m in members:
                key = (m["iso"], m["word"])
                if key not in seen_members:
                    seen_members.add(key)
                    deduped.append(m)
            members = deduped
            # Emit cross-language pairs only
            for a, b in combinations(members, 2):
                if a["iso"] == b["iso"]:
                    continue
                score = sca_similarity(a["ipa"], b["ipa"])
                confidence = "doubtful" if (a["doubt"] or b["doubt"]) else "certain"
                out.write(
                    f"{a['iso']}\t{a['word']}\t{a['ipa']}\t"
                    f"{b['iso']}\t{b['word']}\t{b['ipa']}\t"
                    f"{a['concept']}\texpert_cognate\t{score}\tabvd\t"
                    f"inherited\t-\t{confidence}\t{cogset_id}\n"
                )
                pair_count += 1
                if pair_count % 500000 == 0:
                    print(f"  ... {pair_count:,} pairs written")

    print(f"\n  Total pairs: {pair_count:,}")
    print(f"  Output: {output_path}")
    print("=" * 60)


if __name__ == "__main__":
    main()
```
scripts/extract_iecor_cognates.py (ADDED, 178 lines; the diff excerpt below is cut off at line 114)

```python
#!/usr/bin/env python3
"""Extract IE-CoR (Indo-European Cognate Relationships) cognate pairs.

Reads sources/iecor/cldf/cognates.csv + forms.csv + languages.csv.
Standard CLDF CognateTable format.

Output: staging/cognate_pairs/iecor_cognate_pairs.tsv (14-column schema)
"""

from __future__ import annotations

import csv
import io
import sys
from collections import defaultdict
from itertools import combinations
from pathlib import Path

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8")

ROOT = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(ROOT / "cognate_pipeline" / "src"))
sys.path.insert(0, str(ROOT / "scripts"))

from cognate_pipeline.normalise.sound_class import ipa_to_sound_class  # noqa: E402

SOURCES_DIR = ROOT / "sources" / "iecor" / "cldf"
STAGING_DIR = ROOT / "staging" / "cognate_pairs"
STAGING_DIR.mkdir(parents=True, exist_ok=True)

HEADER = (
    "Lang_A\tWord_A\tIPA_A\tLang_B\tWord_B\tIPA_B\tConcept_ID\t"
    "Relationship\tScore\tSource\tRelation_Detail\tDonor_Language\t"
    "Confidence\tSource_Record_ID\n"
)


def sca_similarity(ipa_a: str, ipa_b: str) -> float:
    """Compute normalised Levenshtein similarity on SCA sound-class strings."""
    try:
        sca_a = ipa_to_sound_class(ipa_a)
        sca_b = ipa_to_sound_class(ipa_b)
    except Exception:
        return 0.0
    if not sca_a or not sca_b:
        return 0.0
    m, n = len(sca_a), len(sca_b)
    if m == 0 or n == 0:
        return 0.0
    dp = list(range(n + 1))
    for i in range(1, m + 1):
        prev = dp[0]
        dp[0] = i
        for j in range(1, n + 1):
            temp = dp[j]
            if sca_a[i - 1] == sca_b[j - 1]:
                dp[j] = prev
            else:
                dp[j] = 1 + min(prev, dp[j], dp[j - 1])
            prev = temp
    dist = dp[n]
    return round(1.0 - dist / max(m, n), 4)


def main():
    print("=" * 60)
    print("IE-CoR Cognate Extraction")
    print("=" * 60)

    # Step 1: Read languages.csv → Language_ID → ISO code
    lang_path = SOURCES_DIR / "languages.csv"
    lang_iso = {}
    with open(lang_path, "r", encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for row in reader:
            lid = row["ID"]
            iso = row.get("ISO639P3code", "").strip()
            if iso:
                lang_iso[lid] = iso
    print(f"  Languages with ISO codes: {len(lang_iso)}")

    # Step 2: Read parameters.csv for concept glosses
    params_path = SOURCES_DIR / "parameters.csv"
    param_concept = {}
    if params_path.exists():
        with open(params_path, "r", encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for row in reader:
                pid = row["ID"]
                concept = row.get("Concepticon_Gloss", row.get("Name", pid)).strip()
                if concept:
                    param_concept[pid] = concept

    # Step 3: Read forms.csv → Form_ID → {iso, word, ipa, concept}
    forms_path = SOURCES_DIR / "forms.csv"
    forms = {}
    with open(forms_path, "r", encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for row in reader:
            fid = row["ID"]
            lid = str(row["Language_ID"])
            iso = lang_iso.get(lid, "")
            if not iso:
                continue
            form = row.get("Form", row.get("Value", "")).strip()
            if not form:
                continue
            # IE-CoR has phon_form and Phonemic columns for IPA
            ipa = row.get("phon_form", "").strip()
            if not ipa:
                ipa = row.get("Phonemic", "").strip()
            if not ipa:
                ipa = form.lower()  # fallback to orthographic
```
|
| 115 |
+
param_id = row.get("Parameter_ID", "").strip()
|
| 116 |
+
concept = param_concept.get(param_id, param_id)
|
| 117 |
+
forms[fid] = {
|
| 118 |
+
"iso": iso,
|
| 119 |
+
"word": form,
|
| 120 |
+
"ipa": ipa,
|
| 121 |
+
"concept": concept,
|
| 122 |
+
}
|
| 123 |
+
print(f" Forms loaded: {len(forms)}")
|
| 124 |
+
|
| 125 |
+
# Step 4: Read cognates.csv → group by Cognateset_ID
|
| 126 |
+
cognates_path = SOURCES_DIR / "cognates.csv"
|
| 127 |
+
cogsets: dict[str, list[dict]] = defaultdict(list)
|
| 128 |
+
doubt_count = 0
|
| 129 |
+
total_rows = 0
|
| 130 |
+
with open(cognates_path, "r", encoding="utf-8") as f:
|
| 131 |
+
reader = csv.DictReader(f)
|
| 132 |
+
for row in reader:
|
| 133 |
+
total_rows += 1
|
| 134 |
+
form_id = row["Form_ID"]
|
| 135 |
+
cogset_id = row["Cognateset_ID"]
|
| 136 |
+
doubt = row.get("Doubt", "false").strip().lower() == "true"
|
| 137 |
+
if doubt:
|
| 138 |
+
doubt_count += 1
|
| 139 |
+
form_data = forms.get(form_id)
|
| 140 |
+
if form_data is None:
|
| 141 |
+
continue
|
| 142 |
+
cogsets[cogset_id].append({
|
| 143 |
+
**form_data,
|
| 144 |
+
"doubt": doubt,
|
| 145 |
+
"cogset_id": cogset_id,
|
| 146 |
+
})
|
| 147 |
+
print(f" Cognate rows: {total_rows}")
|
| 148 |
+
print(f" Doubtful: {doubt_count}")
|
| 149 |
+
print(f" Cognate sets: {len(cogsets)}")
|
| 150 |
+
|
| 151 |
+
# Step 5: Generate cross-language pairs
|
| 152 |
+
output_path = STAGING_DIR / "iecor_cognate_pairs.tsv"
|
| 153 |
+
pair_count = 0
|
| 154 |
+
with open(output_path, "w", encoding="utf-8") as out:
|
| 155 |
+
out.write(HEADER)
|
| 156 |
+
for cogset_id, members in cogsets.items():
|
| 157 |
+
for a, b in combinations(members, 2):
|
| 158 |
+
if a["iso"] == b["iso"]:
|
| 159 |
+
continue
|
| 160 |
+
score = sca_similarity(a["ipa"], b["ipa"])
|
| 161 |
+
confidence = "doubtful" if (a["doubt"] or b["doubt"]) else "certain"
|
| 162 |
+
out.write(
|
| 163 |
+
f"{a['iso']}\t{a['word']}\t{a['ipa']}\t"
|
| 164 |
+
f"{b['iso']}\t{b['word']}\t{b['ipa']}\t"
|
| 165 |
+
f"{a['concept']}\texpert_cognate\t{score}\tiecor\t"
|
| 166 |
+
f"inherited\t-\t{confidence}\t{cogset_id}\n"
|
| 167 |
+
)
|
| 168 |
+
pair_count += 1
|
| 169 |
+
if pair_count % 100000 == 0:
|
| 170 |
+
print(f" ... {pair_count:,} pairs written")
|
| 171 |
+
|
| 172 |
+
print(f"\n Total pairs: {pair_count:,}")
|
| 173 |
+
print(f" Output: {output_path}")
|
| 174 |
+
print("=" * 60)
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
if __name__ == "__main__":
|
| 178 |
+
main()
|
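The sca_similarity helper repeated across these extractors is a single-row dynamic-programming Levenshtein distance, normalised by the longer string. Below is a standalone replica of just the DP core, applied to hypothetical sound-class strings (real SCA strings come from cognate_pipeline.normalise.sound_class, which is not reproduced here):

def levenshtein_similarity(a: str, b: str) -> float:
    """Standalone replica of the DP core in sca_similarity."""
    if not a or not b:
        return 0.0
    m, n = len(a), len(b)
    dp = list(range(n + 1))  # dp[j] = edit distance between a[:i] and b[:j]
    for i in range(1, m + 1):
        prev = dp[0]  # diagonal value carried over from the previous row
        dp[0] = i
        for j in range(1, n + 1):
            temp = dp[j]
            if a[i - 1] == b[j - 1]:
                dp[j] = prev
            else:
                dp[j] = 1 + min(prev, dp[j], dp[j - 1])
            prev = temp
    return round(1.0 - dp[n] / max(m, n), 4)

print(levenshtein_similarity("PVRV", "PVRV"))  # 1.0  (identical strings)
print(levenshtein_similarity("PVRV", "PVLV"))  # 0.75 (one substitution in four)
print(levenshtein_similarity("PVRV", "TVKV"))  # 0.5  (two substitutions in four)

Keeping only one row of the DP table makes the memory cost O(min-length) per comparison, which matters when the extractors score millions of pairs.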
scripts/extract_sinotibetan_cognates_v2.py
ADDED
@@ -0,0 +1,163 @@
#!/usr/bin/env python3
"""Extract Sino-Tibetan cognate pairs from sinotibetan_dump.tsv.

Fixes: filters out entries with BORROWING flag, uses IPA column (not concept).

Output: staging/cognate_pairs/sinotibetan_cognate_pairs.tsv (14-column schema)
"""

from __future__ import annotations

import csv
import io
import sys
from collections import defaultdict
from itertools import combinations
from pathlib import Path

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8")

ROOT = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(ROOT / "cognate_pipeline" / "src"))
sys.path.insert(0, str(ROOT / "scripts"))

from cognate_pipeline.normalise.sound_class import ipa_to_sound_class  # noqa: E402

SOURCE_FILE = ROOT / "sources" / "sinotibetan" / "sinotibetan_dump.tsv"
STAGING_DIR = ROOT / "staging" / "cognate_pairs"
STAGING_DIR.mkdir(parents=True, exist_ok=True)

HEADER = (
    "Lang_A\tWord_A\tIPA_A\tLang_B\tWord_B\tIPA_B\tConcept_ID\t"
    "Relationship\tScore\tSource\tRelation_Detail\tDonor_Language\t"
    "Confidence\tSource_Record_ID\n"
)

# Map doculect names to ISO 639-3 codes
DOCULECT_MAP = {
    "Old_Chinese": "och",
    "Japhug": "jya",
    "Tibetan_Written": "bod",
    "Old_Burmese": "obr",
    "Jingpho": "kac",
    "Lisu": "lis",
    "Naxi": "nxq",
    "Khaling": "klr",
    "Limbu": "lif",
    "Pumi_Lanping": "pmi",
    "Qiang_Mawo": "qxs",
    "Tujia": "tji",
    "Dulong": "duu",
    "Hakha": "cnh",
    "Bai_Jianchuan": "bca",
}


def sca_similarity(ipa_a: str, ipa_b: str) -> float:
    """Compute normalised Levenshtein similarity on SCA strings."""
    try:
        sca_a = ipa_to_sound_class(ipa_a)
        sca_b = ipa_to_sound_class(ipa_b)
    except Exception:
        return 0.0
    if not sca_a or not sca_b:
        return 0.0
    m, n = len(sca_a), len(sca_b)
    if m == 0 or n == 0:
        return 0.0
    dp = list(range(n + 1))
    for i in range(1, m + 1):
        prev = dp[0]
        dp[0] = i
        for j in range(1, n + 1):
            temp = dp[j]
            if sca_a[i - 1] == sca_b[j - 1]:
                dp[j] = prev
            else:
                dp[j] = 1 + min(prev, dp[j], dp[j - 1])
            prev = temp
    dist = dp[n]
    return round(1.0 - dist / max(m, n), 4)


def main():
    print("=" * 60)
    print("Sino-Tibetan Cognate Extraction v2")
    print("=" * 60)

    if not SOURCE_FILE.exists():
        print(f"ERROR: Source file not found: {SOURCE_FILE}")
        sys.exit(1)

    # Read source TSV
    cogsets: dict[str, list[dict]] = defaultdict(list)
    total_rows = 0
    skipped_borrowing = 0
    skipped_no_cogid = 0
    skipped_unknown_doculect = 0

    with open(SOURCE_FILE, "r", encoding="utf-8") as f:
        reader = csv.DictReader(f, delimiter="\t")
        for row in reader:
            total_rows += 1
            doculect = row.get("DOCULECT", "").strip()
            iso = DOCULECT_MAP.get(doculect, "")
            if not iso:
                skipped_unknown_doculect += 1
                continue

            # Filter borrowings
            borrowing = row.get("BORROWING", "").strip()
            if borrowing:
                skipped_borrowing += 1
                continue

            cogid = row.get("COGID", "").strip()
            if not cogid:
                skipped_no_cogid += 1
                continue

            ipa = row.get("IPA", "").strip()
            concept = row.get("CONCEPT", "").strip()
            if not ipa:
                continue

            cogsets[f"st_{cogid}"].append({
                "iso": iso,
                "word": ipa,  # Use IPA as word (no orthographic form available)
                "ipa": ipa,
                "concept": concept,
            })

    print(f" Total rows: {total_rows}")
    print(f" Skipped (borrowing): {skipped_borrowing}")
    print(f" Skipped (no COGID): {skipped_no_cogid}")
    print(f" Skipped (unknown doculect): {skipped_unknown_doculect}")
    print(f" Cognate sets: {len(cogsets)}")

    # Generate cross-language pairs
    output_path = STAGING_DIR / "sinotibetan_cognate_pairs.tsv"
    pair_count = 0
    with open(output_path, "w", encoding="utf-8") as out:
        out.write(HEADER)
        for cogset_id, members in cogsets.items():
            for a, b in combinations(members, 2):
                if a["iso"] == b["iso"]:
                    continue
                score = sca_similarity(a["ipa"], b["ipa"])
                out.write(
                    f"{a['iso']}\t{a['word']}\t{a['ipa']}\t"
                    f"{b['iso']}\t{b['word']}\t{b['ipa']}\t"
                    f"{a['concept']}\texpert_cognate\t{score}\tsinotibetan\t"
                    f"inherited\t-\tcertain\t{cogset_id}\n"
                )
                pair_count += 1

    print(f"\n Total pairs: {pair_count:,}")
    print(f" Output: {output_path}")
    print("=" * 60)


if __name__ == "__main__":
    main()
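The BORROWING/COGID filtering reduces to a few dictionary lookups per row. A small sketch on an invented three-row slice of the dump (column names match the script; the forms themselves are illustrative only):

import csv
import io

# Invented three-row slice in the dump's column layout
tsv = (
    "DOCULECT\tCONCEPT\tIPA\tCOGID\tBORROWING\n"
    "Old_Chinese\tfish\tŋa\t17\t\n"
    "Japhug\tfish\tqaɕpa\t17\t\n"
    "Lisu\tfish\tŋwa\t17\tChinese\n"
)

DOCULECT_MAP = {"Old_Chinese": "och", "Japhug": "jya", "Lisu": "lis"}

kept = []
for row in csv.DictReader(io.StringIO(tsv), delimiter="\t"):
    if row["BORROWING"].strip():  # non-empty flag marks a loanword: drop it
        continue
    if not row["COGID"].strip():  # no cognate-set id: unusable
        continue
    kept.append((DOCULECT_MAP[row["DOCULECT"]], row["IPA"], f"st_{row['COGID']}"))

print(kept)  # [('och', 'ŋa', 'st_17'), ('jya', 'qaɕpa', 'st_17')]

The flagged Lisu row never enters a cognate set, so it can never surface as a false "inherited" pair downstream.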
scripts/extract_wold_borrowings_v2.py
ADDED
@@ -0,0 +1,203 @@
#!/usr/bin/env python3
"""Extract WOLD borrowing pairs from the authoritative BorrowingTable.

Reads sources/wold/cldf/borrowings.csv (21K explicit donor-recipient events)
instead of fabricating pairs from forms.csv Borrowed column.

Output: staging/cognate_pairs/wold_borrowing_pairs.tsv (14-column schema)
"""

from __future__ import annotations

import csv
import io
import re
import sys
from pathlib import Path

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8")

ROOT = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(ROOT / "cognate_pipeline" / "src"))
sys.path.insert(0, str(ROOT / "scripts"))

from cognate_pipeline.normalise.sound_class import ipa_to_sound_class  # noqa: E402

SOURCES_DIR = ROOT / "sources" / "wold" / "cldf"
STAGING_DIR = ROOT / "staging" / "cognate_pairs"
STAGING_DIR.mkdir(parents=True, exist_ok=True)

HEADER = (
    "Lang_A\tWord_A\tIPA_A\tLang_B\tWord_B\tIPA_B\tConcept_ID\t"
    "Relationship\tScore\tSource\tRelation_Detail\tDonor_Language\t"
    "Confidence\tSource_Record_ID\n"
)


def segments_to_ipa(segments: str) -> str:
    """Convert CLDF Segments column to IPA string."""
    if not segments:
        return ""
    # Strip boundary markers
    tokens = segments.replace("^", "").replace("$", "").replace("+", " ").replace("#", " ").replace("_", "")
    # Join phoneme tokens
    return re.sub(r"\s+", "", tokens).strip()


def sca_similarity(ipa_a: str, ipa_b: str) -> float:
    """Compute normalised Levenshtein similarity on SCA strings."""
    try:
        sca_a = ipa_to_sound_class(ipa_a)
        sca_b = ipa_to_sound_class(ipa_b)
    except Exception:
        return 0.0
    if not sca_a or not sca_b:
        return 0.0
    m, n = len(sca_a), len(sca_b)
    if m == 0 or n == 0:
        return 0.0
    dp = list(range(n + 1))
    for i in range(1, m + 1):
        prev = dp[0]
        dp[0] = i
        for j in range(1, n + 1):
            temp = dp[j]
            if sca_a[i - 1] == sca_b[j - 1]:
                dp[j] = prev
            else:
                dp[j] = 1 + min(prev, dp[j], dp[j - 1])
            prev = temp
    dist = dp[n]
    return round(1.0 - dist / max(m, n), 4)


def main():
    print("=" * 60)
    print("WOLD Borrowing Extraction v2")
    print("=" * 60)

    # Step 1: Read languages.csv → Language name → ISO code
    lang_path = SOURCES_DIR / "languages.csv"
    lang_iso = {}
    lang_name_to_iso = {}
    with open(lang_path, "r", encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for row in reader:
            lid = row["ID"]
            iso = row.get("ISO639P3code", "").strip()
            name = row.get("Name", "").strip()
            if iso:
                lang_iso[lid] = iso
                lang_name_to_iso[name] = iso
    print(f" Languages with ISO codes: {len(lang_iso)}")

    # Step 2: Read parameters.csv → Parameter_ID → concept gloss
    params_path = SOURCES_DIR / "parameters.csv"
    param_concept = {}
    if params_path.exists():
        with open(params_path, "r", encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for row in reader:
                pid = row["ID"]
                concept = row.get("Concepticon_Gloss", row.get("Name", pid)).strip()
                param_concept[pid] = concept

    # Step 3: Read forms.csv → Form_ID → {language, word, ipa, concept}
    forms_path = SOURCES_DIR / "forms.csv"
    forms = {}
    with open(forms_path, "r", encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for row in reader:
            fid = row["ID"]
            lid = row["Language_ID"]
            iso = lang_iso.get(lid, "")
            if not iso:
                continue
            form = row.get("Form", row.get("Value", "")).strip()
            segments = row.get("Segments", "").strip()
            ipa = segments_to_ipa(segments) if segments else form.lower()
            param_id = row.get("Parameter_ID", "").strip()
            concept = param_concept.get(param_id, param_id)
            forms[fid] = {
                "iso": iso,
                "word": form,
                "ipa": ipa,
                "concept": concept,
            }
    print(f" Forms loaded: {len(forms)}")

    # Step 4: Read borrowings.csv → generate pairs
    borrowings_path = SOURCES_DIR / "borrowings.csv"
    output_path = STAGING_DIR / "wold_borrowing_pairs.tsv"
    pair_count = 0
    skipped_no_target = 0
    skipped_no_source = 0

    with open(output_path, "w", encoding="utf-8") as out:
        out.write(HEADER)
        with open(borrowings_path, "r", encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for row in reader:
                borrowing_id = row["ID"]
                target_fid = row.get("Target_Form_ID", "").strip()
                source_fid = row.get("Source_Form_ID", "").strip()
                source_word = row.get("Source_word", "").strip()
                source_lang = row.get("Source_languoid", "").strip()
                source_certain = row.get("Source_certain", "").strip()
                source_relation = row.get("Source_relation", "").strip()

                # Target form is required
                target = forms.get(target_fid)
                if target is None:
                    skipped_no_target += 1
                    continue

                # Source can come from Source_Form_ID or Source_word
                if source_fid and source_fid in forms:
                    source = forms[source_fid]
                    source_iso = source["iso"]
                    source_word_str = source["word"]
                    source_ipa = source["ipa"]
                elif source_word:
                    # Source form not in database — use Source_word + Source_languoid
                    source_iso = lang_name_to_iso.get(source_lang, "-")
                    source_word_str = source_word
                    source_ipa = source_word.lower()  # best-effort pseudo-IPA
                else:
                    skipped_no_source += 1
                    continue

                # Donor language
                donor_lang = source_iso if source_iso != "-" else source_lang

                # Confidence
                confidence = "certain" if source_certain == "yes" else (
                    "uncertain" if source_certain == "no" else source_certain if source_certain else "-"
                )

                # Score
                score = sca_similarity(target["ipa"], source_ipa)

                # Filter self-loans (same language borrowing from itself)
                if target["iso"] == source_iso:
                    continue

                # Lang_A = target (borrower), Lang_B = source (donor)
                out.write(
                    f"{target['iso']}\t{target['word']}\t{target['ipa']}\t"
                    f"{source_iso}\t{source_word_str}\t{source_ipa}\t"
                    f"{target['concept']}\tborrowing\t{score}\twold\t"
                    f"borrowed\t{donor_lang}\t{confidence}\twold_{borrowing_id}\n"
                )
                pair_count += 1

    print(f"\n Total borrowing pairs: {pair_count:,}")
    print(f" Skipped (no target form): {skipped_no_target}")
    print(f" Skipped (no source info): {skipped_no_source}")
    print(f" Output: {output_path}")
    print("=" * 60)


if __name__ == "__main__":
    main()
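segments_to_ipa only strips CLDF boundary markers and joins the space-separated phoneme tokens. A replica with two worked inputs (the segment strings are invented examples of the CLDF Segments format):

import re

def segments_to_ipa(segments: str) -> str:
    """Replica of the helper above: drop boundary markers, join tokens."""
    if not segments:
        return ""
    tokens = segments.replace("^", "").replace("$", "").replace("+", " ") \
                     .replace("#", " ").replace("_", "")
    return re.sub(r"\s+", "", tokens).strip()

# Invented CLDF-style Segments values:
print(segments_to_ipa("k a + m i ɾ a"))  # 'kamiɾa' (morpheme boundary removed)
print(segments_to_ipa("^ b uː k $"))     # 'buːk'  (word-boundary markers removed)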
scripts/merge_cognate_pairs.py
ADDED
@@ -0,0 +1,179 @@
#!/usr/bin/env python3
"""Merge all staging cognate pair files into final output.

Deduplicates pairs (priority: expert > borrowing > concept_aligned > similarity).
Writes 3 output files with 14-column schema.

Output: data/training/cognate_pairs/cognate_pairs_{inherited,borrowing,similarity}.tsv
"""

from __future__ import annotations

import io
import sys
from pathlib import Path

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8")

ROOT = Path(__file__).resolve().parent.parent
STAGING_DIR = ROOT / "staging" / "cognate_pairs"
OUTPUT_DIR = ROOT / "data" / "training" / "cognate_pairs"
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)

HEADER = (
    "Lang_A\tWord_A\tIPA_A\tLang_B\tWord_B\tIPA_B\tConcept_ID\t"
    "Relationship\tScore\tSource\tRelation_Detail\tDonor_Language\t"
    "Confidence\tSource_Record_ID\n"
)

# Priority order (lower = higher priority)
PRIORITY = {
    "expert_cognate": 0,
    "borrowing": 1,
    "concept_aligned": 2,
    "similarity_only": 3,
}


def pair_key(lang_a: str, word_a: str, lang_b: str, word_b: str, concept: str) -> str:
    """Canonical pair key for deduplication (order-independent)."""
    side_a = f"{lang_a}|{word_a}"
    side_b = f"{lang_b}|{word_b}"
    if side_a > side_b:
        side_a, side_b = side_b, side_a
    return f"{side_a}||{side_b}||{concept}"


def main():
    print("=" * 60)
    print("Cognate Pairs Merge")
    print("=" * 60)

    # Collect all staging files
    staging_files = sorted(STAGING_DIR.glob("*.tsv"))
    print(f" Staging files found: {len(staging_files)}")
    for sf in staging_files:
        print(f"   {sf.name}")

    if not staging_files:
        print("ERROR: No staging files found.")
        sys.exit(1)

    # Phase 1: Read all staging files, deduplicate by pair key
    # For memory efficiency with large files, we stream-process
    seen_pairs: dict[str, int] = {}  # pair_key → priority
    inherited_path = OUTPUT_DIR / "cognate_pairs_inherited.tsv"
    borrowing_path = OUTPUT_DIR / "cognate_pairs_borrowing.tsv"
    similarity_path = OUTPUT_DIR / "cognate_pairs_similarity.tsv"

    # First pass: collect all pair keys and their best priority
    print("\n Pass 1: Scanning for duplicates...")
    total_input = 0
    for sf in staging_files:
        with open(sf, "r", encoding="utf-8") as f:
            header = f.readline()
            for line in f:
                total_input += 1
                parts = line.rstrip("\n").split("\t")
                if len(parts) < 8:
                    continue
                lang_a, word_a = parts[0], parts[1]
                lang_b, word_b = parts[3], parts[4]
                concept = parts[6]
                relationship = parts[7]
                key = pair_key(lang_a, word_a, lang_b, word_b, concept)
                prio = PRIORITY.get(relationship, 99)
                if key not in seen_pairs or prio < seen_pairs[key]:
                    seen_pairs[key] = prio
    print(f" Total input rows: {total_input:,}")
    print(f" Unique pairs: {len(seen_pairs):,}")

    # Second pass: write output files, keeping only best-priority entries
    print("\n Pass 2: Writing output files...")
    written_keys: set[str] = set()
    # Track (lang_pair, concept) combos that appear in inherited/borrowing
    # to prevent the same language-concept pair from also appearing in similarity
    inherited_lang_concepts: set[str] = set()
    counts = {"inherited": 0, "borrowing": 0, "similarity": 0}
    self_pair_skips = 0

    with open(inherited_path, "w", encoding="utf-8") as f_inh, \
         open(borrowing_path, "w", encoding="utf-8") as f_bor, \
         open(similarity_path, "w", encoding="utf-8") as f_sim:
        f_inh.write(HEADER)
        f_bor.write(HEADER)
        f_sim.write(HEADER)

        for sf in staging_files:
            with open(sf, "r", encoding="utf-8") as f:
                header = f.readline()
                for line in f:
                    parts = line.rstrip("\n").split("\t")
                    if len(parts) < 8:
                        continue
                    lang_a, word_a = parts[0], parts[1]
                    lang_b, word_b = parts[3], parts[4]
                    concept = parts[6]
                    relationship = parts[7]

                    # Skip self-pairs (same language)
                    if lang_a == lang_b:
                        self_pair_skips += 1
                        continue

                    key = pair_key(lang_a, word_a, lang_b, word_b, concept)

                    # Skip if already written (dedup)
                    if key in written_keys:
                        continue

                    # Only write if this is the best-priority entry
                    prio = PRIORITY.get(relationship, 99)
                    if seen_pairs.get(key, 99) != prio:
                        continue

                    # Language-concept key for cross-file dedup
                    lc_a, lc_b = sorted([lang_a, lang_b])
                    lang_concept_key = f"{lc_a}||{lc_b}||{concept}"

                    written_keys.add(key)

                    # Route to correct output file
                    if relationship == "expert_cognate":
                        f_inh.write(line)
                        counts["inherited"] += 1
                        inherited_lang_concepts.add(lang_concept_key)
                    elif relationship == "borrowing":
                        f_bor.write(line)
                        counts["borrowing"] += 1
                    elif relationship == "concept_aligned":
                        f_inh.write(line)
                        counts["inherited"] += 1
                        inherited_lang_concepts.add(lang_concept_key)
                    elif relationship == "similarity_only":
                        # Skip if this language-concept combo already
                        # has an inherited/expert pair (prevents cross-file
                        # contamination)
                        if lang_concept_key in inherited_lang_concepts:
                            continue
                        f_sim.write(line)
                        counts["similarity"] += 1

                    total_written = sum(counts.values())
                    if total_written % 1000000 == 0:
                        print(f" ... {total_written:,} pairs written")

    print(f"\n Output files:")
    print(f"   inherited: {counts['inherited']:,}")
    print(f"   borrowing: {counts['borrowing']:,}")
    print(f"   similarity: {counts['similarity']:,}")
    print(f"   TOTAL: {sum(counts.values()):,}")
    print(f"\n Deduplicated: {total_input - sum(counts.values()):,} pairs removed")
    if self_pair_skips:
        print(f" Self-pairs skipped (Lang_A == Lang_B): {self_pair_skips:,}")
    print("=" * 60)


if __name__ == "__main__":
    main()
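The deduplication hinges on pair_key being order-independent: the two sides are sorted before joining, so an A→B row and the same pair listed B→A collapse to one key. A replica with an invented English/German pair:

def pair_key(lang_a, word_a, lang_b, word_b, concept):
    """Replica of the helper above."""
    side_a, side_b = f"{lang_a}|{word_a}", f"{lang_b}|{word_b}"
    if side_a > side_b:
        side_a, side_b = side_b, side_a
    return f"{side_a}||{side_b}||{concept}"

# Same pair, listed in either direction (languages and words invented):
k1 = pair_key("deu", "fisch", "eng", "fish", "FISH")
k2 = pair_key("eng", "fish", "deu", "fisch", "FISH")
assert k1 == k2 == "deu|fisch||eng|fish||FISH"
print(k1)

Because the key also includes the concept, the same word pair attested under two different concepts is kept twice, which matches the 14-column schema's per-concept rows.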
scripts/rebuild_concept_aligned_pairs.py
ADDED
@@ -0,0 +1,178 @@
#!/usr/bin/env python3
"""Rebuild concept-aligned pairs from internal lexicons.

Fixes: labels as 'concept_aligned' (not 'cognate_inherited'),
uses random sampling instead of file-sort truncation.

Output: staging/cognate_pairs/concept_aligned_pairs.tsv (14-column schema)
        staging/cognate_pairs/similarity_only_pairs.tsv (14-column schema)
"""

from __future__ import annotations

import io
import json
import random
import sys
from collections import defaultdict
from itertools import combinations
from pathlib import Path

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8")

ROOT = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(ROOT / "cognate_pipeline" / "src"))
sys.path.insert(0, str(ROOT / "scripts"))

from cognate_pipeline.normalise.sound_class import ipa_to_sound_class  # noqa: E402

LEXICON_DIR = ROOT / "data" / "training" / "lexicons"
FAMILY_MAP_PATH = ROOT / "cognate_pipeline" / "src" / "cognate_pipeline" / "cognate" / "family_map.json"
STAGING_DIR = ROOT / "staging" / "cognate_pairs"
STAGING_DIR.mkdir(parents=True, exist_ok=True)

HEADER = (
    "Lang_A\tWord_A\tIPA_A\tLang_B\tWord_B\tIPA_B\tConcept_ID\t"
    "Relationship\tScore\tSource\tRelation_Detail\tDonor_Language\t"
    "Confidence\tSource_Record_ID\n"
)

MAX_GROUP_SIZE = 50
SEED = 42


def sca_similarity(sca_a: str, sca_b: str) -> float:
    """Compute normalised Levenshtein similarity on pre-computed SCA strings."""
    if not sca_a or not sca_b:
        return 0.0
    m, n = len(sca_a), len(sca_b)
    if m == 0 or n == 0:
        return 0.0
    dp = list(range(n + 1))
    for i in range(1, m + 1):
        prev = dp[0]
        dp[0] = i
        for j in range(1, n + 1):
            temp = dp[j]
            if sca_a[i - 1] == sca_b[j - 1]:
                dp[j] = prev
            else:
                dp[j] = 1 + min(prev, dp[j], dp[j - 1])
            prev = temp
    dist = dp[n]
    return round(1.0 - dist / max(m, n), 4)


def main():
    print("=" * 60)
    print("Concept-Aligned Pairs Rebuild")
    print("=" * 60)

    # Load family map
    if not FAMILY_MAP_PATH.exists():
        print(f"ERROR: family_map.json not found at {FAMILY_MAP_PATH}")
        sys.exit(1)

    with open(FAMILY_MAP_PATH, "r", encoding="utf-8") as f:
        family_map = json.load(f)
    print(f" Family map: {len(family_map)} languages")

    # Collect all lexicon entries grouped by (family, concept)
    groups: dict[tuple[str, str], list[dict]] = defaultdict(list)
    total_entries = 0
    files_read = 0

    for tsv_path in sorted(LEXICON_DIR.glob("*.tsv")):
        iso = tsv_path.stem
        family = family_map.get(iso, "unknown")
        if family == "unknown":
            continue

        with open(tsv_path, "r", encoding="utf-8") as f:
            lines = f.readlines()

        if not lines or not lines[0].startswith("Word\t"):
            continue

        files_read += 1
        for line in lines[1:]:
            line = line.rstrip("\n\r")
            if not line.strip():
                continue
            parts = line.split("\t")
            if len(parts) < 6:
                continue
            word, ipa, sca, source, concept_id, cog_set = parts[:6]
            if not sca or sca == "-" or not concept_id or concept_id == "-":
                continue
            # Fix Sino-Tibetan lexicons where Word column contains concept
            # gloss instead of actual word form (IPA column has the form)
            if word == concept_id and ipa and ipa != "-":
                word = ipa
            groups[(family, concept_id)].append({
                "iso": iso,
                "word": word,
                "ipa": ipa,
                "sca": sca,
                "concept": concept_id,
                "family": family,
            })
            total_entries += 1

    print(f" Files read: {files_read}")
    print(f" Total entries: {total_entries:,}")
    print(f" Groups (family, concept): {len(groups):,}")

    # Generate pairs
    rng = random.Random(SEED)
    aligned_path = STAGING_DIR / "concept_aligned_pairs.tsv"
    similarity_path = STAGING_DIR / "similarity_only_pairs.tsv"
    aligned_count = 0
    similarity_count = 0

    with open(aligned_path, "w", encoding="utf-8") as f_aligned, \
         open(similarity_path, "w", encoding="utf-8") as f_sim:
        f_aligned.write(HEADER)
        f_sim.write(HEADER)

        for (family, concept), members in groups.items():
            # Random sampling for large groups (fixes file-sort bias)
            if len(members) > MAX_GROUP_SIZE:
                members = rng.sample(members, MAX_GROUP_SIZE)

            for a, b in combinations(members, 2):
                if a["iso"] == b["iso"]:
                    continue
                score = sca_similarity(a["sca"], b["sca"])
                source = f"concept_align_{family}"

                if score >= 0.5:
                    f_aligned.write(
                        f"{a['iso']}\t{a['word']}\t{a['ipa']}\t"
                        f"{b['iso']}\t{b['word']}\t{b['ipa']}\t"
                        f"{concept}\tconcept_aligned\t{score}\t{source}\t"
                        f"-\t-\t-\t-\n"
                    )
                    aligned_count += 1
                elif score >= 0.3:
                    f_sim.write(
                        f"{a['iso']}\t{a['word']}\t{a['ipa']}\t"
                        f"{b['iso']}\t{b['word']}\t{b['ipa']}\t"
                        f"{concept}\tsimilarity_only\t{score}\t{source}\t"
                        f"-\t-\t-\t-\n"
                    )
                    similarity_count += 1

                if (aligned_count + similarity_count) % 1000000 == 0:
                    print(f" ... {aligned_count + similarity_count:,} pairs")

    print(f"\n Concept-aligned pairs (score >= 0.5): {aligned_count:,}")
    print(f" Similarity-only pairs (0.3-0.5): {similarity_count:,}")
    print(f" Output: {aligned_path}")
    print(f" Output: {similarity_path}")
    print("=" * 60)


if __name__ == "__main__":
    main()
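The sampling fix is worth seeing in isolation: rng.sample with a fixed seed draws a uniform, reproducible subset, instead of keeping whichever MAX_GROUP_SIZE entries happen to sort first by filename. A sketch with an invented oversized group:

import random
from itertools import combinations

MAX_GROUP_SIZE = 50
rng = random.Random(42)  # same SEED as above, so runs are reproducible

# Invented oversized (family, concept) group of 200 entries
members = [{"iso": f"l{i:03d}", "word": f"w{i}"} for i in range(200)]

# Uniform random sample instead of truncating to the first 50 in file order,
# so languages whose lexicon files sort late are no longer dropped
if len(members) > MAX_GROUP_SIZE:
    members = rng.sample(members, MAX_GROUP_SIZE)

print(len(members))                              # 50
print(sum(1 for _ in combinations(members, 2)))  # 1225 = 50 * 49 / 2

Capping the group keeps the pair count quadratic in 50 rather than in the raw group size, which is what keeps the rebuild tractable on large families.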
scripts/transliteration_maps.py
CHANGED
@@ -1564,8 +1564,8 @@ FALISCAN_MAP: Dict[str, str] = {
     "v": "w",
     # <y> as semivowel
     "y": "j",
-    #
-
+    # Explicit g → IPA ɡ (U+0261) — prevents ASCII g passthrough
+    "g": "ɡ",
 }
 
 # ---------------------------------------------------------------------------

@@ -1671,7 +1671,8 @@ PROTO_GERMANIC_MAP: Dict[str, str] = {
     # Consonants
     "þ": "θ", "ð": "ð",
     "hw": "xʷ",
-    #
+    # Explicit g → IPA ɡ (U+0261) — prevents ASCII g passthrough
+    "g": "ɡ",
 }
 
 # ---------------------------------------------------------------------------

@@ -1682,11 +1683,13 @@ PROTO_CELTIC_MAP: Dict[str, str] = {
     # Long vowels
     "ā": "aː", "ē": "eː", "ī": "iː", "ō": "oː", "ū": "uː",
     # Labiovelar
-    "kʷ": "kʷ", "gʷ": "gʷ",
+    "kʷ": "kʷ", "gʷ": "ɡʷ",
     # Aspirated (from PIE)
-    "bʰ": "bʰ", "dʰ": "dʰ", "gʰ": "gʰ",
+    "bʰ": "bʰ", "dʰ": "dʰ", "gʰ": "ɡʰ",
     # Laryngeals sometimes preserved in notation
     "x": "x",
+    # Standard consonants (g passthrough fix)
+    "g": "ɡ",
 }
 
 # ---------------------------------------------------------------------------

@@ -1718,7 +1721,7 @@ OLD_JAPANESE_MAP: Dict[str, str] = {
     # ONCOJ conventions → IPA
     "py": "pʲ", "ky": "kʲ", "sy": "ɕ", "ty": "tɕ", "ny": "ɲ",
     "my": "mʲ", "ry": "ɾʲ",
-    "p": "p", "b": "b", "t": "t", "d": "d", "k": "k", "g": "g",
+    "p": "p", "b": "b", "t": "t", "d": "d", "k": "k", "g": "ɡ",
     "s": "s", "z": "z", "m": "m", "n": "n", "r": "ɾ",
     "w": "w", "y": "j",
     # Long vowels (ONCOJ sometimes marks with macron)

@@ -1742,7 +1745,7 @@ MIDDLE_PERSIAN_MAP: Dict[str, str] = {
     "ẏ": "j", "ẇ": "w", "ṯ": "θ", "δ": "ð",
     "ʾ": "ʔ",
     # Standard (identity-like)
-    "b": "b", "d": "d", "f": "f", "g": "g", "h": "h",
+    "b": "b", "d": "d", "f": "f", "g": "ɡ", "h": "h",
     "j": "dʒ", "k": "k", "l": "l", "m": "m", "n": "n",
     "p": "p", "r": "r", "s": "s", "t": "t", "w": "w", "y": "j", "z": "z",
 }

@@ -1762,7 +1765,7 @@ SOGDIAN_MAP: Dict[str, str] = {
     "ṯ": "θ", "ʾ": "ʔ",
     "ny": "ɲ", "ng": "ŋ",
     # Standard
-    "b": "b", "d": "d", "f": "f", "g": "g", "h": "h",
+    "b": "b", "d": "d", "f": "f", "g": "ɡ", "h": "h",
     "j": "dʒ", "k": "k", "l": "l", "m": "m", "n": "n",
     "p": "p", "r": "r", "s": "s", "t": "t", "w": "w", "y": "j", "z": "z",
 }

@@ -1779,7 +1782,7 @@ GAULISH_MAP: Dict[str, str] = {
     "χ": "x", "ð": "ð", "θ": "θ",
     "ā": "aː", "ē": "eː", "ī": "iː", "ō": "oː", "ū": "uː",
     # Standard
-    "b": "b", "d": "d", "g": "g", "k": "k", "l": "l", "m": "m",
+    "b": "b", "d": "d", "g": "ɡ", "k": "k", "l": "l", "m": "m",
     "n": "n", "p": "p", "r": "r", "s": "s", "t": "t", "w": "w",
     "a": "a", "e": "e", "i": "i", "o": "o", "u": "u",
     "x": "x",

@@ -1796,7 +1799,7 @@ GAULISH_MAP: Dict[str, str] = {
 LEPONTIC_MAP: Dict[str, str] = {
     # Geminates / long consonants (must precede singles for greedy match)
     "pp": "pː", "bb": "bː", "tt": "tː", "dd": "dː",
-    "kk": "kː", "gg": "gː",
+    "kk": "kː", "gg": "ɡː",
     "mm": "mː", "nn": "nː", "ll": "lː", "rr": "rː",
     # Affricate
     "ts": "ts",

@@ -1817,7 +1820,7 @@ LEPONTIC_MAP: Dict[str, str] = {
     "i̯": "j",  # palatal glide (U+0069 U+032F)
     "u̯": "w",  # labial glide (U+0075 U+032F)
     # Stops
-    "p": "p", "b": "b", "t": "t", "d": "d", "k": "k", "g": "g",
+    "p": "p", "b": "b", "t": "t", "d": "d", "k": "k", "g": "ɡ",
     "q": "kʷ",  # labiovelar (rare, archaic)
     # Sonorants
     "m": "m", "n": "n", "l": "l", "r": "r",

@@ -1866,7 +1869,7 @@ PROTO_SINO_TIBETAN_MAP: Dict[str, str] = {
     "ʔ": "ʔ", "ŋ": "ŋ", "ɲ": "ɲ",
     "ā": "aː", "ē": "eː", "ī": "iː", "ō": "oː", "ū": "uː",
     # Standard
-    "b": "b", "d": "d", "g": "g", "k": "k", "l": "l", "m": "m",
+    "b": "b", "d": "d", "g": "ɡ", "k": "k", "l": "l", "m": "m",
     "n": "n", "p": "p", "r": "r", "s": "s", "t": "t", "w": "w", "y": "j", "z": "z",
     "a": "a", "e": "e", "i": "i", "o": "o", "u": "u",
     "h": "h",

@@ -1898,7 +1901,7 @@ PROTO_SLAVIC_MAP: Dict[str, str] = {
     # Affricates
     "c": "ts",
     # Standard consonants (identity)
-    "b": "b", "d": "d", "g": "g", "k": "k", "l": "l", "m": "m",
+    "b": "b", "d": "d", "g": "ɡ", "k": "k", "l": "l", "m": "m",
     "n": "n", "p": "p", "r": "r", "s": "s", "t": "t", "v": "v",
     "z": "z", "x": "x", "j": "j",
     # Vowels

@@ -1929,7 +1932,7 @@ PROTO_TURKIC_MAP: Dict[str, str] = {
     # Velar fricative
     "x": "x",
     # Standard
-    "b": "b", "d": "d", "g": "g", "k": "k", "l": "l", "m": "m",
+    "b": "b", "d": "d", "g": "ɡ", "k": "k", "l": "l", "m": "m",
     "n": "n", "p": "p", "r": "r", "s": "s", "t": "t", "y": "j", "z": "z",
     "a": "a", "e": "e", "i": "i", "o": "o", "u": "u",
 }

@@ -1944,16 +1947,16 @@ PROTO_ITALIC_MAP: Dict[str, str] = {
     # Long vowels
     "ā": "aː", "ē": "eː", "ī": "iː", "ō": "oː", "ū": "uː",
     # Labiovelars
-    "kʷ": "kʷ", "gʷ": "gʷ",
+    "kʷ": "kʷ", "gʷ": "ɡʷ",
     # Aspirates (from PIE)
-    "bʰ": "bʰ", "dʰ": "dʰ", "gʰ": "gʰ",
+    "bʰ": "bʰ", "dʰ": "dʰ", "gʰ": "ɡʰ", "gʷʰ": "ɡʷʰ",
     # Fricatives
     "θ": "θ", "ð": "ð", "β": "β",
     "f": "f",
     # Already-IPA characters that appear in Wiktionary notation
     "ɣ": "ɣ", "ə": "ə",
     # Standard
-    "b": "b", "d": "d", "g": "g", "k": "k", "l": "l", "m": "m",
+    "b": "b", "d": "d", "g": "ɡ", "k": "k", "l": "l", "m": "m",
     "n": "n", "p": "p", "r": "r", "s": "s", "t": "t", "w": "w",
     "j": "j", "h": "h", "z": "z",
     "a": "a", "e": "e", "i": "i", "o": "o", "u": "u",

@@ -1971,7 +1974,7 @@ PROTO_JAPONIC_MAP: Dict[str, str] = {
     "py": "pʲ", "ky": "kʲ", "ty": "tʲ", "ny": "ɲ",
     "my": "mʲ", "ry": "ɾʲ", "sy": "ɕ",
     # Standard consonants
-    "p": "p", "b": "b", "t": "t", "d": "d", "k": "k", "g": "g",
+    "p": "p", "b": "b", "t": "t", "d": "d", "k": "k", "g": "ɡ",
     "s": "s", "z": "z", "m": "m", "n": "n", "r": "ɾ",
     "w": "w", "y": "j", "h": "h",
     # Long vowels

@@ -2013,7 +2016,7 @@ PROTO_IRANIAN_MAP: Dict[str, str] = {
     # Uppercase passthrough (occasional in reconstructions)
     "B": "b", "C": "ts", "W": "w",
     # Standard consonants
-    "b": "b", "d": "d", "f": "f", "g": "g", "h": "h",
+    "b": "b", "d": "d", "f": "f", "g": "ɡ", "h": "h",
     "j": "j", "k": "k", "l": "l", "m": "m", "n": "n",
     "p": "p", "r": "r", "s": "s", "t": "t", "w": "w", "z": "z",
     # Vowels

@@ -2037,7 +2040,7 @@ CELTIBERIAN_MAP: Dict[str, str] = {
     # Digraphs
     "rs": "rs", "st": "st",
     # Standard Celtic consonants
-    "b": "b", "d": "d", "g": "g", "k": "k", "l": "l", "m": "m",
+    "b": "b", "d": "d", "g": "ɡ", "k": "k", "l": "l", "m": "m",
     "n": "n", "p": "p", "r": "r", "s": "s", "t": "t", "w": "w",
     "a": "a", "e": "e", "i": "i", "o": "o", "u": "u",
     "z": "z",

@@ -2062,7 +2065,7 @@ ANCIENT_SOUTH_ARABIAN_MAP: Dict[str, str] = {
     "s\u00b9": "s", "s\u00b2": "\u026c", "s\u00b3": "ts",
     "\u015b": "\u026c",  # alternative notation for s2
     # Standard consonants
-    "b": "b", "d": "d", "f": "f", "g": "g", "h": "h",
+    "b": "b", "d": "d", "f": "f", "g": "ɡ", "h": "h",
     "k": "k", "l": "l", "m": "m", "n": "n",
     "q": "q", "r": "r", "s": "s", "t": "t",
     "w": "w", "y": "j", "z": "z",

@@ -2174,7 +2177,7 @@ PROTO_AUSTROASIATIC_MAP: Dict[str, str] = {
     # Open/mid vowels
     "ɔ": "ɔ", "ɛ": "ɛ", "ə": "ə", "ɨ": "ɨ",
     # Standard consonants
-    "b": "b", "c": "c", "d": "d", "g": "g", "h": "h",
+    "b": "b", "c": "c", "d": "d", "g": "ɡ", "h": "h",
     "j": "j", "k": "k", "l": "l", "m": "m", "n": "n",
     "p": "p", "r": "r", "s": "s", "t": "t", "w": "w",
     # Vowels

@@ -2199,7 +2202,7 @@ PROTO_POLYNESIAN_MAP: Dict[str, str] = {
     # Standard consonants (small inventory)
     "f": "f", "h": "h", "k": "k", "l": "l", "m": "m",
     "n": "n", "p": "p", "r": "r", "s": "s", "t": "t",
-    "w": "w", "q": "q",
+    "w": "w", "q": "ʔ",
     # Vowels
     "a": "a", "e": "e", "i": "i", "o": "o", "u": "u",
 }

@@ -2223,16 +2226,15 @@ PROTO_TAI_MAP: Dict[str, str] = {
     "ŋ": "ŋ",
     # Palatal stop / fricative
     "ɟ": "ɟ", "ɕ": "ɕ", "ɲ": "ɲ",
-    #
-    "
+    # Glottal stop
+    "ʔ": "ʔ", "ˀ": "ʔ",
     # Velar/uvular fricatives
     "ɣ": "ɣ", "χ": "χ",
     # Close-mid back unrounded vowel
     "ɤ": "ɤ",
     # Velar approximant
     "ɰ": "ɰ",
-    # Glottal stop (modifier form)
-    "ʔ": "ʔ", "ˀ": "ʔ",
+    # Glottal stop (modifier form — already handled above)
     # Schwa
     "ə": "ə",
     # Long vowels

@@ -2250,7 +2252,7 @@ PROTO_TAI_MAP: Dict[str, str] = {
     # Combining diacritics (voiceless/voiced ring below — passthrough)
     "\u0325": "\u0325", "\u0329": "\u0329", "\u032C": "\u032C",
     # Standard consonants
-    "b": "b", "c": "c", "d": "d", "f": "f", "g": "g",
+    "b": "b", "c": "c", "d": "d", "f": "f", "g": "ɡ",
     "h": "h", "j": "j", "k": "k", "l": "l", "m": "m",
     "n": "n", "p": "p", "r": "r", "s": "s", "t": "t",
     "v": "v", "w": "w", "x": "x", "z": "z", "q": "q",

@@ -2276,6 +2278,10 @@ PROTO_TOCHARIAN_MAP: Dict[str, str] = {
     "c": "tɕ", "j": "j",
     # Retroflex
     "ṣ": "ʂ",
+    # Missing diacritics (Adams 2013, Winter 1992)
+    "ñ": "ɲ", "ć": "tɕ", "ë": "ə",
+    # Accented vowels (strip accent to plain vowel)
+    "á": "a", "é": "e", "í": "i", "ó": "o", "ú": "u",
     # Schwas and special vowels
     "ä": "ə", "ə": "ə",
     # Velar fricative

@@ -2283,7 +2289,7 @@ PROTO_TOCHARIAN_MAP: Dict[str, str] = {
     # Labiovelars
     "kʷ": "kʷ",
     # Standard consonants
-    "b": "b", "d": "d", "g": "g", "k": "k", "l": "l",
+    "b": "b", "d": "d", "g": "ɡ", "k": "k", "l": "l",
     "m": "m", "n": "n", "p": "p", "r": "r", "s": "s",
     "t": "t", "w": "w", "y": "j",
     # Vowels

@@ -2303,7 +2309,7 @@ PROTO_OCEANIC_MAP: Dict[str, str] = {
     "ñ": "ɲ", "nj": "ɲ",
     # Special consonants
     "ŋ": "ŋ",
-    "q": "q",
+    "q": "ʔ",
     "R": "r",  # *R = uvular or retroflex trill in some notations
     "ʀ": "ʀ",  # IPA uvular trill (used in some Oceanic entries)
     "j": "dʒ",

@@ -2312,7 +2318,7 @@ PROTO_OCEANIC_MAP: Dict[str, str] = {
     # Long vowels (if marked)
     "ā": "aː", "ē": "eː", "ī": "iː", "ō": "oː", "ū": "uː",
     # Standard consonants
-    "b": "b", "d": "d", "g": "g", "k": "k", "l": "l",
+    "b": "b", "d": "d", "g": "ɡ", "k": "k", "l": "l",
     "m": "m", "n": "n", "p": "p", "r": "r", "s": "s",
     "t": "t", "w": "w", "y": "j",
     # Vowels

@@ -2370,6 +2376,8 @@ MEROITIC_MAP: Dict[str, str] = {
     "s": "s", "\u0161": "\u0283", "\u1e2b": "x", "h": "h",
     # Semivowels
     "w": "w", "y": "j",
+    # Explicit g → IPA ɡ (U+0261) — guard against ASCII g in source data
+    "g": "ɡ",
     # Word divider (Meroitic uses : as word separator)
     ":": "",
     # Alternate scholarly notations
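The comments in LEPONTIC_MAP ("must precede singles for greedy match") imply a greedy longest-match applier. The repo's actual transliteration function is not part of this diff, so the following is only a plausible sketch of how such a map could be applied, using a toy subset of mappings:

from typing import Dict

def transliterate(text: str, mapping: Dict[str, str]) -> str:
    """Greedy longest-match, left to right (hypothetical applier, not from the repo)."""
    keys = sorted(mapping, key=len, reverse=True)  # try longer keys first
    out, i = [], 0
    while i < len(text):
        for k in keys:
            if text.startswith(k, i):
                out.append(mapping[k])
                i += len(k)
                break
        else:
            out.append(text[i])  # unmapped characters pass through unchanged
            i += 1
    return "".join(out)

toy_map = {"gg": "ɡː", "g": "ɡ", "kʷ": "kʷ"}  # toy subset, illustration only
print(transliterate("aggo", toy_map))  # 'aɡːo': the geminate wins over single g

Trying longer keys first is the property the "geminates must precede singles" comments guarantee in an insertion-ordered applier; sorting by key length achieves the same effect regardless of dict order.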