Datasets:
Phase 8: Add 24 new ancient/proto-language lexicons (12,911 entries) + scripts
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- data/training/lexicons/aav-pro.tsv +3 -0
- data/training/lexicons/afa-pro.tsv +3 -0
- data/training/lexicons/alg-pro.tsv +3 -0
- data/training/lexicons/ang.tsv +3 -0
- data/training/lexicons/bnt-pro.tsv +3 -0
- data/training/lexicons/cel-pro.tsv +3 -0
- data/training/lexicons/cop.tsv +3 -0
- data/training/lexicons/gem-pro.tsv +3 -0
- data/training/lexicons/gez.tsv +3 -0
- data/training/lexicons/hbo.tsv +3 -0
- data/training/lexicons/ira-pro.tsv +3 -0
- data/training/lexicons/itc-pro.tsv +3 -0
- data/training/lexicons/jpx-pro.tsv +3 -0
- data/training/lexicons/map.tsv +3 -0
- data/training/lexicons/myn-pro.tsv +3 -0
- data/training/lexicons/nci.tsv +3 -0
- data/training/lexicons/obm.tsv +3 -0
- data/training/lexicons/ojp.tsv +3 -0
- data/training/lexicons/osc.tsv +3 -0
- data/training/lexicons/pal.tsv +3 -0
- data/training/lexicons/pli.tsv +3 -0
- data/training/lexicons/poz-oce-pro.tsv +3 -0
- data/training/lexicons/poz-pol-pro.tsv +3 -0
- data/training/lexicons/sga.tsv +3 -0
- data/training/lexicons/sit-pro.tsv +3 -0
- data/training/lexicons/sla-pro.tsv +3 -0
- data/training/lexicons/sog.tsv +3 -0
- data/training/lexicons/sqj-pro.tsv +3 -0
- data/training/lexicons/tai-pro.tsv +3 -0
- data/training/lexicons/trk-pro.tsv +3 -0
- data/training/lexicons/urj-pro.tsv +3 -0
- data/training/lexicons/xce.tsv +3 -0
- data/training/lexicons/xcl.tsv +3 -0
- data/training/lexicons/xeb.tsv +3 -0
- data/training/lexicons/xfa.tsv +3 -0
- data/training/lexicons/xgn-pro.tsv +3 -0
- data/training/lexicons/xht.tsv +3 -0
- data/training/lexicons/xib.tsv +3 -0
- data/training/lexicons/xlp.tsv +3 -0
- data/training/lexicons/xmr.tsv +3 -0
- data/training/lexicons/xsa.tsv +3 -0
- data/training/lexicons/xtg.tsv +3 -0
- data/training/lexicons/xto-pro.tsv +3 -0
- data/training/lexicons/xum.tsv +3 -0
- data/training/lexicons/xve.tsv +3 -0
- data/training/metadata/languages.tsv +3 -0
- docs/DATABASE_REFERENCE.md +1003 -0
- docs/prd/PRD_DATABASE_RECTIFICATION.md +796 -0
- scripts/fetch_wiktionary_raw.py +201 -0
- scripts/ingest_acd.py +307 -0
Each of the 46 data files below was added as a three-line Git LFS pointer (`@@ -0,0 +1,3 @@`). The `version` line is identical in every pointer (`version https://git-lfs.github.com/spec/v1`), so only the object id and size are listed here.

| File (ADDED) | LFS oid (sha256) | Size (bytes) |
|---|---|---|
| data/training/lexicons/aav-pro.tsv | e79666cda701eb7cacdeed5dabd6a42efee4b3f166370098ae9dadbdd64c4531 | 7221 |
| data/training/lexicons/afa-pro.tsv | 61133fa6798c15a365e718b5a9661a93a111704ac44835096aa1c6a110002805 | 1754 |
| data/training/lexicons/alg-pro.tsv | 688232af1cf00874d4a03d40ee70fdf891f49c0753a550d8ead36f1849e1e25f | 12516 |
| data/training/lexicons/ang.tsv | 97148a592a22d23447219bafbaf162a2a6dfdfa9be5e62fd8de2a79501436e87 | 1380125 |
| data/training/lexicons/bnt-pro.tsv | 60e43f675233ad626ce95faf531f2fe62de60ffd1f89861503277313fe15a2d0 | 21342 |
| data/training/lexicons/cel-pro.tsv | edb8dccc90cd4fca4240c2d9b633a157e0d2e286dfda77c38bcbddf9d445caaf | 69341 |
| data/training/lexicons/cop.tsv | 4e21fd347ad46ae447d2fa035be6b16bb5ee3909686db37ad7f4d31da8b6546d | 612872 |
| data/training/lexicons/gem-pro.tsv | f4e072d2c7c801551aea6ecbc7ff3990d890a123929d39c7da0810de65b5722a | 247613 |
| data/training/lexicons/gez.tsv | 74a847a4436cda72eca00c8fef513e62b89d6f292e5330e97f89fcac4c333d3b | 24220 |
| data/training/lexicons/hbo.tsv | 8b61db4b0beca87cf5295dfd6b2886335cf01fd020f2ff22c80f7ac82e6ce2ff | 557976 |
| data/training/lexicons/ira-pro.tsv | f13c6f351a44148c6756b475dba2ad57bbe51cd22abe5d827aaf473d3f8c175c | 16964 |
| data/training/lexicons/itc-pro.tsv | d261a9c43d834be998cb15ebaba0b5d5dcfb2bad7399c8fc7e77bf1bffb16573 | 31068 |
| data/training/lexicons/jpx-pro.tsv | e631d8018dfb1a5f6e49fa0701eecbf4b546fa266ca8df2fc0c340e0de60ae31 | 15732 |
| data/training/lexicons/map.tsv | 0aa3f80b9d64b9e91525b830a391a1922f56065af0ce8563eaf21de512f33d60 | 354233 |
| data/training/lexicons/myn-pro.tsv | fcaea06aab389a39f5bba7c957ca5fe7c378cf33dc0207ab49294db97a29a08e | 2744 |
| data/training/lexicons/nci.tsv | 6d9653460ed3fe2dc15a8b845828ab15f3bb024473cba810f2a5a46a14965b98 | 207573 |
| data/training/lexicons/obm.tsv | 456818e7df5c705bceb3ffcf07ec8d622ce305880042911f0c667d4d9f8a4413 | 1423 |
| data/training/lexicons/ojp.tsv | fc09525f1db3c46b6b7a06e2d7e5f32a8716032e0425127ea63ec62eb7cd27e1 | 186624 |
| data/training/lexicons/osc.tsv | 1e2dabe2edd32d53ae87518f265c4d3b200596dd3e04c2cbb87201f1e7200e95 | 75402 |
| data/training/lexicons/pal.tsv | cb7237468dbb09443f993b90f8428226150ab8747aac22d338f5ed7062ecad50 | 11959 |
| data/training/lexicons/pli.tsv | 309bed32d4a1a30ab025762ead709019230ee559caa1e55588ca665a2ff50578 | 130234 |
| data/training/lexicons/poz-oce-pro.tsv | cc107d7ac5cb3a1b86e4708bcaae85e68cb8b351fa0105fc997906db496f93ab | 4462 |
| data/training/lexicons/poz-pol-pro.tsv | 02312e2e280f98f3c1310d729cd36ddb772daa209a56cbed7e620778c0af081e | 5655 |
| data/training/lexicons/sga.tsv | acf7b4bb75992066e0f8853300ea83df8b940f8b37541b7f79b2ac3b552ee357 | 1501984 |
| data/training/lexicons/sit-pro.tsv | 12bd35e6fb1bdea808cb741f50c53e9cbf82b6440f8fb2f110877af0ab84b012 | 13739 |
| data/training/lexicons/sla-pro.tsv | d1908678551d9fb10053004ce7a91a04d76a7c7f4f2dec03ff5b02a52fd534f7 | 220064 |
| data/training/lexicons/sog.tsv | 6500b1de893c8c643c5a81ce22ec249da693c67c3b4c9ca710c64a349f6f2c82 | 6883 |
| data/training/lexicons/sqj-pro.tsv | b66f53ceec08d01ee57913b7c6a1259f301650d19cc2754cab53f781c2e6680b | 8540 |
| data/training/lexicons/tai-pro.tsv | a11ac8b7d9287870b00a2455c2620c1fdf3b4d2f34d4841af2bcce2a08ec7df2 | 5849 |
| data/training/lexicons/trk-pro.tsv | 49fe64e6da2e399c85b29255c124836215099d18863357eddb12cd2be82b176c | 39499 |
| data/training/lexicons/urj-pro.tsv | e89b1c38e6c5c370f949a88f7801ea6e4e1f881d5fc73cda95195767dde31d86 | 22682 |
| data/training/lexicons/xce.tsv | 6c341342ebb3dfa3293e109a91221252b3087b510b34b859b75b5b2d13ef589a | 497 |
| data/training/lexicons/xcl.tsv | 701be1b023101c427700053fca0c26feb572980e0c56a7b71873cc24f70a22c5 | 311750 |
| data/training/lexicons/xeb.tsv | 7028c905272098ae4d9e4e963c305d372577d24d90b5dbcb8236f3539062ff5e | 30017 |
| data/training/lexicons/xfa.tsv | 17ad979a03bce750cb6fdc11931aa4eb8b594601e9e6afa485bd6d81a49e0d4b | 17401 |
| data/training/lexicons/xgn-pro.tsv | c41c4de9488420092713d0d0fc55fa3718b155f6f74dd5558520ed4fde6ddd8e | 5029 |
| data/training/lexicons/xht.tsv | 4809b7c19c84179c2620c39f5583b2352b86a1f0f7a75569c6b489c4b7d7d67a | 9337 |
| data/training/lexicons/xib.tsv | d510524e0a000a5e0a2f46a3642f0b7d7f47cbb616cf9e4f789fd9efa5334e9d | 1416 |
| data/training/lexicons/xlp.tsv | f5df739a8ea99c7f596df81c611470f7c5d17b3146b9d683753853ee950a1a0c | 14942 |
| data/training/lexicons/xmr.tsv | f4c7f024775825b84326561eb9807d5860914977efcdaea91b85a6410c1293f1 | 98939 |
| data/training/lexicons/xsa.tsv | a98e9845399f78284581435401155e20020233e3fcdf05e72e1f22780c2c89bd | 4935 |
| data/training/lexicons/xtg.tsv | ff1215f061de476ef0d0a7fc67047179a88a6257d2cc5a21604677d9dac286d0 | 9388 |
| data/training/lexicons/xto-pro.tsv | b4fc7376fb22ee052059363f36bdf934e670d5794b5fa9c6018e259e5f17be03 | 5721 |
| data/training/lexicons/xum.tsv | 3e8fd6d438f641afbdefd4ec727d1e1e97701f99d2a3bec3729250b19c6d7bb5 | 60372 |
| data/training/lexicons/xve.tsv | e02116a96d18c5571c2d407c55e234f114c6f6e2a7499c7bd1d3af7a6a36dc97 | 24924 |
| data/training/metadata/languages.tsv | 9a3f345f6180a9acffa6feef04d3181c706d9e8b1539b6f318783d1e1e0fa8a7 | 39342 |
docs/DATABASE_REFERENCE.md
ADDED
@@ -0,0 +1,1003 @@
# Ancient Scripts Datasets — Master Database Reference

> **Last updated:** 2026-03-13 | **Commit:** `3e3fdf1` | **Total entries:** 3,466,000+ across 1,178 languages

This document is the single source of truth for understanding, modifying, and extending this database. It is designed for both human researchers and AI agents.

---

## Table of Contents

1. [Database Overview](#1-database-overview)
2. [TSV Schema & Format](#2-tsv-schema--format)
3. [Ancient Languages — Complete Registry](#3-ancient-languages--complete-registry)
4. [Non-Ancient Languages — Summary](#4-non-ancient-languages--summary)
5. [Source Registry](#5-source-registry)
6. [IPA & Phonetic Processing Pipeline](#6-ipa--phonetic-processing-pipeline)
7. [Transliteration Maps System](#7-transliteration-maps-system)
8. [Sound Class (SCA) System](#8-sound-class-sca-system)
9. [Scripts & Data Flow](#9-scripts--data-flow)
10. [PRD: Adding New Data](#10-prd-adding-new-data)
11. [PRD: Adding New Languages](#11-prd-adding-new-languages)
12. [Data Acquisition Rules (Iron Law)](#12-data-acquisition-rules-iron-law)
13. [Adversarial Review Protocol](#13-adversarial-review-protocol)
14. [Re-processing & Cleaning Runbook](#14-re-processing--cleaning-runbook)
15. [Known Limitations & Future Work](#15-known-limitations--future-work)

---

## 1. Database Overview

### Locations

| Location | Path / URL | What |
|----------|-----------|------|
| **HuggingFace dataset** | `https://huggingface.co/datasets/PhaistosLabs/ancient-scripts-datasets` | **PRIMARY cloud copy.** All lexicons, cognate pairs, metadata, sources, scripts, docs. Push here after any data change. |
| **HuggingFace local clone** | `C:\Users\alvin\hf-ancient-scripts\` | Local clone of the HuggingFace repo. Use the `huggingface_hub` API or `git push` to sync. |
| **GitHub repo** | `https://github.com/Nacryos/ancient-scripts-datasets.git` | Scripts, docs, pipeline code. Lexicon data is gitignored but committed via force-add for some ancient languages. |
| **Local working copy** | `C:\Users\alvin\ancient-scripts-datasets\` | Full repo + generated data + CLDF sources |
| **CLDF sources** | `sources/` (593 MB) | **Gitignored.** Cloned separately: `northeuralex`, `ids`, `abvd`, `wold`, `sinotibetan`, `wikipron` |
| **Total local footprint** | 2.2 GB | Includes all generated data + CLDF source repos |

### What IS Tracked in Git (GitHub)

- `scripts/` — All extraction and processing scripts
- `cognate_pipeline/` — Python package for phonetic processing
- `docs/` — PRDs, audit reports, this reference doc
- `data/training/metadata/` — `languages.tsv`, `source_stats.tsv` (small summary files)
- `data/training/validation/` — Validation sets (via Git LFS)
- `data/training/lexicons/*.tsv` — Ancient language TSVs (force-added despite gitignore)

### What is NOT Tracked in Git (gitignored)

- `data/training/lexicons/` — Modern language TSVs (1,113 files, regenerated from scripts)
- `data/training/cognate_pairs/` — Cognate pair datasets (regenerated)
- `sources/` — CLDF source repositories (cloned separately, ~593 MB)

### What IS on HuggingFace (everything)

**HuggingFace is the single source of truth for ALL data files.** It contains:
- All 1,136 lexicon TSVs (ancient + modern)
- All cognate pair datasets
- All metadata files
- All scripts, docs, and pipeline code
- All CLDF source repos (2,928 files in `sources/`)
- Raw audit trails and intermediate extraction files

### HuggingFace Push Rules

1. **After any data change** (new entries, IPA reprocessing, map fixes): push updated TSVs to HF.
2. **After any script change** that affects output: push the scripts to HF.
3. **Use the `huggingface_hub` API** for individual file uploads:
   ```python
   from huggingface_hub import HfApi
   api = HfApi()
   api.upload_file(
       path_or_fileobj="data/training/lexicons/ave.tsv",
       path_in_repo="data/training/lexicons/ave.tsv",
       repo_id="PhaistosLabs/ancient-scripts-datasets",
       repo_type="dataset",
       commit_message="fix: reprocess Avestan IPA with expanded transliteration map"
   )
   ```
4. **For bulk uploads** (many files): use `upload_large_folder()` from the HF local clone at `C:\Users\alvin\hf-ancient-scripts\` (see the sketch after this list).
5. **Always push to BOTH** GitHub (scripts/docs) and HuggingFace (data + scripts/docs).
6. **Never let HF fall behind** — if data exists locally but not on HF, it's not deployed.
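A minimal sketch of rule 4, assuming a `huggingface_hub` release recent enough to provide `upload_large_folder()`; the repo id and folder path simply mirror the Locations table above, and the call is an illustration rather than the canonical push script:

```python
from huggingface_hub import HfApi

# Bulk, resumable upload of the whole local HF clone (rule 4).
# upload_large_folder() chunks and retries the work itself, so no commit_message is passed.
api = HfApi()
api.upload_large_folder(
    repo_id="PhaistosLabs/ancient-scripts-datasets",
    repo_type="dataset",
    folder_path=r"C:\Users\alvin\hf-ancient-scripts",
)
```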
**To reconstruct all data from scratch:**
```bash
# 1. Clone CLDF sources
git clone https://github.com/lexibank/northeuralex sources/northeuralex
git clone https://github.com/lexibank/ids sources/ids
git clone https://github.com/lexibank/abvd sources/abvd
git clone https://github.com/lexibank/wold sources/wold
git clone https://github.com/lexibank/sinotibetan sources/sinotibetan
# WikiPron: download from https://github.com/CUNY-CL/wikipron

# 2. Run extraction pipeline
python scripts/expand_cldf_full.py          # Modern languages from CLDF
python scripts/ingest_wikipron.py           # WikiPron IPA data
python scripts/run_lexicon_expansion.py     # Ancient language extraction (requires internet)
python scripts/reprocess_ipa.py             # Apply transliteration maps
python scripts/assemble_lexicons.py         # Generate metadata
```

### Directory Structure

```
ancient-scripts-datasets/
  data/training/
    lexicons/            # 1,136 TSV files (one per language)  [GITIGNORED]
    metadata/            # languages.tsv, source_stats.tsv, etc.  [TRACKED]
    cognate_pairs/       # inherited, similarity, borrowing pairs  [GITIGNORED]
    validation/          # stratified ML training/test sets  [GIT LFS]
    language_profiles/   # per-language markdown profiles
    raw/                 # raw JSON audit trails
    audit_trails/        # JSONL provenance logs
  scripts/               # 23 extraction scripts + 7 parsers  [TRACKED]
  cognate_pipeline/      # Python package for phonetic processing  [TRACKED]
  docs/                  # PRDs, audit reports, this file  [TRACKED]
  sources/               # CLDF repos  [GITIGNORED, clone separately]
```

**Scale:**
- 1,178 languages (68 ancient/reconstructed + 1,113 modern — 3 overlap)
- 3,466,000+ total lexical entries
- 170,756 ancient language entries (68 languages)
- 3,296,156 modern language entries (1,113 languages)

---

## 2. TSV Schema & Format

Every lexicon file follows this 6-column tab-separated schema:

```
Word    IPA    SCA    Source    Concept_ID    Cognate_Set_ID
```

| Column | Description | Example |
|--------|-------------|---------|
| **Word** | Orthographic/transliterated form | `pahhur`, `*wódr̥`, `𐬀𐬵𐬎𐬭𐬀` |
| **IPA** | Broad phonemic IPA transcription | `paxːur`, `wodr̩`, `ahura` |
| **SCA** | Sound Class Alphabet encoding (18C + 5V) | `PAKUR`, `WOTR`, `AHURA` |
| **Source** | Data provenance identifier | `wiktionary`, `ediana`, `wikipron` |
| **Concept_ID** | Semantic concept (first gloss word, snake_case) | `fire`, `water`, `-` |
| **Cognate_Set_ID** | Cognate grouping identifier | `PIE_fire_001`, `-` |

**Rules** (a minimal reader that enforces them is sketched after this list):
- Header row MUST be present as line 1
- UTF-8 encoding, Unix line endings preferred
- No empty IPA fields — use Word as fallback if no conversion possible
- Source field must accurately reflect actual data origin
- `-` for unknown/unavailable fields
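The reader below is an illustrative sketch (not a pipeline module) of how these rules can be enforced when loading a lexicon; the function name and the strictness of the checks are assumptions:

```python
import csv

EXPECTED_HEADER = ["Word", "IPA", "SCA", "Source", "Concept_ID", "Cognate_Set_ID"]

def read_lexicon(path):
    """Yield dict rows from a lexicon TSV, checking the 6-column schema."""
    with open(path, encoding="utf-8", newline="") as fh:
        reader = csv.reader(fh, delimiter="\t", quoting=csv.QUOTE_NONE)
        header = next(reader)
        if header != EXPECTED_HEADER:
            raise ValueError(f"{path}: unexpected header {header}")
        for lineno, row in enumerate(reader, start=2):
            if len(row) != 6:
                raise ValueError(f"{path}:{lineno}: expected 6 columns, got {len(row)}")
            word, ipa, sca, source, concept, cogset = row
            if not ipa:          # rule: never leave IPA empty — Word is the fallback
                ipa = word
            yield dict(zip(EXPECTED_HEADER, [word, ipa, sca, source, concept, cogset]))
```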
---

## 3. Ancient Languages — Complete Registry

### Entry Counts & IPA Quality (as of 2026-03-12)

| # | Language | ISO | Family | Entries | Identity% | Top Sources | IPA Type |
|---|----------|-----|--------|---------|-----------|-------------|----------|
| 1 | Avestan | ave | Indo-Iranian | 3,455 | 14.4% | avesta_org (2,716), wiktionary_cat (384), wiktionary (355) | Broad phonemic (Hoffmann & Forssman) |
| 2 | Tocharian B | txb | Indo-European | 2,386 | 25.2% | wiktionary_cat (2,386) | Broad phonemic (Tocharian map) |
| 3 | Luwian | xlw | Anatolian | 2,230 | 26.2% | ediana (1,985), palaeolexicon (225) | Broad phonemic (Luwian map) |
| 4 | Proto-Indo-European | ine-pro | Indo-European | 1,704 | 0.2% | wiktionary_cat (863), wiktionary (841) | Broad phonemic (reconstructed) |
| 5 | Lycian | xlc | Anatolian | 1,098 | 36.7% | ediana (517), palaeolexicon (482) | Broad phonemic (Melchert 2004) |
| 6 | Etruscan | ett | Tyrsenian | 753 | 25.5% | palaeolexicon (503), wikipron (207) | Broad phonemic (Bonfante) |
| 7 | Urartian | xur | Hurro-Urartian | 748 | 54.4% | oracc_ecut (704), wiktionary (44) | Partial (cuneiform sign names) |
| 8 | Lydian | xld | Anatolian | 693 | 53.0% | ediana (447), palaeolexicon (187) | Broad phonemic (Gusmani 1964) |
| 9 | Carian | xcr | Anatolian | 532 | 39.7% | palaeolexicon (304), ediana (174) | Broad phonemic (Adiego 2007) |
| 10 | Proto-Kartvelian | ccs-pro | Kartvelian | 504 | 22.2% | wiktionary (254), wiktionary_cat (250) | Broad phonemic (Klimov 1998) |
| 11 | Old Persian | peo | Indo-Iranian | 486 | 10.5% | wiktionary (244), wiktionary_cat (242) | Broad phonemic (Kent 1953) |
| 12 | Tocharian A | xto | Indo-European | 467 | 23.1% | wiktionary_cat (467) | Broad phonemic (Tocharian map) |
| 13 | Proto-Dravidian | dra-pro | Dravidian | 406 | 7.1% | wiktionary_cat (235), wiktionary (171) | Broad phonemic (Krishnamurti) |
| 14 | Proto-Semitic | sem-pro | Afroasiatic | 386 | 26.9% | wiktionary_cat (247), wiktionary (139) | Broad phonemic (Huehnergard) |
| 15 | Ugaritic | uga | Afroasiatic | 371 | 15.6% | wiktionary (344), wiktionary_cat (27) | Broad phonemic (Tropper 2000) |
| 16 | Hittite | hit | Anatolian | 266 | 20.3% | wiktionary (266) | Broad phonemic (Hoffner & Melchert) |
| 17 | Hurrian | xhu | Hurro-Urartian | 260 | 50.4% | palaeolexicon (259) | Broad phonemic (Wegner 2007) |
| 18 | Elamite | elx | Isolate | 301 | 71.1% | wiktionary (301) | Minimal (transparent orthography) |
| 19 | Rhaetic | xrr | Tyrsenian | 187 | 55.1% | tir_raetica (142), wiktionary (45) | Partial (North Italic alphabet) |
| 20 | Phoenician | phn | Afroasiatic | 180 | 18.3% | wiktionary (180) | Broad phonemic (abjad reconstruction) |
| 21 | Phrygian | xpg | Indo-European | 79 | 36.7% | wiktionary (79) | Partial (small corpus, Greek-script support) |
| 22 | Messapic | cms | Indo-European | 45 | 88.9% | wiktionary (45) | Minimal (Greek-alphabet, mostly identity) |
| 23 | Lemnian | xle | Tyrsenian | 30 | 53.3% | wiktionary (30) | Minimal (very small corpus) |
| | | | | | | | |
| **--- Tier 2 (Phase 6) ---** | | | | | | | |
| 24 | Old English | ang | Germanic | 31,319 | 10.5% | wiktionary_cat (31,319) | Broad phonemic (Hogg 1992) |
| 25 | Biblical Hebrew | hbo | Semitic | 12,182 | 0.1% | wiktionary_cat (12,182) | Broad phonemic (Blau 2010) |
| 26 | Coptic | cop | Egyptian | 11,180 | 0.1% | wiktionary_cat (7,987), kellia (3,193) | Broad phonemic (Layton 2000) |
| 27 | Old Armenian | xcl | Indo-European | 6,277 | 0.0% | wiktionary_cat (6,277) | Broad phonemic (Meillet 1913) |
| 28 | Pali | pli | Indo-Aryan | 2,792 | 19.1% | wiktionary_cat (2,792) | Broad phonemic (Geiger 1943) |
| 29 | Ge'ez | gez | Semitic | 496 | 0.0% | wiktionary_cat (496) | Broad phonemic (Dillmann 1857) |
| 30 | Hattic | xht | Isolate | 269 | 37.9% | wiktionary_cat (269) | Partial (cuneiformist conventions) |
| | | | | | | | |
| **--- Tier 3 (Phase 7) ---** | | | | | | | |
| 31 | Old Irish | sga | Celtic | 41,300 | 39.4% | edil (40,309), wiktionary_cat (991) | Broad phonemic (Thurneysen) |
| 32 | Old Japanese | ojp | Japonic | 5,393 | 59.7% | oncoj (4,974), wiktionary_cat (419) | Broad phonemic (Frellesvig 2010) |
| 33 | Classical Nahuatl | nci | Uto-Aztecan | 3,873 | 5.7% | wiktionary_cat (3,873) | Broad phonemic |
| 34 | Oscan | osc | Italic | 2,122 | 15.1% | ceipom (2,122) | Broad phonemic (CEIPoM Standard_aligned) |
| 35 | Umbrian | xum | Italic | 1,631 | 3.7% | ceipom (1,631) | Broad phonemic (CEIPoM Standard_aligned) |
| 36 | Venetic | xve | Italic | 721 | 86.5% | ceipom (721) | Minimal (Latin transliteration) |
| 37 | Gaulish | xtg | Celtic | 271 | 92.3% | diacl (183), wiktionary_cat (88) | Minimal (Latin transliteration) |
| 38 | Middle Persian | pal | Indo-Iranian | 242 | 62.8% | wiktionary_cat (242) | Broad phonemic (MacKenzie 1971) |
| 39 | Sogdian | sog | Indo-Iranian | 194 | 37.1% | iecor (161), wiktionary_cat (33) | Broad phonemic (Gharib 1995) |
| | | | | | | | |
| **--- Proto-Languages (Phase 7) ---** | | | | | | | |
| 40 | Proto-Austronesian | map | Austronesian | 11,624 | 41.1% | acd (11,624) | Broad phonemic (Blust notation) |
| 41 | Proto-Germanic | gem-pro | Germanic | 5,399 | 32.9% | wiktionary_cat (5,399) | Broad phonemic (reconstructed) |
| 42 | Proto-Celtic | cel-pro | Celtic | 1,584 | 68.3% | wiktionary_cat (1,584) | Partial (mixed Latin/IPA) |
| 43 | Proto-Uralic | urj-pro | Uralic | 585 | 50.3% | wiktionary_cat (585) | Broad phonemic (Sammallahti 1988) |
| 44 | Proto-Bantu | bnt-pro | Niger-Congo | 467 | 54.0% | wiktionary_cat (467) | Broad phonemic (BLR notation) |
| 45 | Proto-Sino-Tibetan | sit-pro | Sino-Tibetan | 358 | 100.0% | wiktionary_cat (358) | Already IPA (Wiktionary provides IPA) |
| | | | | | | | |
| **--- Phase 8 Batch 1 (Proto-Languages + Italic/Celtic) ---** | | | | | | | |
| 46 | Proto-Slavic | sla-pro | Balto-Slavic | 5,068 | 18.4% | wiktionary_cat (5,068) | Broad phonemic (reconstructed) |
| 47 | Proto-Turkic | trk-pro | Turkic | 1,027 | 27.8% | wiktionary_cat (1,027) | Broad phonemic (reconstructed) |
| 48 | Proto-Italic | itc-pro | Italic | 739 | 46.7% | wiktionary_cat (739) | Broad phonemic (reconstructed) |
| 49 | Faliscan | xfa | Italic | 566 | 67.1% | ceipom (566) | Partial (CEIPoM Standard_aligned) |
| 50 | Proto-Japonic | jpx-pro | Japonic | 426 | 70.2% | wiktionary_cat (426) | Partial (mixed notation) |
| 51 | Lepontic | xlp | Celtic | 421 | 27.6% | lexlep (421) | Broad phonemic (Lexicon Leponticum) |
| 52 | Proto-Iranian | ira-pro | Indo-Iranian | 366 | 4.6% | wiktionary_cat (366) | Broad phonemic (reconstructed) |
| 53 | Ancient South Arabian | xsa | Semitic | 127 | 25.2% | wiktionary (127) | Broad phonemic (Musnad abjad) |
| 54 | Celtiberian | xce | Celtic | 11 | 100.0% | wiktionary_cat (11) | Minimal (very small corpus) |
| | | | | | | | |
| **--- Phase 8 Batch 2 (Proto-Languages + Ancient) ---** | | | | | | | |
| 55 | Meroitic | xmr | Nilo-Saharan | 1,978 | 39.8% | meroitic-corpus (1,978) | Broad phonemic (Rilly 2007) |
| 56 | Proto-Algonquian | alg-pro | Algic | 258 | 28.7% | wiktionary_cat (258) | Broad phonemic (reconstructed) |
| 57 | Proto-Albanian | sqj-pro | Albanian | 210 | 43.8% | wiktionary_cat (210) | Broad phonemic (reconstructed) |
| 58 | Proto-Austroasiatic | aav-pro | Austroasiatic | 180 | 100.0% | wiktionary_cat (180) | Already IPA (Wiktionary provides IPA) |
| 59 | Proto-Polynesian | poz-pol-pro | Austronesian | 157 | 100.0% | wiktionary_cat (157) | Already IPA (Wiktionary provides IPA) |
| 60 | Proto-Tai | tai-pro | Kra-Dai | 148 | 0.7% | wiktionary_cat (148) | Broad phonemic (Li 1977) |
| 61 | Proto-Tocharian | xto-pro | Tocharian | 138 | 22.5% | wiktionary_cat (138) | Broad phonemic (reconstructed) |
| 62 | Proto-Mongolic | xgn-pro | Mongolic | 126 | 41.3% | wiktionary_cat (126) | Broad phonemic (reconstructed) |
| 63 | Proto-Oceanic | poz-oce-pro | Austronesian | 114 | 92.1% | wiktionary_cat (114) | Minimal (transparent orthography) |
| 64 | Moabite | obm | Semitic | 31 | 0.0% | wiktionary_cat (31) | Broad phonemic (Canaanite abjad) |
| | | | | | | | |
| **--- Phase 8 Batch 3 (Proto-Languages + Iberian) ---** | | | | | | | |
| 65 | Proto-Mayan | myn-pro | Mayan | 65 | 20.0% | wiktionary_cat (65) | Broad phonemic (Kaufman 2003) |
| 66 | Proto-Afroasiatic | afa-pro | Afroasiatic | 48 | 54.2% | wiktionary_cat (48) | Broad phonemic (Ehret 1995) |
| 67 | Iberian | xib | Isolate | 39 | 74.4% | wiktionary_cat (39) | Partial (undeciphered script) |
| | | | | | | | |
| **--- Phase 8 Eblaite ---** | | | | | | | |
| 68 | Eblaite | xeb | Semitic | 667 | 0.3% | dcclt-ebla (667) | Broad phonemic (Krebernik 1982) |

**Total ancient + classical: 170,756 entries across 68 languages | Overall identity rate: ~30%**

### Understanding Identity Rate

**Identity rate = % of entries where Word == IPA** (no phonetic conversion applied). A short recomputation sketch follows the list of causes below.

| Rate | Meaning | Example Languages |
|------|---------|-------------------|
| <10% | Excellent IPA conversion | ine-pro (0.2%), dra-pro (7.1%) |
| 10-30% | Good conversion | peo (10.5%), ave (14.4%), hit (20.3%), ccs-pro (22.2%), txb (25.2%) |
| 30-50% | Moderate — some chars unmapped | xlc (36.7%), xcr (39.7%) |
| 50-70% | Partial — significant gaps | xhu (50.4%), xld (53.0%), xur (54.4%) |
| >70% | Minimal — mostly passthrough | elx (71.1%), cms (88.9%) |

**Causes of high identity:**
- **Cuneiform sign notation** (xur): Uppercase Sumerograms like `LUGAL`, `URU` aren't phonemic — 156 entries in xur
- **Already-IPA characters** (cms): Some scripts use characters that ARE IPA (θ, ə, ŋ)
- **Transparent orthography** (elx): Latin letters already map 1:1 to IPA
- **eDiAna pre-transliterated forms** (xlc, xld): Source provides Latin transliterations that are already close to IPA
- **Plain ASCII stems** (txb, xto): Short roots like `ak`, `aik` are valid in both orthography and IPA
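For reference, the Identity% column can be recomputed from any lexicon TSV with a few lines of Python; the function name here is illustrative and not part of the pipeline:

```python
import csv

def identity_rate(tsv_path):
    """Percentage of rows in a lexicon TSV where Word == IPA (no conversion applied)."""
    total = identical = 0
    with open(tsv_path, encoding="utf-8", newline="") as fh:
        for row in csv.DictReader(fh, delimiter="\t", quoting=csv.QUOTE_NONE):
            total += 1
            identical += row["Word"] == row["IPA"]
    return 100.0 * identical / total if total else 0.0

# identity_rate("data/training/lexicons/ave.tsv")  →  ~14.4 for Avestan
```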
### IPA Quality Categories

| Category | Definition | Ancient Languages |
|----------|-----------|-------------------|
| **FULL** | >80% WikiPron-sourced IPA | (none — ancient langs don't have WikiPron) |
| **BROAD PHONEMIC** | Scholarly transliteration → IPA via cited map | hit, uga, phn, ave, peo, ine-pro, sem-pro, ccs-pro, dra-pro, xlw, xhu, ett, txb, xto, xld, xcr, xpg |
| **PARTIAL** | Some chars converted, gaps remain | xlc, xrr |
| **MINIMAL** | Mostly identity / transparent orthography | elx, xle, cms |
| **CUNEIFORM MIXED** | Mix of converted transliterations + unconverted sign names | xur |

**Important:** For dead languages, **broad phonemic is the ceiling**. Narrow allophonic IPA is not possible because allophonic variation is unrecoverable from written records. The IPA column represents the best scholarly reconstruction of phonemic values, not actual pronunciation.

---

## 4. Non-Ancient Languages — Summary

- **1,113 languages** with 3,296,156 entries
- **Dominant source:** WikiPron (85.3% of entries = 2,822,808)
- **Other sources:** ABVD (6.7%), NorthEuraLex (5.7%), WOLD (1.8%), sinotibetan (0.1%)

**WikiPron entries** have true broad phonemic IPA (scraped from Wiktionary pronunciation sections by trained linguists). These are the gold standard.

**ABVD entries** are often orthographic (Word == IPA). The `fix_abvd_ipa.py` script applies rule-based G2P conversion for Austronesian languages.

---

## 5. Source Registry

| Source ID | Full Name | Type | URL | Languages Covered |
|-----------|-----------|------|-----|-------------------|
| `wikipron` | WikiPron Pronunciation Dictionary | Scraped IPA | `sources/wikipron/` (local) | 800+ modern languages |
| `abvd` | Austronesian Basic Vocabulary Database | CLDF | `sources/abvd/` (local) | 500+ Austronesian |
| `northeuralex` | NorthEuraLex | CLDF | `sources/northeuralex/` (local) | 100+ Eurasian |
| `wold` | World Loanword Database | CLDF | `sources/wold/` (local) | 40+ worldwide |
| `sinotibetan` | Sino-Tibetan Etymological Database | CLDF | `sources/sinotibetan/` (local) | 50+ Sino-Tibetan |
| `wiktionary` | Wiktionary (appendix/lemma pages) | Web scrape | `en.wiktionary.org` | All ancient langs |
| `wiktionary_cat` | Wiktionary (category pagination) | MediaWiki API | `en.wiktionary.org/w/api.php` | ine-pro, uga, peo, ave, dra-pro, sem-pro, ccs-pro, txb, xto |
| `ediana` | eDiAna (LMU Munich) | POST API | `ediana.gwi.uni-muenchen.de` | xlc, xld, xcr, xlw |
| `palaeolexicon` | Palaeolexicon | REST API | `palaeolexicon.com/api/Search/` | xlc, xld, xcr, xlw, xhu, ett |
| `oracc_ecut` | Oracc eCUT (Urartian texts) | JSON API | `oracc.museum.upenn.edu/ecut/` | xur |
| `tir_raetica` | TIR (Thesaurus Inscriptionum Raeticarum) | Web scrape | `tir.univie.ac.at` | xrr |
| `wikipedia` | Wikipedia vocabulary tables | Web scrape | `en.wikipedia.org` | xur (supplement) |
| `avesta_org` | Avesta.org Avestan Dictionary | Web scrape | `avesta.org/avdict/avdict.htm` | ave |
| `kaikki` | Kaikki Wiktionary Dump | JSON dump | `kaikki.org` | Various |
| `kellia` | Kellia Coptic Lexicon | XML | `data.copticscriptorium.org` | cop |
| `ceipom` | CEIPoM (Italian Epigraphy) | CSV | `zenodo.org` (CC BY-SA 4.0) | osc, xum, xve, xfa |
| `edil` | eDIL (Electronic Dict of Irish Lang) | XML | `github.com/e-dil/dil` | sga |
| `acd` | ACD (Austronesian Comparative Dict) | CLDF | `github.com/lexibank/acd` (CC BY 4.0) | map |
| `oncoj` | ONCOJ (Oxford-NINJAL OJ Corpus) | XML | `github.com/ONCOJ/data` (CC BY 4.0) | ojp |
| `diacl` | DiACL (Diachronic Atlas of Comp Ling) | CLDF | `github.com/lexibank/diacl` (CC BY 4.0) | xtg |
| `iecor` | IE-CoR (IE Cognate Relationships) | CLDF | `github.com/lexibank/iecor` (CC BY 4.0) | sog |
| `lexlep` | Lexicon Leponticum (Vienna) | Web/CSV | `lexlep.univie.ac.at` | xlp |
| `meroitic-corpus` | Meroitic Language Corpus (GitHub) | JSON/CSV | `github.com/MeroiticLanguage/Meroitic-Corpus` | xmr |
| `dcclt-ebla` | DCCLT/Ebla (ORACC) | JSON ZIP | `oracc.museum.upenn.edu/dcclt-ebla/` (CC0) | xeb |

---

## 6. IPA & Phonetic Processing Pipeline

### Pipeline Architecture

```
Source Data (Word column)
        ↓
transliterate(word, iso)        ← scripts/transliteration_maps.py
        ↓  (greedy longest-match, NFC-normalized)
IPA string (broad phonemic)
        ↓
ipa_to_sound_class(ipa)         ← cognate_pipeline/.../sound_class.py
        ↓  (tokenize → segment_to_class → join)
SCA string (e.g., "PATA")
```
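Assuming the two functions keep the signatures shown in the diagram (the exact import paths depend on how the repo is laid out on `sys.path` and are only illustrative), a single entry flows through the pipeline like this; the Hittite example values come from the schema table in Section 2:

```python
from transliteration_maps import transliterate                      # scripts/transliteration_maps.py
from cognate_pipeline.normalise.sound_class import ipa_to_sound_class

word = "pahhur"                    # Hittite 'fire', scholarly transliteration
ipa = transliterate(word, "hit")   # broad phonemic IPA, e.g. "paxːur"
sca = ipa_to_sound_class(ipa)      # sound-class string, e.g. "PAKUR"
row = [word, ipa, sca, "wiktionary", "fire", "PIE_fire_001"]
```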
### IPA Generation Methods (by source type)

| Source | IPA Method | Quality |
|--------|-----------|---------|
| WikiPron | Pre-extracted from Wiktionary pronunciation | True broad IPA |
| Wiktionary (ancient) | `transliterate(word, iso)` via language-specific map | Broad phonemic |
| ABVD | Orthographic passthrough → `fix_abvd_ipa.py` G2P | Variable |
| eDiAna | `transliterate(word, iso)` | Broad phonemic |
| Palaeolexicon | Source IPA if available, else `transliterate()` | Broad phonemic |
| Oracc | `transliterate(word, iso)` | Partial (cuneiform) |
| NorthEuraLex/WOLD | CLDF Segments column → joined IPA | Good |

### Never-Regress Re-processing Rule

When re-applying transliteration maps to existing data (`scripts/reprocess_ipa.py`):

```python
candidate_ipa = transliterate(word, iso)

if candidate_ipa != word:
    final_ipa = candidate_ipa   # New map converts — use it
elif old_ipa != word:
    final_ipa = old_ipa         # New map can't, but old was good — keep
else:
    final_ipa = word            # Both identity — nothing to do
```

**This ensures:** IPA quality can only improve or stay the same. It never regresses.

---

## 7. Transliteration Maps System

**File:** `scripts/transliteration_maps.py` (~800 lines)

### How It Works

Each ancient language has a `Dict[str, str]` mapping scholarly transliteration conventions to broad IPA. The `transliterate()` function applies these via **greedy longest-match**: keys sorted by descending length, first match consumed at each position.
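A minimal stand-alone sketch of that matching strategy, independent of the real `transliterate()` implementation (the helper name and the pass-through fallback for unmapped characters are assumptions):

```python
import unicodedata

def greedy_transliterate(word, mapping):
    """Greedy longest-match: at each position, consume the longest mapped key."""
    word = unicodedata.normalize("NFC", word)
    keys = sorted(mapping, key=len, reverse=True)        # longest keys first
    out, i = [], 0
    while i < len(word):
        for key in keys:
            if word.startswith(key, i):
                out.append(mapping[key])
                i += len(key)
                break
        else:                                            # no key matched here:
            out.append(word[i])                          # pass the character through
            i += 1
    return "".join(out)

# greedy_transliterate("pahhur", {"hh": "xː", "p": "p", "a": "a", "u": "u", "r": "r"})
# → "paxːur"
```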
### Map Registry (updated 2026-03-13 — 180+ new rules across 13 original maps + 15 new maps in Phases 6-7 + 24 new maps in Phase 8)

| ISO | Language | Keys | Academic Reference |
|-----|----------|------|--------------------|
| `hit` | Hittite | 49 | Hoffner & Melchert (2008) — added š, ḫ, macron vowels |
| `uga` | Ugaritic | 68 | Tropper (2000) — added ʾ, macron/circumflex vowels, ḫ, ṣ, Ugaritic script (U+10380-1039F) |
| `phn` | Phoenician | 23 | Standard 22-letter abjad |
| `xur` | Urartian | 27 | Wegner (2007) — added ṣ, ṭ, y, w, ə, ʾ |
| `elx` | Elamite | 19 | Grillot-Susini (1987), Stolper (2004) |
| `xlc` | Lycian | 33 | Melchert (2004) — added x, j, o, long vowels |
| `xld` | Lydian | 38 | Gusmani (1964), Melchert — added ã, ẽ, ũ (nasalized vowels), c, h, z, x |
| `xcr` | Carian | 35 | Adiego (2007) — added β, z, v, j, f, ŋ, ĺ, ỳ, ý |
| `ave` | Avestan | 97 | Hoffmann & Forssman (1996) + Unicode 5.2 (U+10B00-10B3F) |
| `peo` | Old Persian | 68 | Kent (1953) — added z, č, Old Persian cuneiform syllabary (U+103A0-103C3, 31 signs) |
| `ine` | Proto-Indo-European | 61 | Fortson (2010), Beekes (2011) — added ḗ, ṓ, morpheme boundaries, accented syllabic sonorants |
| `sem` | Proto-Semitic | 44 | Huehnergard (2019) |
| `ccs` | Proto-Kartvelian | 66 | Klimov (1998) — added s₁/z₁/c₁/ʒ₁ subscript series, morpheme boundaries |
| `dra` | Proto-Dravidian | 49 | Krishnamurti (2003) |
| `xpg` | Phrygian | 55 | Brixhe & Lejeune (1984), Obrador-Cursach (2020) — added Greek alphabet support (22 letters) |
| `xle` | Lemnian | 24 | Greek-alphabet reconstruction |
| `xrr` | Rhaetic | 26 | North Italic alphabet reconstruction |
| `cms` | Messapic | 25 | Greek-alphabet reconstruction |
| `xlw` | Luwian | 39 | Melchert (2003), Yakubovich (2010) |
| `xhu` | Hurrian | 31 | Wegner (2007), Wilhelm (2008) |
| `ett` | Etruscan | 61 | Bonfante & Bonfante (2002), Rix (1963) + Old Italic Unicode — added z, o, d, g, b, q, σ→s |
| `txb`/`xto` | Tocharian A/B | 35 | Krause & Thomas (1960), Adams (2013), Peyrot (2008) — added retroflex series (ṭ, ḍ, ṇ, ḷ) |
| | | | |
| **--- Phase 6: Tier 2 Maps ---** | | | |
| `cop` | Coptic | 40+ | Layton (2000), Loprieno (1995) — Sahidic dialect |
| `pli` | Pali (IAST) | 30+ | Geiger (1943), Oberlies (2001) |
| `xcl` | Old Armenian | 40+ | Meillet (1913), Schmitt (1981) |
| `ang` | Old English | 30+ | Hogg (1992), Campbell (1959) |
| `gez` | Ge'ez (Ethiopic) | 50+ | Dillmann (1857), Tropper (2002) |
| `hbo` | Biblical Hebrew | 40+ | Blau (2010), Khan (2020) |
| | | | |
| **--- Phase 7: Tier 3 + Proto Maps ---** | | | |
| `osc` | Oscan | 12 | CEIPoM Standard_aligned conventions |
| `xum` | Umbrian | 12 | CEIPoM Standard_aligned conventions |
| `xve` | Venetic | 6 | CEIPoM Token_clean conventions |
| `sga` | Old Irish | 25 | Thurneysen (1946), Stifter (2006) — lenition + macron vowels |
| `xeb` | Eblaite | 20 | Standard Semitist notation |
| `nci` | Classical Nahuatl | 15 | Andrews (2003), Launey (2011) |
| `ojp` | Old Japanese | 20 | Frellesvig (2010), ONCOJ conventions |
| `pal` | Middle Persian | 25 | MacKenzie (1971), Skjærvø (2009) |
| `sog` | Sogdian | 25 | Gharib (1995), Sims-Williams (2000) |
| `xtg` | Gaulish | 15 | Delamarre (2003) |
| `gem-pro` | Proto-Germanic | 20 | Ringe (2006), Kroonen (2013) |
| `cel-pro` | Proto-Celtic | 15 | Matasović (2009) |
| `urj-pro` | Proto-Uralic | 12 | Sammallahti (1988), Janhunen (1981) |
| `bnt-pro` | Proto-Bantu | 20 | Bastin et al. (2002), Meeussen (1967) |
| `sit-pro` | Proto-Sino-Tibetan | 18 | Matisoff (2003), Sagart (2004) |
| | | | |
| **--- Phase 8 Maps ---** | | | |
| `sla-pro` | Proto-Slavic | 25+ | Shevelov (1964), Holzer (2007) |
| `trk-pro` | Proto-Turkic | 20+ | Clauson (1972), Róna-Tas (1991) |
| `itc-pro` | Proto-Italic | 15+ | Meiser (1998), Bakkum (2009) |
| `jpx-pro` | Proto-Japonic | 15+ | Vovin (2005), Frellesvig (2010) |
| `ira-pro` | Proto-Iranian | 20+ | Cheung (2007), Lubotsky (2001) |
| `xfa` | Faliscan | 12 | CEIPoM Standard_aligned conventions |
| `xlp` | Lepontic | 25 | Lexicon Leponticum (Stifter et al.) |
| `xce` | Celtiberian | 15+ | De Bernardo Stempel (1999) |
| `xsa` | Ancient South Arabian | 30+ | Stein (2003), Beeston (1984) |
| `alg-pro` | Proto-Algonquian | 15+ | Bloomfield (1946), Goddard (1994) |
| `sqj-pro` | Proto-Albanian | 15+ | Orel (1998), Demiraj (1997) |
| `aav-pro` | Proto-Austroasiatic | 10+ | Shorto (2006), Sidwell (2015) |
| `poz-pol-pro` | Proto-Polynesian | 10+ | Biggs (1978), Pawley (1966) |
| `tai-pro` | Proto-Tai | 20+ | Li (1977), Pittayaporn (2009) |
| `xto-pro` | Proto-Tocharian | 15+ | Adams (2013), Peyrot (2008) |
| `poz-oce-pro` | Proto-Oceanic | 10+ | Ross et al. (1998, 2003, 2008) |
| `xgn-pro` | Proto-Mongolic | 15+ | Poppe (1955), Nugteren (2011) |
| `xmr` | Meroitic | 30+ | Rilly (2007), Griffith (1911) |
| `obm` | Moabite | 22 | Canaanite abjad (shares Phoenician map base) |
| `myn-pro` | Proto-Mayan | 20+ | Kaufman (2003), Campbell & Kaufman (1985) |
| `afa-pro` | Proto-Afroasiatic | 15+ | Ehret (1995), Orel & Stolbova (1995) |
| `xib` | Iberian | 25+ | De Hoz (2010), Untermann (1990) |
| `xeb` | Eblaite | 20+ | Krebernik (1982), Fronzaroli (2003) |

### NFC Normalization

All map keys and input text are NFC-normalized before comparison. This ensures `š` (U+0161, composed) matches `s` + combining caron (U+0073 + U+030C, decomposed). Cache is per-ISO to prevent cross-language leakage.
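For illustration, the equivalence described above can be checked directly with the standard library:

```python
import unicodedata

composed = "\u0161"        # 'š' as one code point (U+0161)
decomposed = "s\u030c"     # 's' + combining caron (U+0073 + U+030C)

assert composed != decomposed                                # raw strings differ
assert unicodedata.normalize("NFC", decomposed) == composed  # NFC makes them match
```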
### ISO Code Mapping for Proto-Languages

TSV filenames use hyphenated codes, but `ALL_MAPS` uses short codes:

| TSV filename ISO | Map ISO |
|-----------------|---------|
| `ine-pro` | `ine` |
| `sem-pro` | `sem` |
| `ccs-pro` | `ccs` |
| `dra-pro` | `dra` |
| `gem-pro` | `gem-pro` |
| `cel-pro` | `cel-pro` |
| `urj-pro` | `urj-pro` |
| `bnt-pro` | `bnt-pro` |
| `sit-pro` | `sit-pro` |

### Adding a New Map

1. Add the `Dict[str, str]` constant (e.g., `NEW_LANG_MAP`) with a cited reference (see the sketch after this list)
2. Register it in `ALL_MAPS`: `"iso_code": NEW_LANG_MAP`
3. Clear `_nfc_cache` implicitly (happens on the next call with the new ISO)
4. Run `reprocess_ipa.py --language iso_code` to apply
5. Deploy adversarial auditor to verify
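A hedged sketch of steps 1-2 for a hypothetical language code `xxx`; the three keys are invented purely to show the shape — real keys must come from the cited reference:

```python
# scripts/transliteration_maps.py — step 1: the map constant, with its source cited
XXX_MAP = {     # hypothetical language "xxx"; Reference: Author (Year)
    "š": "ʃ",
    "ḫ": "x",
    "ā": "aː",
}

# step 2: one new entry in the existing ALL_MAPS registry
# ALL_MAPS = { ..., "xxx": XXX_MAP }
```

Step 4 is then `python scripts/reprocess_ipa.py --language xxx`, which applies the new map under the never-regress rule from Section 6.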
---

## 8. Sound Class (SCA) System

**File:** `cognate_pipeline/src/cognate_pipeline/normalise/sound_class.py`

### Class Inventory

| Class | IPA Segments | Description |
|-------|-------------|-------------|
| A | a, ɑ, æ, ɐ | Open vowels |
| E | e, ɛ, ə, ɘ, ø, œ | Mid vowels |
| I | i, ɪ, ɨ | Close front vowels |
| O | o, ɔ, ɵ | Mid back vowels |
| U | u, ʊ, ʉ, ɯ, y | Close back vowels |
| P/B | p, b, ɸ, β | Labial stops |
| T/D | t, d, ʈ, ɖ | Coronal stops |
| K/G | k, g, ɡ, q, ɢ | Dorsal stops |
| S | s, z, ʃ, ʒ, ɕ, ʑ, f, v, θ, ð, x, ɣ, χ, ts, dz, tʃ, dʒ | Fricatives + affricates |
| M/N | m, n, ɲ, ŋ, ɳ, ɴ | Nasals |
| L/R | l, ɫ, ɭ, ɬ, r, ɾ, ɽ, ʀ, ɹ, ʁ | Liquids |
| W/Y | w, ʋ, ɰ, j | Glides |
| H | ʔ, h, ɦ, ʕ, ħ | Glottals/pharyngeals |
| 0 | (anything unmapped) | Unknown |

### Processing Chain

```python
ipa_to_sound_class("paxːur")
  → tokenize_ipa("paxːur") → ["p", "a", "xː", "u", "r"]
  → [segment_to_class(s) for s in segments] → ["P", "A", "K", "U", "R"]
  → "PAKUR"
```
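A toy version of that chain, using a flat character-to-class lookup; the real `tokenize_ipa()` presumably also handles diacritics and multi-character affricates, so this is only meant to make the worked example above reproducible:

```python
CLASS_OF = {   # tiny excerpt of the inventory, following the worked example ("paxːur" → "PAKUR")
    "p": "P", "t": "T", "k": "K", "x": "K", "s": "S",
    "a": "A", "e": "E", "i": "I", "o": "O", "u": "U",
    "m": "M", "n": "N", "l": "L", "r": "R", "w": "W", "j": "Y", "h": "H",
}

def toy_ipa_to_sound_class(ipa):
    """Map each base segment to its class; 'ː' folds into the previous segment."""
    out = []
    for ch in ipa:
        if ch == "ː":                        # length mark: not a segment of its own
            continue
        out.append(CLASS_OF.get(ch, "0"))    # '0' = unmapped segment
    return "".join(out)

# toy_ipa_to_sound_class("paxːur") → "PAKUR"
```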
|
| 517 |
+
|
| 518 |
+
---
|
| 519 |
+
|
| 520 |
+
## 9. Scripts & Data Flow
|
| 521 |
+
|
| 522 |
+
### Data Flow Diagram
|
| 523 |
+
|
| 524 |
+
```
|
| 525 |
+
EXTERNAL SOURCES
|
| 526 |
+
├── Wiktionary API ──────────→ extract_ave_peo_xpg.py
|
| 527 |
+
│ extract_phn_elx.py
|
| 528 |
+
│ extract_pie_urartian.py
|
| 529 |
+
│ extract_wiktionary_lexicons.py
|
| 530 |
+
│ expand_wiktionary_categories.py
|
| 531 |
+
│ expand_xpg.py
|
| 532 |
+
├── eDiAna API ──────────────→ scrape_ediana.py
|
| 533 |
+
├── Palaeolexicon API ───────→ scrape_palaeolexicon.py
|
| 534 |
+
├── Oracc JSON API ──────────→ scrape_oracc_urartian.py
|
| 535 |
+
├── avesta.org ──────────────→ scrape_avesta_org.py
|
| 536 |
+
├── TIR (Vienna) ────────────→ scrape_tir_rhaetic.py
|
| 537 |
+
├── WikiPron TSVs ───────────→ ingest_wikipron.py
|
| 538 |
+
└── CLDF Sources ────────────→ expand_cldf_full.py
|
| 539 |
+
convert_cldf_to_tsv.py
|
| 540 |
+
↓
|
| 541 |
+
data/training/lexicons/{iso}.tsv
|
| 542 |
+
↓
|
| 543 |
+
normalize_lexicons.py (NFC, dedup, strip stress)
|
| 544 |
+
reprocess_ipa.py (re-apply updated transliteration maps)
|
| 545 |
+
fix_abvd_ipa.py (Austronesian G2P fix)
|
| 546 |
+
↓
|
| 547 |
+
assemble_lexicons.py → metadata/languages.tsv
|
| 548 |
+
assign_cognate_links.py → cognate_pairs/*.tsv
|
| 549 |
+
build_validation_sets.py → validation/*.tsv
|
| 550 |
+
```
|
| 551 |
+
|
| 552 |
+
### Script Quick Reference
|
| 553 |
+
|
| 554 |
+
| Script | Purpose | Languages |
|
| 555 |
+
|--------|---------|-----------|
|
| 556 |
+
| `extract_ave_peo_xpg.py` | Wiktionary Swadesh + category | ave, peo, xpg |
|
| 557 |
+
| `extract_phn_elx.py` | Wiktionary + appendix | phn, elx |
|
| 558 |
+
| `extract_pie_urartian.py` | Wiktionary + Wikipedia | ine-pro, xur |
|
| 559 |
+
| `extract_wiktionary_lexicons.py` | Wiktionary appendix | sem-pro, ccs-pro, dra-pro, xle |
|
| 560 |
+
| `extract_anatolian_lexicons.py` | Multi-source | xlc, xld, xcr |
|
| 561 |
+
| `expand_wiktionary_categories.py` | Wiktionary category pagination | ine-pro, uga, peo, ave, dra-pro, sem-pro, ccs-pro |
|
| 562 |
+
| `expand_xpg.py` | Wiktionary category + appendix | xpg |
|
| 563 |
+
| `scrape_ediana.py` | eDiAna POST API | xlc, xld, xcr, xlw |
|
| 564 |
+
| `scrape_palaeolexicon.py` | Palaeolexicon REST API | xlc, xld, xcr, xlw, xhu, ett |
|
| 565 |
+
| `scrape_avesta.py` | avesta.org (old, superseded) | ave |
|
| 566 |
+
| `scrape_avesta_org.py` | avesta.org dictionary (current, adversarial-audited) | ave |
|
| 567 |
+
| `scrape_oracc_urartian.py` | Oracc eCUT JSON API | xur |
|
| 568 |
+
| `scrape_tir_rhaetic.py` | TIR web scrape | xrr |
|
| 569 |
+
| `ingest_wikipron.py` | WikiPron TSV ingestion | 800+ modern |
|
| 570 |
+
| `expand_cldf_full.py` | CLDF full extraction | All CLDF languages |
|
| 571 |
+
| `reprocess_ipa.py` | Re-apply transliteration maps | 23 ancient |
|
| 572 |
+
| `fix_abvd_ipa.py` | G2P for Austronesian | ABVD languages |
|
| 573 |
+
| `normalize_lexicons.py` | NFC + dedup + SCA recompute | All |
|
| 574 |
+
| `assemble_lexicons.py` | Generate metadata | All |
|
| 575 |
+
| `ingest_wiktionary_tier2.py` | Wiktionary category ingestion (Tier 2+) | Phase 6-8 Wiktionary languages |
|
| 576 |
+
| `fetch_wiktionary_raw.py` | Fetch raw Wiktionary category JSON | Phase 6-8 Wiktionary languages |
|
| 577 |
+
| `ingest_dcclt_ebla.py` | ORACC DCCLT/Ebla extraction | xeb |
|
| 578 |
+
| `ingest_meroitic.py` | Meroitic Language Corpus | xmr |
|
| 579 |
+
| `ingest_lexlep.py` | Lexicon Leponticum extraction | xlp |
|
| 580 |
+
| `ingest_ceipom_italic.py` | CEIPoM italic epigraphy | osc, xum, xve, xfa |
|
| 581 |
+
| `update_metadata.py` | Update languages.tsv from disk | All |
|
| 582 |
+
| `validate_all.py` | Comprehensive TSV validation | All |
|
| 583 |
+
| `push_to_hf.py` | Push files to HuggingFace | All Phase 6-8 |
|
| 584 |
+
|
| 585 |
+
---
|
| 586 |
+
|
| 587 |
+
## 10. PRD: Adding New Data to Existing Languages
|
| 588 |
+
|
| 589 |
+
### Prerequisites
|
| 590 |
+
|
| 591 |
+
- The language already has a TSV file in `data/training/lexicons/`
|
| 592 |
+
- You have identified a new external source with verifiable data
|
| 593 |
+
- A transliteration map exists in `transliteration_maps.py` (if ancient)
|
| 594 |
+
|
| 595 |
+
### Step-by-Step
|
| 596 |
+
|
| 597 |
+
#### Step 1: Identify Source
|
| 598 |
+
- Find a publicly accessible online source (API, web page, database)
|
| 599 |
+
- Verify it returns real lexical data (not AI-generated)
|
| 600 |
+
- Document the URL, API format, and expected entry count
|
| 601 |
+
|
| 602 |
+
#### Step 2: Write Extraction Script
|
| 603 |
+
```python
|
| 604 |
+
# Template: scripts/scrape_{source}_{iso}.py
|
| 605 |
+
#!/usr/bin/env python3
|
| 606 |
+
"""Scrape {Source Name} for {Language} word lists.
|
| 607 |
+
Source: {URL}
|
| 608 |
+
"""
|
| 609 |
+
import urllib.request # MANDATORY — proves data comes from HTTP
|
| 610 |
+
import json  # used by fetch_data below (json.loads)
...
|
| 611 |
+
|
| 612 |
+
def fetch_data(url):
|
| 613 |
+
"""Fetch from external source."""
|
| 614 |
+
req = urllib.request.Request(url, headers={"User-Agent": "..."})
|
| 615 |
+
with urllib.request.urlopen(req) as resp:
|
| 616 |
+
return json.loads(resp.read())
|
| 617 |
+
|
| 618 |
+
def process_language(iso, config, dry_run=False):
|
| 619 |
+
"""Process and deduplicate."""
|
| 620 |
+
existing = load_existing_words(tsv_path) # MUST deduplicate
|
| 621 |
+
entries = fetch_data(url)
|
| 622 |
+
new_entries = [e for e in entries if e["word"] not in existing]
|
| 623 |
+
...
|
| 624 |
+
# Apply transliteration
|
| 625 |
+
ipa = transliterate(word, iso)
|
| 626 |
+
sca = ipa_to_sound_class(ipa)
|
| 627 |
+
f.write(f"{word}\t{ipa}\t{sca}\t{source_id}\t{concept_id}\t-\n")
|
| 628 |
+
```
|
| 629 |
+
|
| 630 |
+
**Critical:** Script MUST contain `urllib.request.urlopen()`, `requests.get()`, or equivalent HTTP fetch. No hardcoded word lists.
|
| 631 |
+
|
| 632 |
+
#### Step 3: Run with --dry-run
|
| 633 |
+
```bash
|
| 634 |
+
python scripts/scrape_new_source.py --dry-run --language {iso}
|
| 635 |
+
```
|
| 636 |
+
|
| 637 |
+
#### Step 4: Run Live
|
| 638 |
+
```bash
|
| 639 |
+
python scripts/scrape_new_source.py --language {iso}
|
| 640 |
+
```
|
| 641 |
+
|
| 642 |
+
#### Step 5: Re-process IPA (if map was updated)
|
| 643 |
+
```bash
|
| 644 |
+
python scripts/reprocess_ipa.py --language {iso}
|
| 645 |
+
```
|
| 646 |
+
|
| 647 |
+
#### Step 6: Deploy Adversarial Auditor
|
| 648 |
+
See [Section 13](#13-adversarial-review-protocol).
|
| 649 |
+
|
| 650 |
+
#### Step 7: Commit & Push to Both Repos
|
| 651 |
+
```bash
|
| 652 |
+
# GitHub
|
| 653 |
+
git add scripts/scrape_new_source.py data/training/lexicons/{iso}.tsv
|
| 654 |
+
git commit -m "Add {N} entries to {Language} from {Source}"
|
| 655 |
+
git push
|
| 656 |
+
|
| 657 |
+
# HuggingFace (MANDATORY — HF is the primary data host)
|
| 658 |
+
python -c "
|
| 659 |
+
from huggingface_hub import HfApi
|
| 660 |
+
api = HfApi()
|
| 661 |
+
for f in ['data/training/lexicons/{iso}.tsv', 'scripts/scrape_new_source.py']:
|
| 662 |
+
api.upload_file(path_or_fileobj=f, path_in_repo=f,
|
| 663 |
+
repo_id='PhaistosLabs/ancient-scripts-datasets', repo_type='dataset',
|
| 664 |
+
commit_message='Add {N} entries to {Language} from {Source}')
|
| 665 |
+
"
|
| 666 |
+
```
|
| 667 |
+
|
| 668 |
+
---
|
| 669 |
+
|
| 670 |
+
## 11. PRD: Adding New Languages
|
| 671 |
+
|
| 672 |
+
### Prerequisites
|
| 673 |
+
|
| 674 |
+
- ISO 639-3 code identified
|
| 675 |
+
- At least one external source with verifiable word lists
|
| 676 |
+
- Script conventions for the relevant writing system understood
|
| 677 |
+
|
| 678 |
+
### Step-by-Step
|
| 679 |
+
|
| 680 |
+
#### Step 1: Create Transliteration Map (if needed)
|
| 681 |
+
|
| 682 |
+
Add to `scripts/transliteration_maps.py`:
|
| 683 |
+
|
| 684 |
+
```python
|
| 685 |
+
# ---------------------------------------------------------------------------
|
| 686 |
+
# N. NEW_LANGUAGE (Author Year, "Title")
|
| 687 |
+
# ---------------------------------------------------------------------------
|
| 688 |
+
NEW_LANGUAGE_MAP: Dict[str, str] = {
|
| 689 |
+
"a": "a", "b": "b", ...
|
| 690 |
+
# Every key MUST have a cited academic reference
|
| 691 |
+
}
|
| 692 |
+
```
|
| 693 |
+
|
| 694 |
+
Register in `ALL_MAPS`:
|
| 695 |
+
```python
|
| 696 |
+
ALL_MAPS = {
|
| 697 |
+
...
|
| 698 |
+
"new_iso": NEW_LANGUAGE_MAP,
|
| 699 |
+
}
|
| 700 |
+
```
|
| 701 |
+
|
| 702 |
+
#### Step 2: Write Extraction Script
|
| 703 |
+
|
| 704 |
+
Follow the template in [Section 10](#10-prd-adding-new-data-to-existing-languages). The script must:
|
| 705 |
+
- Fetch from an external source via HTTP
|
| 706 |
+
- Parse the response (HTML, JSON, XML)
|
| 707 |
+
- Apply `transliterate()` and `ipa_to_sound_class()`
|
| 708 |
+
- Write to `data/training/lexicons/{iso}.tsv`
|
| 709 |
+
- Save raw JSON to `data/training/raw/` for audit trail
|
| 710 |
+
- Deduplicate by Word column
|
| 711 |
+
|
| 712 |
+
#### Step 3: Add to Language Config (optional)
|
| 713 |
+
|
| 714 |
+
If the language will be part of the ancient languages pipeline, add to `scripts/language_configs.py`.
|
| 715 |
+
|
| 716 |
+
#### Step 4: Add to Re-processing List
|
| 717 |
+
|
| 718 |
+
Add the ISO code to `ANCIENT_LANGUAGES` in `scripts/reprocess_ipa.py` and to `ISO_TO_MAP_ISO` if the TSV filename differs from the map ISO.
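
Illustratively (the list/dict shapes below are assumptions about how `reprocess_ipa.py` stores these; only the two additions are the point):

```python
# In scripts/reprocess_ipa.py -- illustrative shapes; existing contents elided.
ANCIENT_LANGUAGES = [
    # ... existing ISO codes ...
    "new_iso",
]

# Only needed when the TSV filename's ISO differs from the map ISO in ALL_MAPS.
ISO_TO_MAP_ISO = {
    # ... existing mappings ...
    "new_iso": "map_iso",
}
```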
|
| 719 |
+
|
| 720 |
+
#### Step 5: Run Extraction
|
| 721 |
+
```bash
|
| 722 |
+
python scripts/scrape_{source}.py --language {iso} --dry-run
|
| 723 |
+
python scripts/scrape_{source}.py --language {iso}
|
| 724 |
+
```
|
| 725 |
+
|
| 726 |
+
#### Step 6: Verify
|
| 727 |
+
|
| 728 |
+
```bash
|
| 729 |
+
# Check entry count and IPA quality
|
| 730 |
+
python scripts/reprocess_ipa.py --dry-run --language {iso}
|
| 731 |
+
```
|
| 732 |
+
|
| 733 |
+
#### Step 7: Deploy Adversarial Auditor
|
| 734 |
+
|
| 735 |
+
See [Section 13](#13-adversarial-review-protocol).
|
| 736 |
+
|
| 737 |
+
#### Step 8: Commit and Push
|
| 738 |
+
|
| 739 |
+
---
|
| 740 |
+
|
| 741 |
+
## 12. Data Acquisition Rules (Iron Law)
|
| 742 |
+
|
| 743 |
+
```
|
| 744 |
+
┌─────────────────────────────────────────────────────────────────────┐
|
| 745 |
+
│ DATA MAY ONLY ENTER THE DATASET THROUGH CODE THAT DOWNLOADS IT │
|
| 746 |
+
│ FROM AN EXTERNAL SOURCE. │
|
| 747 |
+
│ │
|
| 748 |
+
│ NO EXCEPTIONS. NO "JUST THIS ONCE." NO "IT'S FASTER." │
|
| 749 |
+
└─────────────────────────────────────────────────────────────────────┘
|
| 750 |
+
```
|
| 751 |
+
|
| 752 |
+
### What IS Allowed
|
| 753 |
+
|
| 754 |
+
| Action | Example | Why OK |
|
| 755 |
+
|--------|---------|--------|
|
| 756 |
+
| Write a script with `urllib.request.urlopen()` | `scrape_palaeolexicon.py` | Data comes from HTTP |
|
| 757 |
+
| Parse HTML/JSON from downloaded content | `BeautifulSoup(html)` | Deterministic extraction |
|
| 758 |
+
| Apply transliteration map (CODE, not DATA) | `transliterate(word, "hit")` | Transformation rules are code |
|
| 759 |
+
| Re-compute SCA from IPA | `ipa_to_sound_class(ipa)` | Deterministic function |
|
| 760 |
+
|
| 761 |
+
### What is FORBIDDEN
|
| 762 |
+
|
| 763 |
+
| Action | Example | Why Forbidden |
|
| 764 |
+
|--------|---------|---------------|
|
| 765 |
+
| Write data rows directly | `f.write("water\twɔːtər\t...")` | Data authoring |
|
| 766 |
+
| Hardcode word lists from memory | `WORDS = [("fire", "paxːur")]` | LLM knowledge ≠ source |
|
| 767 |
+
| Fill in missing fields with guesses | `ipa = "probably θ"` | Hallucination risk |
|
| 768 |
+
| Generate translations/transcriptions | `ipa = "wɔːtər" # I know how water sounds` | Not from a source |
|
| 769 |
+
| Pad entries to reach a target count | Adding 13 entries to make it 200 | Fabrication |
|
| 770 |
+
|
| 771 |
+
### The Cached-Fetch Pattern (Acceptable Gray Area)
|
| 772 |
+
|
| 773 |
+
If a source requires JavaScript rendering or CAPTCHAs:
|
| 774 |
+
1. Use WebFetch/browser to access the source
|
| 775 |
+
2. Save raw content to `data/training/raw/{source}_{iso}_{date}.html`
|
| 776 |
+
3. Write a parsing script that reads from the saved file
|
| 777 |
+
4. The auditor spot-checks 5 entries against the live source
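
A minimal sketch of step 3, assuming the snapshot was saved in step 2 (the filename and the CSS selector are illustrative placeholders):

```python
#!/usr/bin/env python3
"""Parse a saved raw snapshot instead of the live page (cached-fetch pattern, step 3)."""
from pathlib import Path

from bs4 import BeautifulSoup

# Placeholder path following the data/training/raw/{source}_{iso}_{date}.html convention.
RAW = Path("data/training/raw/examplesource_xxx_2026-01-01.html")

def parse_saved_snapshot(raw_path: Path) -> list:
    soup = BeautifulSoup(raw_path.read_text(encoding="utf-8"), "html.parser")
    entries = []
    for row in soup.select("table.wordlist tr"):  # selector is a placeholder
        cells = [c.get_text(strip=True) for c in row.find_all("td")]
        if len(cells) >= 2:
            entries.append({"word": cells[0], "gloss": cells[1]})
    return entries

if __name__ == "__main__":
    print(f"{len(parse_saved_snapshot(RAW))} entries parsed from snapshot")
```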
|
| 778 |
+
|
| 779 |
+
### Transliteration Maps Are CODE, Not DATA
|
| 780 |
+
|
| 781 |
+
Transliteration maps (e.g., `"š": "ʃ"`) are **transformation rules** derived from published grammars, not lexical content. Adding or modifying map entries is a code change, not data authoring. However, every map entry MUST cite an academic reference.
|
| 782 |
+
|
| 783 |
+
---
|
| 784 |
+
|
| 785 |
+
## 13. Adversarial Review Protocol
|
| 786 |
+
|
| 787 |
+
### Architecture: Dual-Agent System
|
| 788 |
+
|
| 789 |
+
```
|
| 790 |
+
Team A (Extraction Agent) Team B (Adversarial Auditor)
|
| 791 |
+
├── Writes code ├── Reviews code
|
| 792 |
+
├── Runs scripts ├── Spot-checks output
|
| 793 |
+
├── Produces TSV data ├── Verifies provenance
|
| 794 |
+
└── NEVER writes data └── Has VETO POWER
|
| 795 |
+
directly
|
| 796 |
+
```
|
| 797 |
+
|
| 798 |
+
### When to Deploy
|
| 799 |
+
|
| 800 |
+
- After ANY new data is added to the database
|
| 801 |
+
- After ANY transliteration map change
|
| 802 |
+
- After ANY re-processing run
|
| 803 |
+
- After ANY script modification that affects output
|
| 804 |
+
|
| 805 |
+
### Audit Checklist (per modular step)
|
| 806 |
+
|
| 807 |
+
#### Code Review
|
| 808 |
+
- [ ] Script contains `urllib`/`requests`/`curl` (not hardcoded data)
|
| 809 |
+
- [ ] No literal IPA data in `f.write()` calls
|
| 810 |
+
- [ ] Source attribution matches actual source
|
| 811 |
+
- [ ] Deduplication against existing entries
|
| 812 |
+
|
| 813 |
+
#### Data Quality
|
| 814 |
+
- [ ] Entry count is non-round and plausible
|
| 815 |
+
- [ ] No duplicate Word values
|
| 816 |
+
- [ ] No empty IPA fields
|
| 817 |
+
- [ ] Identity rate is explainable (not suspiciously low or high)
|
| 818 |
+
- [ ] SCA matches `ipa_to_sound_class(IPA)` for 20 random samples
|
| 819 |
+
|
| 820 |
+
#### Never-Regress Verification
|
| 821 |
+
- [ ] No entry went from non-identity IPA to identity (regression)
|
| 822 |
+
- [ ] Entry counts did not decrease
|
| 823 |
+
- [ ] Existing Word/Source/Concept_ID/Cognate_Set_ID unchanged
|
| 824 |
+
|
| 825 |
+
#### Provenance
|
| 826 |
+
- [ ] 20 random entries traced back to source URL
|
| 827 |
+
- [ ] Raw JSON/HTML audit trail saved in `data/training/raw/`
|
| 828 |
+
|
| 829 |
+
### Red Flags (STOP immediately)
|
| 830 |
+
|
| 831 |
+
| Red Flag | What It Means |
|
| 832 |
+
|----------|---------------|
|
| 833 |
+
| No `urllib`/`requests` in extraction code | Agent is authoring data |
|
| 834 |
+
| Entry count is exactly round (100, 200, 500) | Likely padded |
|
| 835 |
+
| >90% of entries have empty required fields | Extraction didn't work |
|
| 836 |
+
| Script contains `f.write("word\tipa\t...")` with literal data | Direct data authoring |
|
| 837 |
+
| Transformation output == input for >80% without cited justification | Map not actually applied |
|
| 838 |
+
|
| 839 |
+
### Report Format
|
| 840 |
+
|
| 841 |
+
```markdown
|
| 842 |
+
# Adversarial Audit: {Step} — {Language} ({iso})
|
| 843 |
+
## Checks:
|
| 844 |
+
- [ ] No data authoring: PASS/FAIL
|
| 845 |
+
- [ ] Entry count: PASS/FAIL (expected X, got Y)
|
| 846 |
+
- [ ] IPA quality: PASS/FAIL (identity rate: Z%)
|
| 847 |
+
- [ ] SCA consistency: PASS/FAIL (N/N verified)
|
| 848 |
+
- [ ] Provenance: PASS/FAIL (N/20 traced to source)
|
| 849 |
+
## Verdict: PASS / WARN / FAIL
|
| 850 |
+
## Blocking: YES (if FAIL)
|
| 851 |
+
```
|
| 852 |
+
|
| 853 |
+
---
|
| 854 |
+
|
| 855 |
+
## 14. Re-processing & Cleaning Runbook
|
| 856 |
+
|
| 857 |
+
### When to Re-process
|
| 858 |
+
|
| 859 |
+
- After modifying any transliteration map in `transliteration_maps.py`
|
| 860 |
+
- After fixing a bug in `transliterate()` or `ipa_to_sound_class()`
|
| 861 |
+
- After adding a new language to `ALL_MAPS`
|
| 862 |
+
|
| 863 |
+
### How to Re-process
|
| 864 |
+
|
| 865 |
+
```bash
|
| 866 |
+
# Dry run first (ALWAYS)
|
| 867 |
+
python scripts/reprocess_ipa.py --dry-run
|
| 868 |
+
|
| 869 |
+
# Check: identity rates should decrease or stay the same, NEVER increase
|
| 870 |
+
# Check: "Changed" column shows expected number of modifications
|
| 871 |
+
# Check: "Errors" column is 0
|
| 872 |
+
|
| 873 |
+
# Run live
|
| 874 |
+
python scripts/reprocess_ipa.py
|
| 875 |
+
|
| 876 |
+
# Or for a single language
|
| 877 |
+
python scripts/reprocess_ipa.py --language xlw
|
| 878 |
+
```
|
| 879 |
+
|
| 880 |
+
### Common Cleaning Operations
|
| 881 |
+
|
| 882 |
+
#### Remove entries with HTML artifacts
|
| 883 |
+
```bash
|
| 884 |
+
# Check for HTML entities
|
| 885 |
+
grep -P '&\w+;' data/training/lexicons/{iso}.tsv
|
| 886 |
+
# Remove affected lines via Python script (not manual edit)
|
| 887 |
+
```
|
| 888 |
+
|
| 889 |
+
#### Remove entries from wrong source (contamination)
|
| 890 |
+
```python
|
| 891 |
+
# Example: Hurrian TSV had Hittite entries from wrong Palaeolexicon ID
|
| 892 |
+
# Write a Python script that identifies and removes contaminated entries
|
| 893 |
+
# Save removed entries to audit trail
|
| 894 |
+
```
|
| 895 |
+
|
| 896 |
+
#### Deduplicate
|
| 897 |
+
```python
|
| 898 |
+
# reprocess_ipa.py handles dedup by Word column
|
| 899 |
+
# For more complex dedup, use normalize_lexicons.py
|
| 900 |
+
```
|
| 901 |
+
|
| 902 |
+
#### Fix ABVD fake-IPA
|
| 903 |
+
```bash
|
| 904 |
+
python scripts/fix_abvd_ipa.py
|
| 905 |
+
```
|
| 906 |
+
|
| 907 |
+
### Post-Cleaning Verification
|
| 908 |
+
|
| 909 |
+
```bash
|
| 910 |
+
# Verify entry counts
|
| 911 |
+
python -c "
|
| 912 |
+
for iso in ['hit','uga',...]:
|
| 913 |
+
with open(f'data/training/lexicons/{iso}.tsv') as f:
|
| 914 |
+
print(f'{iso}: {sum(1 for _ in f) - 1} entries')
|
| 915 |
+
"
|
| 916 |
+
|
| 917 |
+
# Verify no empty IPA
|
| 918 |
+
python -c "
|
| 919 |
+
for iso in [...]:
|
| 920 |
+
with open(f'data/training/lexicons/{iso}.tsv') as f:
|
| 921 |
+
for line in f:
|
| 922 |
+
parts = line.strip().split('\t')
|
| 923 |
+
if len(parts) >= 2 and not parts[1]:
|
| 924 |
+
print(f'EMPTY IPA: {iso} {parts[0]}')
|
| 925 |
+
"
|
| 926 |
+
```
|
| 927 |
+
|
| 928 |
+
---
|
| 929 |
+
|
| 930 |
+
## 15. Known Limitations & Future Work
|
| 931 |
+
|
| 932 |
+
### Linguistic Limitations
|
| 933 |
+
|
| 934 |
+
| Issue | Languages Affected | Root Cause |
|
| 935 |
+
|-------|-------------------|------------|
|
| 936 |
+
| Broad phonemic only (no allophonic) | All ancient | Dead languages — allophonic variation unrecoverable |
|
| 937 |
+
| Cuneiform sign names as entries | xur, xhu | Source provides sign-level notation, not phonemic. ~156 Sumerograms in xur. |
|
| 938 |
+
| High identity for transparent orthographies | elx, cms, xle | Writing system maps 1:1 to IPA |
|
| 939 |
+
| Old Persian ç → θ debatable | peo | Kent (1953) says /θ/, Kloekhorst (2008) says /ts/ |
|
| 940 |
+
| Old Persian cuneiform inherent vowels | peo | Syllabary signs (𐎣=ka, 𐎫=ta) include inherent vowels that may be redundant in context |
|
| 941 |
+
| eDiAna entries drive high identity | xlc, xld | eDiAna provides already-transliterated forms; identity is expected, not a map gap |
|
| 942 |
+
|
| 943 |
+
### Technical Debt
|
| 944 |
+
|
| 945 |
+
| Issue | Priority | Fix |
|
| 946 |
+
|-------|----------|-----|
|
| 947 |
+
| `use_word_for_ipa` dead config in expand_wiktionary_categories.py | Low | Remove the config key |
|
| 948 |
+
| Some extraction scripts have hardcoded word lists from pre-Iron-Law era | Medium | Rewrite with HTTP fetch |
|
| 949 |
+
| ABVD entries still ~50% fake-IPA after G2P fix | Medium | Better G2P or manual review |
|
| 950 |
+
| NorthEuraLex/WOLD join segments with spaces | Low | Handled by normalize_lexicons.py |
|
| 951 |
+
| Combining diacritics in Lycian/Carian (U+0303, U+0302) | Low | Normalize in preprocessing before transliteration |
|
| 952 |
+
| Greek letter leaks in Carian source data | Low | Data cleaning script to normalize σ→s, α→a, etc. |
|
| 953 |
+
| HTML entities in 4 PIE IPA entries | Low | Decode with `html.unescape()` in reprocess_ipa.py |
|
| 954 |
+
| 15 Old Persian proper nouns have wrong-language IPA | Low | Filter or manually correct Akkadian/Greek transcriptions |
|
| 955 |
+
|
| 956 |
+
### Expansion Opportunities
|
| 957 |
+
|
| 958 |
+
| Language | Current | Available | Source |
|
| 959 |
+
|----------|---------|-----------|--------|
|
| 960 |
+
| Sumerian | 0 | 5,000+ | EPSD2 (ePSD), Oracc |
|
| 961 |
+
| Akkadian | 0 | 10,000+ | CAD, CDA, ePSD2 |
|
| 962 |
+
| Egyptian | 0 | 3,000+ | TLA (Thesaurus Linguae Aegyptiae) |
|
| 963 |
+
| Sanskrit | (modern only) | 50,000+ | Monier-Williams, DCS |
|
| 964 |
+
| Linear B | 0 | 500+ | DAMOS, Wingspread |
|
| 965 |
+
| Hieroglyphic Luwian | (mixed with xlw) | 500+ | Hawkins (2000) |
|
| 966 |
+
|
| 967 |
+
---
|
| 968 |
+
|
| 969 |
+
## Appendix A: Quick Commands
|
| 970 |
+
|
| 971 |
+
```bash
|
| 972 |
+
# Count entries for a language
|
| 973 |
+
wc -l data/training/lexicons/{iso}.tsv
|
| 974 |
+
|
| 975 |
+
# Check identity rate
|
| 976 |
+
python -c "
|
| 977 |
+
with open('data/training/lexicons/{iso}.tsv') as f:
|
| 978 |
+
lines = f.readlines()[1:]
|
| 979 |
+
total = len(lines)
|
| 980 |
+
identity = sum(1 for l in lines if l.split('\t')[0] == l.split('\t')[1])
|
| 981 |
+
print(f'{identity}/{total} = {identity/total*100:.1f}%')
|
| 982 |
+
"
|
| 983 |
+
|
| 984 |
+
# Test a transliteration map
|
| 985 |
+
python -c "
|
| 986 |
+
import sys; sys.path.insert(0, 'scripts')
|
| 987 |
+
from transliteration_maps import transliterate
|
| 988 |
+
print(transliterate('test_word', 'iso_code'))
|
| 989 |
+
"
|
| 990 |
+
|
| 991 |
+
# Re-process single language (dry run)
|
| 992 |
+
python scripts/reprocess_ipa.py --dry-run --language {iso}
|
| 993 |
+
|
| 994 |
+
# Run adversarial audit (deploy via AI agent)
|
| 995 |
+
# See Section 13 for protocol
|
| 996 |
+
```
|
| 997 |
+
|
| 998 |
+
## Appendix B: File Checksums Reference
|
| 999 |
+
|
| 1000 |
+
Run after any batch operation to create a baseline:
|
| 1001 |
+
```bash
|
| 1002 |
+
find data/training/lexicons -name "*.tsv" -exec wc -l {} \; | sort -k2 > /tmp/lexicon_counts.txt
|
| 1003 |
+
```
|
docs/prd/PRD_DATABASE_RECTIFICATION.md
ADDED
|
@@ -0,0 +1,796 @@
|
| 1 |
+
# PRD: Database Rectification & Expansion Plan
|
| 2 |
+
|
| 3 |
+
**Date:** 2026-03-12
|
| 4 |
+
**Triggered by:** [Adversarial Database Audit 2026-03-12](../ADVERSARIAL_DATABASE_AUDIT_2026-03-12.md)
|
| 5 |
+
**Reference:** [DATABASE_REFERENCE.md](../DATABASE_REFERENCE.md) (protocols, schema, Iron Law)
|
| 6 |
+
**Status:** DRAFT — awaiting approval
|
| 7 |
+
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
## IRON LAW (UNCHANGED — SUPERSEDES ALL GOALS)
|
| 11 |
+
|
| 12 |
+
```
|
| 13 |
+
DATA MAY ONLY ENTER THE DATASET THROUGH CODE THAT DOWNLOADS IT
|
| 14 |
+
FROM AN EXTERNAL SOURCE.
|
| 15 |
+
|
| 16 |
+
NO EXCEPTIONS. NO "JUST THIS ONCE." NO "IT'S FASTER."
|
| 17 |
+
```
|
| 18 |
+
|
| 19 |
+
Every phase below produces **Python scripts** that fetch data via HTTP. No hardcoded word lists. No direct TSV edits. No LLM-generated linguistic content. Every script must contain `urllib.request.urlopen()`, `requests.get()`, or equivalent HTTP fetch. Transliteration maps are CODE (transformation rules from cited grammars), not DATA.
|
| 20 |
+
|
| 21 |
+
---
|
| 22 |
+
|
| 23 |
+
## ADVERSARIAL PIPELINE v2 (ENHANCED)
|
| 24 |
+
|
| 25 |
+
Every phase uses the **Dual-Agent Adversarial Pipeline**. This PRD upgrades the adversarial auditor from v1 (surface-level checks) to v2 (deep cross-reference validation).
|
| 26 |
+
|
| 27 |
+
### Team A: Extraction Agent
|
| 28 |
+
- Writes and runs Python scripts following the [script template](../DATABASE_REFERENCE.md#10-prd-adding-new-data-to-existing-languages)
|
| 29 |
+
- Produces TSV data via HTTP fetch → parse → transliterate → write
|
| 30 |
+
- NEVER writes data values directly
|
| 31 |
+
|
| 32 |
+
### Team B: Critical Adversarial Auditor (v2 — ENHANCED)
|
| 33 |
+
|
| 34 |
+
**Runs after EACH step with VETO POWER.** The v2 auditor performs **deep validation**, not surface-level checks.
|
| 35 |
+
|
| 36 |
+
#### What Team B MUST Do (Deep Checks)
|
| 37 |
+
|
| 38 |
+
| Check | Method | Pass Criteria |
|
| 39 |
+
|-------|--------|---------------|
|
| 40 |
+
| **50-Word Cross-Reference** | Select 50 random entries from the newly scraped data. For each, fetch the LIVE source URL and verify the word appears there with the same form and meaning. | >= 48/50 match (96%). Any mismatch = STOP. |
|
| 41 |
+
| **IPA Spot-Check** | For 20 random entries, manually apply the transliteration map character-by-character and verify the output matches the IPA column. | 20/20 match. Any mismatch = flag map bug. |
|
| 42 |
+
| **SCA Consistency** | For 20 random entries, verify `ipa_to_sound_class(IPA)` == SCA column. | 20/20 match. |
|
| 43 |
+
| **Source Provenance** | For 10 random entries, construct the exact URL where each entry can be found in the original source. Verify it loads. | 10/10 accessible. |
|
| 44 |
+
| **Concept ID Accuracy** | For 20 random entries with non-empty Concept_IDs, verify the gloss matches the source's definition. | >= 18/20 match. |
|
| 45 |
+
| **Dedup Verification** | Count unique Word values in the output. Compare to total rows. | 0 duplicates. |
|
| 46 |
+
| **Entry Count Plausibility** | Verify count is non-round and matches expected range from source research. | Not exactly round (100, 200, 500). |
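
A sketch of how the crude presence part of the 50-word cross-reference could be mechanised (the URL template is an assumption; the `Word` column name follows the lexicon schema; form and meaning still need the auditor's judgment):

```python
# Sketch of the 50-word cross-reference check. The url_template is an
# illustrative assumption; real audits use the source's actual URL pattern.
import csv
import random
import urllib.request

def cross_reference(tsv_path: str, url_template: str, n: int = 50) -> int:
    with open(tsv_path, encoding="utf-8") as f:
        rows = list(csv.DictReader(f, delimiter="\t"))
    matches = 0
    for row in random.sample(rows, min(n, len(rows))):
        url = url_template.format(word=row["Word"])
        with urllib.request.urlopen(url) as resp:
            page = resp.read().decode("utf-8", errors="replace")
        matches += row["Word"] in page  # crude presence check only
    return matches

# e.g. cross_reference("data/training/lexicons/ave.tsv",
#                      "https://example.org/dict?q={word}")
```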
|
| 47 |
+
|
| 48 |
+
#### What Team B Must NOT Do (Banned Checks — Wastes Time)
|
| 49 |
+
|
| 50 |
+
- "Does the file have a header?" (Always yes by construction)
|
| 51 |
+
- "Are there HTML tags in the data?" (Parsing handles this)
|
| 52 |
+
- "Is the file UTF-8?" (Always yes by construction)
|
| 53 |
+
- "Does the script import urllib?" (Obvious from code review)
|
| 54 |
+
- Any check that doesn't touch real data
|
| 55 |
+
|
| 56 |
+
#### Auditor Report Format
|
| 57 |
+
|
| 58 |
+
```markdown
|
| 59 |
+
# Adversarial Audit v2: {Phase} — {Language} ({iso})
|
| 60 |
+
|
| 61 |
+
## 50-Word Cross-Reference
|
| 62 |
+
- Sampled: [list 50 words]
|
| 63 |
+
- Source URL pattern: {url}
|
| 64 |
+
- Matches: N/50
|
| 65 |
+
- Mismatches: [list any failures with details]
|
| 66 |
+
|
| 67 |
+
## IPA Spot-Check (20 entries)
|
| 68 |
+
| Word | Expected IPA | Actual IPA | Match? |
|
| 69 |
+
|------|-------------|------------|--------|
|
| 70 |
+
| ... | ... | ... | ... |
|
| 71 |
+
|
| 72 |
+
## SCA Consistency (20 entries)
|
| 73 |
+
- All match: YES/NO
|
| 74 |
+
|
| 75 |
+
## Source Provenance (10 entries)
|
| 76 |
+
| Word | Source URL | Accessible? |
|
| 77 |
+
|------|-----------|-------------|
|
| 78 |
+
| ... | ... | ... |
|
| 79 |
+
|
| 80 |
+
## Concept ID Accuracy (20 entries)
|
| 81 |
+
- Matches: N/20
|
| 82 |
+
|
| 83 |
+
## Dedup Check
|
| 84 |
+
- Unique words: N
|
| 85 |
+
- Total rows: N
|
| 86 |
+
- Duplicates: 0
|
| 87 |
+
|
| 88 |
+
## Verdict: PASS / FAIL
|
| 89 |
+
## Blocking Issues: [list if any]
|
| 90 |
+
```
|
| 91 |
+
|
| 92 |
+
---
|
| 93 |
+
|
| 94 |
+
## PROPER NOUNS POLICY
|
| 95 |
+
|
| 96 |
+
**Proper nouns (theonyms, toponyms, anthroponyms) are VALUED DATA, not contamination.**
|
| 97 |
+
|
| 98 |
+
All ancient language lexicons SHOULD include:
|
| 99 |
+
- **Theonyms** (divine names): gods, goddesses, mythological figures
|
| 100 |
+
- **Toponyms** (place names): cities, rivers, mountains, temples, regions
|
| 101 |
+
- **Anthroponyms** (personal names): rulers, historical figures, common name elements
|
| 102 |
+
- **Ethnonyms** (people/tribe names): tribal and ethnic designations
|
| 103 |
+
|
| 104 |
+
Concept_ID should tag these as `theonym:{name}`, `toponym:{name}`, `anthroponym:{name}`, `ethnonym:{name}`.
|
| 105 |
+
|
| 106 |
+
Where specialist proper noun databases exist (see Phase 5), they MUST be scraped alongside regular vocabulary.
|
| 107 |
+
|
| 108 |
+
---
|
| 109 |
+
|
| 110 |
+
## PHASE 0: Critical Bug Fixes
|
| 111 |
+
|
| 112 |
+
**Priority:** IMMEDIATE — blocks all other phases
|
| 113 |
+
**Estimated effort:** 1 session
|
| 114 |
+
**No adversarial audit needed** (code changes only, no data ingestion)
|
| 115 |
+
|
| 116 |
+
### 0.1 Fix SCA Tokenizer — Labiovelar Bug
|
| 117 |
+
|
| 118 |
+
**File:** `cognate_pipeline/src/cognate_pipeline/normalise/sound_class.py`
|
| 119 |
+
**Bug:** `ʷ` (U+02B7) missing from diacritic regex → produces spurious "0" for every labiovelar
|
| 120 |
+
**Fix:** Add `\u02B7` to the diacritic character class on the tokenizer regex (line ~95)
|
| 121 |
+
**Also add:** `\u02B1` (breathy voice ʱ) for PIE voiced aspirates
|
| 122 |
+
**Test:** Run `ipa_to_sound_class("kʷ")` → should produce `"K"` not `"K0"`
|
| 123 |
+
|
| 124 |
+
### 0.2 Fix SCA Tokenizer — Precomposed Nasalized Vowels
|
| 125 |
+
|
| 126 |
+
**Bug:** Precomposed `ã` (U+00E3), `ẽ` (U+1EBD), `ũ` (U+0169) may fail tokenizer regex
|
| 127 |
+
**Fix:** Either (a) apply Unicode decomposition (NFD) to the input before tokenizing, or (b) add the precomposed nasalized vowels to the character class
|
| 128 |
+
**Test:** Run `ipa_to_sound_class("ã")` → should produce `"A"` not `""`
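
A combined sketch of fixes 0.1 and 0.2 (the regex below is an assumed, simplified stand-in for the real tokenizer pattern in `sound_class.py`; only the added characters and the NFD step are the point):

```python
# Sketch of the 0.1/0.2 tokenizer fixes; the pattern is a simplified stand-in.
import re
import unicodedata

# 0.1: include ʷ (U+02B7) and ʱ (U+02B1) in the diacritic class so they attach
# to the preceding base segment instead of producing a spurious "0".
SEGMENT_RE = re.compile(r".[\u02B0\u02B1\u02B7\u0303\u0325ː]*")

def tokenize_ipa(ipa):
    # 0.2: decompose precomposed characters (e.g. ã -> a + U+0303) before tokenizing.
    ipa = unicodedata.normalize("NFD", ipa)
    return SEGMENT_RE.findall(ipa)

print(tokenize_ipa("kʷã"))  # -> ['kʷ', 'ã'] (tilde as a combining mark)
```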
|
| 129 |
+
|
| 130 |
+
### 0.3 Write Cleaning Script — Remove Bogus Entries
|
| 131 |
+
|
| 132 |
+
**Iron Law compliance:** Write a Python script `scripts/clean_artifacts.py` that:
|
| 133 |
+
1. Reads each ancient language TSV
|
| 134 |
+
2. Identifies known artifact patterns: `inprogress`, `phoneticvalue`, entries where Word matches `^[a-z]+progress$` or similar processing placeholders
|
| 135 |
+
3. Writes cleaned TSV (preserving all legitimate entries)
|
| 136 |
+
4. Logs removed entries to audit trail
|
| 137 |
+
5. Reports counts
|
| 138 |
+
|
| 139 |
+
**NOT a direct edit** — this is a deterministic cleaning script.
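
A minimal sketch of such a script (artifact patterns and paths follow the description above; the sketch runs over all lexicon TSVs, so restrict it to the ancient list in practice):

```python
#!/usr/bin/env python3
"""Remove known processing artifacts from lexicon TSVs (Phase 0.3 sketch)."""
import re
from pathlib import Path

# Extend as new artifact patterns are identified in the audit.
ARTIFACT_RE = re.compile(r"^(inprogress|phoneticvalue|[a-z]+progress)$")

def clean_tsv(path: Path, audit_dir: Path):
    lines = path.read_text(encoding="utf-8").splitlines(keepends=True)
    header, rows = lines[0], lines[1:]
    kept = [r for r in rows if not ARTIFACT_RE.match(r.split("\t")[0])]
    removed = [r for r in rows if ARTIFACT_RE.match(r.split("\t")[0])]
    if removed:
        # Audit trail: keep a record of every removed row.
        (audit_dir / f"removed_{path.stem}.tsv").write_text("".join(removed), encoding="utf-8")
        path.write_text(header + "".join(kept), encoding="utf-8")
    return len(kept), len(removed)

if __name__ == "__main__":
    audit = Path("data/training/audit_trails")
    audit.mkdir(parents=True, exist_ok=True)
    for tsv in sorted(Path("data/training/lexicons").glob("*.tsv")):
        kept, removed = clean_tsv(tsv, audit)
        print(f"{tsv.stem}: kept {kept}, removed {removed}")
```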
|
| 140 |
+
|
| 141 |
+
### 0.4 Fix Metadata — Add Ancient Languages to languages.tsv
|
| 142 |
+
|
| 143 |
+
**Script:** `scripts/update_metadata.py` — reads all TSVs in lexicons/, counts entries, updates `languages.tsv` with ISO, name, family, entry count, source breakdown. Run after every data change.
|
| 144 |
+
|
| 145 |
+
### 0.5 Presentation Fixes
|
| 146 |
+
|
| 147 |
+
| Task | Action |
|
| 148 |
+
|------|--------|
|
| 149 |
+
| Add LICENSE file | Create `LICENSE` at repo root with CC-BY-SA-4.0 full text |
|
| 150 |
+
| Make HuggingFace public | Change dataset visibility to public (manual step) |
|
| 151 |
+
| Fix HuggingFace README | Expand to include Quick Start, citations, limitations, loading examples |
|
| 152 |
+
| Remove leaked files | Add `sources/`, `.pytest_cache/`, `*.pth`, `*.pkl` to HF `.gitignore`; remove copyrighted PDFs |
|
| 153 |
+
| Fix lexicon count | Identify which of 1,136 claimed files is missing; create or correct count |
|
| 154 |
+
|
| 155 |
+
---
|
| 156 |
+
|
| 157 |
+
## PHASE 1: IPA & Transliteration Map Corrections
|
| 158 |
+
|
| 159 |
+
**Priority:** HIGH — affects all downstream phonetic analysis
|
| 160 |
+
**Estimated effort:** 1 session
|
| 161 |
+
**Adversarial audit:** YES (Team B verifies 20 entries per map change via IPA spot-check)
|
| 162 |
+
|
| 163 |
+
### 1.1 Transliteration Map Fixes
|
| 164 |
+
|
| 165 |
+
Each fix below modifies `scripts/transliteration_maps.py` (CODE, not DATA). After all fixes, run `scripts/reprocess_ipa.py` to propagate changes.
|
| 166 |
+
|
| 167 |
+
| Fix | Language | Change | Academic Reference |
|
| 168 |
+
|-----|----------|--------|--------------------|
|
| 169 |
+
| **Etruscan θ/φ/χ consistency** | ett | Change θ→`tʰ` (aligning with φ→`pʰ`, χ→`kʰ` as aspirated stop series) OR change all three to fricatives (θ, f, x). Pick ONE. Recommended: all aspirated stops per Bonfante early-period analysis: θ→`tʰ` | Bonfante & Bonfante (2002), Rix (1963) |
|
| 170 |
+
| **Lydian ś/š distinction** | xld | Change ś→`ɕ` (alveolopalatal, matching Carian treatment), keep š→`ʃ` | Gusmani (1964), Melchert |
|
| 171 |
+
| **Carian ỳ/ý placeholders** | xcr | Map to best-guess IPA or explicit unknown marker. Recommended: ỳ→`ə`, ý→`e` (tentative vocalic values) with comment noting uncertainty | Adiego (2007) |
|
| 172 |
+
| **Hittite š controversy** | hit | ADD COMMENT documenting the debate. Keep š→`ʃ` as the current choice but note: "Kloekhorst (2008) argues for [s]. Hoffner & Melchert (2008) use the conventional symbol." Do NOT change the value without user decision. | Hoffner & Melchert (2008), Kloekhorst (2008) |
|
| 173 |
+
| **Old Persian ç controversy** | peo | ADD COMMENT documenting the debate. Keep ç→`θ` per Kent but note Kloekhorst's /ts/ argument. | Kent (1953), Kloekhorst (2008) |
|
| 174 |
+
| **PIE h₃ value** | ine | ADD COMMENT noting the speculative nature of h₃→`ɣʷ`. Note: "Leiden school reconstruction. Many scholars leave h₃ phonetically unspecified." | Beekes (2011), Fortson (2010) |
|
| 175 |
+
| **Missing Phrygian Greek letters** | xpg | Add: ξ→`ks`, ψ→`ps`, φ→`pʰ`, χ→`kʰ` | Brixhe & Lejeune (1984) |
|
| 176 |
+
| **Missing Proto-Kartvelian aspirated affricates** | ccs | Add: cʰ→`tsʰ`, čʰ→`tʃʰ` | Klimov (1998) |
|
| 177 |
+
| **Missing Old Persian signs** | peo | Add: U+103AE (di), U+103B8 (mu), U+103BB (vi) | Kent (1953) |
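
For example, the Phrygian additions would look roughly as follows in `transliteration_maps.py` (the variable name `PHRYGIAN_MAP` is an assumption; use whatever name the module already defines for xpg):

```python
# Sketch only -- PHRYGIAN_MAP is an assumed name for the existing xpg map.
# Additions per Brixhe & Lejeune (1984), matching the table above.
PHRYGIAN_MAP.update({
    "ξ": "ks",
    "ψ": "ps",
    "φ": "pʰ",
    "χ": "kʰ",
})
```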
|
| 178 |
+
|
| 179 |
+
### 1.2 Post-Fix Reprocessing
|
| 180 |
+
|
| 181 |
+
```bash
|
| 182 |
+
# Dry run first (ALWAYS)
|
| 183 |
+
python scripts/reprocess_ipa.py --dry-run
|
| 184 |
+
|
| 185 |
+
# Verify: identity rates should decrease or stay the same, NEVER increase
|
| 186 |
+
# Verify: no regressions (Never-Regress Rule)
|
| 187 |
+
|
| 188 |
+
# Run live
|
| 189 |
+
python scripts/reprocess_ipa.py
|
| 190 |
+
```
|
| 191 |
+
|
| 192 |
+
### 1.3 Adversarial Audit for Phase 1
|
| 193 |
+
|
| 194 |
+
Team B verifies:
|
| 195 |
+
- For each modified map: take 20 entries from that language's TSV, manually apply the updated map, verify IPA matches
|
| 196 |
+
- Verify no regressions: compare before/after identity rates
|
| 197 |
+
- Verify SCA correctness for 20 entries per language
|
| 198 |
+
|
| 199 |
+
---
|
| 200 |
+
|
| 201 |
+
## PHASE 2: Data Restoration & Cleanup
|
| 202 |
+
|
| 203 |
+
**Priority:** HIGH — fixes audit-identified data problems
|
| 204 |
+
**Estimated effort:** 1–2 sessions
|
| 205 |
+
**Adversarial audit:** YES (full v2 pipeline)
|
| 206 |
+
|
| 207 |
+
### 2.1 Avestan — Re-scrape avesta.org (Restore Missing 2,716 Entries)
|
| 208 |
+
|
| 209 |
+
**Problem:** DATABASE_REFERENCE.md claims 3,455 entries including 2,716 from `avesta_org`, but `ave.tsv` only has 739 entries. The avesta_org data was either never ingested or was lost.
|
| 210 |
+
|
| 211 |
+
**Script:** `scripts/scrape_avesta_org.py` (already exists — re-run or debug)
|
| 212 |
+
|
| 213 |
+
**Steps:**
|
| 214 |
+
1. Team A: Verify `scrape_avesta_org.py` still works against live site
|
| 215 |
+
2. Team A: Run `--dry-run` to confirm expected entry count
|
| 216 |
+
3. Team A: Run live scrape, deduplicating against existing 739 entries
|
| 217 |
+
4. Team B: 50-word cross-reference against live avesta.org/avdict/avdict.htm
|
| 218 |
+
5. Team B: IPA spot-check 20 entries against `AVESTAN_MAP`
|
| 219 |
+
6. Update DATABASE_REFERENCE.md with actual count
|
| 220 |
+
|
| 221 |
+
**Acceptance:** `ave.tsv` has 2,500+ entries (the 3,455 figure was aspirational; the actual total may differ)
|
| 222 |
+
|
| 223 |
+
### 2.2 Sumerogram Handling Script
|
| 224 |
+
|
| 225 |
+
**Problem:** Hittite (10+ entries), Luwian (581), and Urartian (171) contain Sumerograms — uppercase cuneiform logograms (LUGAL, URU, DINGIR, etc.) that are NOT phonemic data in the target language.
|
| 226 |
+
|
| 227 |
+
**Script:** `scripts/tag_sumerograms.py`
|
| 228 |
+
|
| 229 |
+
**Approach:** Do NOT delete Sumerograms — they are legitimate scholarly data. Instead:
|
| 230 |
+
1. Write a script that identifies likely Sumerograms (all-uppercase ASCII, known Sumerogram patterns)
|
| 231 |
+
2. Add a tag to the Concept_ID field: prefix with `sumerogram:` (e.g., `sumerogram:king` for LUGAL)
|
| 232 |
+
3. This allows downstream pipelines to filter them if needed while preserving the data
|
| 233 |
+
4. Log all tagged entries to audit trail
|
| 234 |
+
|
| 235 |
+
**Sumerogram detection heuristic:**
|
| 236 |
+
```python
|
| 237 |
+
import re

def is_sumerogram(word: str) -> bool:
|
| 238 |
+
"""Detect cuneiform Sumerograms (uppercase sign names)."""
|
| 239 |
+
if word.isupper() and word.isascii() and len(word) >= 2:
|
| 240 |
+
return True
|
| 241 |
+
if re.match(r'^[A-Z]+(\.[A-Z]+)+$', word): # MUNUS.LUGAL pattern
|
| 242 |
+
return True
|
| 243 |
+
if re.match(r'^[A-Z]+\d+$', word): # KU6, AN2 pattern
|
| 244 |
+
return True
|
| 245 |
+
return False
|
| 246 |
+
```
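
Applied to a TSV row, the tag is written into the Concept_ID field (fifth column, following the extraction template column order in DATABASE_REFERENCE.md); a minimal sketch:

```python
# Sketch: column order assumed from the extraction template
# (Word, IPA, SCA, Source, Concept_ID, Cognate_Set_ID).
def tag_row(row: str) -> str:
    parts = row.rstrip("\n").split("\t")
    if len(parts) >= 5 and is_sumerogram(parts[0]) and not parts[4].startswith("sumerogram:"):
        gloss = parts[4] if parts[4] not in ("", "-") else ""
        parts[4] = f"sumerogram:{gloss}"
        return "\t".join(parts) + "\n"
    return row
```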
|
| 247 |
+
|
| 248 |
+
**Team B checks:** Verify 20 tagged entries are actually Sumerograms (not coincidentally uppercase native words).
|
| 249 |
+
|
| 250 |
+
### 2.3 Cross-Language Contamination Fix
|
| 251 |
+
|
| 252 |
+
**Problem:** `hit.tsv` contains at least one Avestan word (`xshap` = "night") and Akkadian entries (`GE` = "ina").
|
| 253 |
+
|
| 254 |
+
**Script:** `scripts/clean_cross_contamination.py`
|
| 255 |
+
1. For each ancient language TSV, check every entry against a known-contamination list (populated from audit findings)
|
| 256 |
+
2. Remove entries confirmed to be from wrong language
|
| 257 |
+
3. Log removals to audit trail
|
| 258 |
+
|
| 259 |
+
**Known contamination (from audit):**
|
| 260 |
+
- `hit.tsv`: `xshap` (Avestan), `GE`/`ina` (Akkadian)
|
| 261 |
+
|
| 262 |
+
**Team B checks:** Verify each removed entry is genuinely from the wrong language by checking Wiktionary source pages.
|
| 263 |
+
|
| 264 |
+
---
|
| 265 |
+
|
| 266 |
+
## PHASE 3: New Language Ingestion — Tier 1
|
| 267 |
+
|
| 268 |
+
**Priority:** HIGH — the 9 most critical missing languages
|
| 269 |
+
**Estimated effort:** 3–5 sessions (can parallelize across languages)
|
| 270 |
+
**Adversarial audit:** YES (full v2 pipeline per language)
|
| 271 |
+
|
| 272 |
+
### General Protocol (applies to all Tier 1 languages)
|
| 273 |
+
|
| 274 |
+
For each new language:
|
| 275 |
+
|
| 276 |
+
1. **Create transliteration map** in `transliteration_maps.py` (if needed) with cited academic reference
|
| 277 |
+
2. **Write extraction script** following the [standard template](../DATABASE_REFERENCE.md#10-prd-adding-new-data-to-existing-languages):
|
| 278 |
+
- Must use `urllib.request.urlopen()` or `requests.get()`
|
| 279 |
+
- Must deduplicate against existing entries
|
| 280 |
+
- Must apply `transliterate()` and `ipa_to_sound_class()`
|
| 281 |
+
- Must save raw JSON/HTML to `data/training/raw/`
|
| 282 |
+
- Must save audit trail to `data/training/audit_trails/`
|
| 283 |
+
3. **Run `--dry-run`** first
|
| 284 |
+
4. **Deploy Team B adversarial auditor** (full v2: 50-word cross-ref, IPA spot-check, etc.)
|
| 285 |
+
5. **Run live**
|
| 286 |
+
6. **Add to `language_configs.py`**
|
| 287 |
+
7. **Run `reprocess_ipa.py --language {iso}`**
|
| 288 |
+
8. **Update metadata** (`languages.tsv`)
|
| 289 |
+
9. **Commit & push** to both GitHub and HuggingFace
|
| 290 |
+
|
| 291 |
+
---
|
| 292 |
+
|
| 293 |
+
### 3.1 Sumerian (sux)
|
| 294 |
+
|
| 295 |
+
| Field | Value |
|
| 296 |
+
|-------|-------|
|
| 297 |
+
| ISO | sux |
|
| 298 |
+
| Family | Isolate |
|
| 299 |
+
| Primary Source | **ePSD2** — `oracc.museum.upenn.edu/epsd2/sux` (JSON API) |
|
| 300 |
+
| Secondary Source | DCCLT lexical texts via Oracc |
|
| 301 |
+
| Expected entries | 10,000–15,944 lemmas |
|
| 302 |
+
| Script name | `scripts/scrape_epsd2_sumerian.py` |
|
| 303 |
+
| Transliteration map | New: `SUMERIAN_MAP` — cuneiform transliteration → IPA (Jagersma 2010, Edzard 2003) |
|
| 304 |
+
| IPA type | Partial (phonology reconstructed via Akkadian scribal conventions) |
|
| 305 |
+
| Special handling | Strip determinatives (superscript d, GIS, etc.). Tag Sumerograms vs. phonemic entries. Separate emesal (women's dialect) from emegir (main dialect). |
|
| 306 |
+
| Proper nouns to include | Divine names (Enlil, Inanna, Enki, Utu, Nanna, etc.), city names (Ur, Uruk, Lagash, Nippur, Eridu, etc.), royal names (Gilgamesh, Ur-Nammu, Shulgi, etc.) |
|
| 307 |
+
|
| 308 |
+
**Scraping approach:**
|
| 309 |
+
- ePSD2 exposes a JSON API at `oracc.museum.upenn.edu/epsd2/json/`
|
| 310 |
+
- Fetch the full glossary index, then individual lemma pages
|
| 311 |
+
- Parse: headword, citation form, base, morphology, English gloss
|
| 312 |
+
- The ePSD2 provides transliterations in standard Assyriological conventions
|
| 313 |
+
|
| 314 |
+
### 3.2 Akkadian (akk)
|
| 315 |
+
|
| 316 |
+
| Field | Value |
|
| 317 |
+
|-------|-------|
|
| 318 |
+
| ISO | akk |
|
| 319 |
+
| Family | Afroasiatic > Semitic (East) |
|
| 320 |
+
| Primary Source | **AssyrianLanguages.org** — `assyrianlanguages.org/akkadian/` (searchable dictionary) |
|
| 321 |
+
| Secondary Source | Oracc glossaries, Wiktionary Category:Akkadian_lemmas |
|
| 322 |
+
| Expected entries | 5,000–10,000 (from online searchable sources; full CAD is 28K but PDF-only) |
|
| 323 |
+
| Script name | `scripts/scrape_akkadian.py` |
|
| 324 |
+
| Transliteration map | New: `AKKADIAN_MAP` — standard Assyriological transliteration → IPA (Huehnergard 2011, von Soden 1995) |
|
| 325 |
+
| IPA type | Broad phonemic (well-understood via comparative Semitic + cuneiform orthography) |
|
| 326 |
+
| Special handling | Distinguish Old Babylonian, Middle Babylonian, Neo-Assyrian, etc. via source metadata if available. Handle determinatives. |
|
| 327 |
+
| Proper nouns to include | Divine names (Marduk, Ishtar, Shamash, Ea, Sin, Nabu, etc.), city names (Babylon, Nineveh, Assur, Sippar, etc.), royal names (Hammurabi, Sargon, Nebuchadnezzar, etc.) |
|
| 328 |
+
|
| 329 |
+
### 3.3 Ancient Egyptian (egy)
|
| 330 |
+
|
| 331 |
+
| Field | Value |
|
| 332 |
+
|-------|-------|
|
| 333 |
+
| ISO | egy |
|
| 334 |
+
| Family | Afroasiatic > Egyptian |
|
| 335 |
+
| Primary Source | **TLA** — `thesaurus-linguae-aegyptiae.de` (API or web scrape) |
|
| 336 |
+
| Secondary Source | TLA HuggingFace datasets (`huggingface.co/datasets/thesaurus-linguae-aegyptiae/`) |
|
| 337 |
+
| Expected entries | 10,000–49,037 lemmas |
|
| 338 |
+
| Script name | `scripts/scrape_tla_egyptian.py` |
|
| 339 |
+
| Transliteration map | New: `EGYPTIAN_MAP` — Egyptological transliteration (Manuel de Codage) → IPA (Allen 2014, Loprieno 1995) |
|
| 340 |
+
| IPA type | Partial (consonantal skeleton well-known; vowels reconstructed from Coptic, cuneiform transcriptions, and comparative Afroasiatic) |
|
| 341 |
+
| Special handling | Egyptian had no written vowels. Provide consonantal IPA skeleton. Consider separate entries for different periods (Old/Middle/Late/Demotic). Hieroglyphic Unicode signs (U+13000–U+1342F) should be mapped if present. |
|
| 342 |
+
| Proper nouns to include | Pharaoh names (Khufu, Ramesses, Thutmose, etc.), deity names (Ra, Osiris, Isis, Horus, Thoth, Anubis, etc.), place names (Thebes, Memphis, Heliopolis, etc.) |
|
| 343 |
+
|
| 344 |
+
### 3.4 Sanskrit (san)
|
| 345 |
+
|
| 346 |
+
| Field | Value |
|
| 347 |
+
|-------|-------|
|
| 348 |
+
| ISO | san |
|
| 349 |
+
| Family | Indo-European > Indo-Iranian > Indo-Aryan |
|
| 350 |
+
| Primary Source | **Wiktionary** Category:Sanskrit_lemmas (massive category) |
|
| 351 |
+
| Secondary Source | WikiPron Sanskrit entries, DCS (Digital Corpus of Sanskrit) if API accessible |
|
| 352 |
+
| Expected entries | 5,000–20,000 from Wiktionary alone |
|
| 353 |
+
| Script name | `scripts/scrape_sanskrit.py` |
|
| 354 |
+
| Transliteration map | New: `SANSKRIT_MAP` — IAST/Devanagari → IPA (Whitney 1896, Mayrhofer 1986) |
|
| 355 |
+
| IPA type | Full phonemic (Sanskrit phonology is comprehensively documented) |
|
| 356 |
+
| Special handling | Handle both Devanagari (U+0900–U+097F) and IAST romanization. Vedic Sanskrit vs Classical Sanskrit distinction desirable. |
|
| 357 |
+
| Proper nouns to include | Divine names (Indra, Agni, Varuna, Vishnu, Shiva, etc.), place names (Hastinapura, Ayodhya, Lanka, etc.), epic names (Arjuna, Rama, Krishna, etc.) |
|
| 358 |
+
|
| 359 |
+
### 3.5 Ancient Greek (grc)
|
| 360 |
+
|
| 361 |
+
| Field | Value |
|
| 362 |
+
|-------|-------|
|
| 363 |
+
| ISO | grc |
|
| 364 |
+
| Family | Indo-European > Hellenic |
|
| 365 |
+
| Primary Source | **Wiktionary** Category:Ancient_Greek_lemmas |
|
| 366 |
+
| Secondary Source | WikiPron Ancient Greek entries, Perseus Digital Library |
|
| 367 |
+
| Expected entries | 10,000+ from Wiktionary |
|
| 368 |
+
| Script name | `scripts/scrape_ancient_greek.py` |
|
| 369 |
+
| Transliteration map | New: `ANCIENT_GREEK_MAP` — Greek alphabet → reconstructed Classical Attic IPA (Allen 1987, Smyth 1920) |
|
| 370 |
+
| IPA type | Full phonemic (Classical Attic pronunciation well-reconstructed) |
|
| 371 |
+
| Special handling | Use Classical Attic pronunciation (not Koine or Modern). Handle polytonic orthography (breathing marks, accents). Distinguish from Modern Greek WikiPron entries. |
|
| 372 |
+
| Proper nouns to include | Theonyms (Zeus, Athena, Apollo, Hermes, etc.), place names (Athens, Sparta, Thebes, Troy, etc.), hero names (Achilles, Odysseus, Herakles, etc.) |
|
| 373 |
+
|
| 374 |
+
### 3.6 Gothic (got)
|
| 375 |
+
|
| 376 |
+
| Field | Value |
|
| 377 |
+
|-------|-------|
|
| 378 |
+
| ISO | got |
|
| 379 |
+
| Family | Indo-European > Germanic (East) |
|
| 380 |
+
| Primary Source | **Project Wulfila** — `wulfila.be` (TEI corpus + glossary) |
|
| 381 |
+
| Secondary Source | Wiktionary Category:Gothic_lemmas |
|
| 382 |
+
| Expected entries | 3,000–3,600 lemmas |
|
| 383 |
+
| Script name | `scripts/scrape_wulfila_gothic.py` |
|
| 384 |
+
| Transliteration map | New: `GOTHIC_MAP` — Gothic alphabet (U+10330–U+1034F) + transliteration → IPA (Wright 1910, Braune/Heidermanns 2004) |
|
| 385 |
+
| IPA type | Full phonemic (Gothic phonology well-understood from comparative Germanic) |
|
| 386 |
+
| Special handling | Handle Gothic script Unicode block. Project Wulfila provides downloadable TEI XML — use cached-fetch pattern if needed. |
|
| 387 |
+
| Proper nouns to include | Biblical proper nouns in Gothic form (Iesus, Xristus, Pawlus, Iairusalem, etc.), tribal names (Gutans, etc.) |
|
| 388 |
+
|
| 389 |
+
### 3.7 Mycenaean Greek (gmy)
|
| 390 |
+
|
| 391 |
+
| Field | Value |
|
| 392 |
+
|-------|-------|
|
| 393 |
+
| ISO | gmy |
|
| 394 |
+
| Family | Indo-European > Hellenic |
|
| 395 |
+
| Primary Source | **DAMOS** — `damos.hf.uio.no` (complete annotated Mycenaean corpus) |
|
| 396 |
+
| Secondary Source | Palaeolexicon Linear B section |
|
| 397 |
+
| Expected entries | 500–800 |
|
| 398 |
+
| Script name | `scripts/scrape_damos_mycenaean.py` |
|
| 399 |
+
| Transliteration map | New: `MYCENAEAN_MAP` — Linear B syllabary → reconstructed IPA (Ventris & Chadwick 1973, Bartonek 2003) |
|
| 400 |
+
| IPA type | Partial (Linear B is a syllabary that obscures many consonant clusters and final consonants) |
|
| 401 |
+
| Special handling | Linear B is a syllabary — each sign represents a CV syllable. The underlying Greek word must be reconstructed from the syllabic spelling. Many readings are uncertain. |
|
| 402 |
+
| Proper nouns to include | Place names from tablets (pa-ki-ja-ne/Sphagianai, ko-no-so/Knossos, etc.), divine names (di-wo/Zeus, a-ta-na-po-ti-ni-ja/Athena Potnia, etc.) |
|
| 403 |
+
|
| 404 |
+
### 3.8 Old Church Slavonic (chu)
|
| 405 |
+
|
| 406 |
+
| Field | Value |
|
| 407 |
+
|-------|-------|
|
| 408 |
+
| ISO | chu |
|
| 409 |
+
| Family | Indo-European > Slavic (South) |
|
| 410 |
+
| Primary Source | **Wiktionary** Category:Old_Church_Slavonic_lemmas |
|
| 411 |
+
| Secondary Source | GORAZD digital dictionary (`gorazd.org`) if API accessible |
|
| 412 |
+
| Expected entries | 2,000–5,000 from Wiktionary |
|
| 413 |
+
| Script name | `scripts/scrape_ocs.py` |
|
| 414 |
+
| Transliteration map | New: `OCS_MAP` — Cyrillic/Glagolitic → IPA (Lunt 2001) |
|
| 415 |
+
| IPA type | Full phonemic (OCS phonology well-established) |
|
| 416 |
+
| Special handling | Handle both Cyrillic and Glagolitic scripts. OCS Cyrillic uses characters not in modern Cyrillic (ѣ, ъ, ь, ѫ, ѧ, etc.). |
|
| 417 |
+
| Proper nouns to include | Place names from OCS texts, biblical proper nouns in OCS form |
|
| 418 |
+
|
| 419 |
+
### 3.9 Old Norse (non)
|
| 420 |
+
|
| 421 |
+
| Field | Value |
|
| 422 |
+
|-------|-------|
|
| 423 |
+
| ISO | non |
|
| 424 |
+
| Family | Indo-European > Germanic (North) |
|
| 425 |
+
| Primary Source | **Wiktionary** Category:Old_Norse_lemmas |
|
| 426 |
+
| Secondary Source | Cleasby-Vigfusson online if scrapable |
|
| 427 |
+
| Expected entries | 5,000–10,000 |
|
| 428 |
+
| Script name | `scripts/scrape_old_norse.py` |
|
| 429 |
+
| Transliteration map | New: `OLD_NORSE_MAP` — Old Norse orthography → IPA (Gordon 1957, Noreen 1923) |
|
| 430 |
+
| IPA type | Full phonemic (Old Norse phonology well-documented) |
|
| 431 |
+
| Special handling | Handle Old Norse special characters (ð, þ, æ, ø, ǫ). Distinguish Old West Norse (Old Icelandic) from Old East Norse if possible. |
|
| 432 |
+
| Proper nouns to include | Divine names from Eddas (Oðinn, Þórr, Freyr, Freyja, Loki, Baldr, etc.), place names (Ásgarðr, Miðgarðr, Jǫtunheimr, etc.), hero names (Sigurðr, Ragnarr, etc.) |
|
| 433 |
+
|
| 434 |
+
---
|
| 435 |
+
|
| 436 |
+
## PHASE 4: Proper Noun Expansion
|
| 437 |
+
|
| 438 |
+
**Priority:** MEDIUM-HIGH — enhances all existing and new languages
|
| 439 |
+
**Estimated effort:** 2–3 sessions (parallelizable)
|
| 440 |
+
**Adversarial audit:** YES (full v2 pipeline)
|
| 441 |
+
|
| 442 |
+
### 4.1 Strategy
|
| 443 |
+
|
| 444 |
+
For each language already in the database (and each new Tier 1 language), identify and scrape specialist proper noun sources. Proper nouns are tagged in Concept_ID as:
|
| 445 |
+
- `theonym:{name}` — divine/mythological names
|
| 446 |
+
- `toponym:{name}` — place names
|
| 447 |
+
- `anthroponym:{name}` — personal names (rulers, historical figures)
|
| 448 |
+
- `ethnonym:{name}` — tribal/ethnic names
|
| 449 |
+
|
| 450 |
+
### 4.2 Proper Noun Sources by Language (Detailed — from specialist research)
|
| 451 |
+
|
| 452 |
+
#### Tier 1 Sources: Structured Data with API/Download (Best Targets)
|
| 453 |
+
|
| 454 |
+
| # | Language | Source | URL | API Type | Est. Proper Nouns | Notes |
|
| 455 |
+
|---|----------|--------|-----|----------|-------------------|-------|
|
| 456 |
+
| 1 | **Sumerian** | ORACC ePSD2 QPN glossaries | `oracc.museum.upenn.edu/epsd2/names/` | **JSON API** (`build-oracc.museum.upenn.edu/json/`) | 1,000+ (qpn-x-divine, qpn-x-placeN, qpn-x-people, qpn-x-temple, qpn-x-ethnic, qpn-x-celestial) | Sub-glossaries by type code. Best structured source in entire survey. |
|
| 457 |
+
| 2 | **Sumerian** | ETCSL proper nouns (Oxford) | `etcsl.orinst.ox.ac.uk/cgi-bin/etcslpropnoun.cgi` | Scrapable HTML tables | **917 unique** (12,537 occurrences): ~400 DN, ~200 RN, ~150 SN, ~120 TN, ~80 PN | Categorized by type (DN/RN/SN/TN/PN/GN/WN). |
|
| 458 |
+
| 3 | **Akkadian** | ORACC QPN glossaries (all sub-projects) | `oracc.museum.upenn.edu` (rinap, saao, cams, etc.) | **JSON API** | Thousands across dozens of sub-projects | Same JSON structure as Sumerian QPN. Covers Neo-Assyrian, Neo-Babylonian, Old Babylonian. |
|
| 459 |
+
| 4 | **Egyptian** | TLA proper noun lemmas | `thesaurus-linguae-aegyptiae.de` | **JSON/TEI XML API** + **HuggingFace JSONL** | Thousands (subset of 49,037 + 11,610 lemmas) | Categories for kings, deities, persons, places, titles. Raw JSON + TEI XML in lasting repository. |
|
| 460 |
+
| 5 | **Egyptian** | Pharaoh.se king list | `pharaoh.se` | Scrapable HTML | **300–350 pharaoh names** (with variants) | Turin Canon (223), Abydos (76), Karnak (61), Saqqara (58), Manetho. Per-pharaoh URLs. |
|
| 461 |
+
| 6 | **Ancient Greek** | LGPN (Lexicon of Greek Personal Names, Oxford) | `search.lgpn.ox.ac.uk` | **REST API** (`clas-lgpn5.classics.ox.ac.uk:8080/exist/apps/lgpn-api/`) | **35,982 unique personal names** (~400,000 individuals across 8 volumes) | Single richest source for ancient Greek anthroponyms. Data also in ORA (Oxford Research Archive). |
|
| 462 |
+
| 7 | **Ancient Greek** | Pleiades Gazetteer | `pleiades.stoa.org` | **JSON + CSV bulk download** (daily dumps at `atlantides.org/downloads/pleiades/json/`) | **36,000+ places**, **26,000+ ancient names** | GitHub releases. CC-BY licensed. Coordinates, time periods, citations. |
|
| 463 |
+
| 8 | **Ancient Greek** | Theoi.com mythology | `theoi.com` | Scrapable HTML (consistent structure) | **1,000–1,500 mythological figures** | Gods, daimones, creatures, heroes. Alphabetical pages. |
|
| 464 |
+
| 9 | **Gothic** | Project Wulfila | `wulfila.be/gothic/download/` | **TEI XML download** with POS tags | **200–300 biblical proper nouns** | Nouns tagged "Noun, proper." Most machine-friendly source in survey. |
|
| 465 |
+
| 10 | **Etruscan** | CIE/TLE Digital Concordance (Zenodo) | Zenodo (search "Etruscan Faliscan concordance") | **CSV download** | **1,000+ unique names** (from 12,000+ inscriptions) | ~67% of inscriptions contain personal names. Far exceeds current ~250. |
|
| 466 |
+
|
| 467 |
+
#### Tier 2 Sources: Structured HTML, Easily Scrapable
|
| 468 |
+
|
| 469 |
+
| # | Language | Source | URL | Est. Proper Nouns | Notes |
|
| 470 |
+
|---|----------|--------|-----|-------------------|-------|
|
| 471 |
+
| 11 | **Sumerian** | AMGG (Ancient Mesopotamian Gods & Goddesses) | `oracc.museum.upenn.edu/amgg/listofdeities/` | ~100 major deity profiles | Scholarly profiles with epithets, iconography. |
|
| 472 |
+
| 12 | **Hittite** | HDN (Hittite Divine Names) | `cuneiform.neocities.org/HDN/outline` | ~1,000+ divine name entries | Updates van Gessel's 3-volume *Onomasticon*. HTML tables + PDF. |
|
| 473 |
+
| 13 | **Hittite** | HPN + LAMAN (Hittite Name Finder) | `cuneiform.neocities.org/HPN/outline` / `cuneiform.neocities.org/laman/start` | Hundreds of personal names | Unified divine + geographical + personal name retrieval. |
|
| 474 |
+
| 14 | **Ugaritic** | Wikipedia List of Ugaritic Deities | `en.wikipedia.org/wiki/List_of_Ugaritic_deities` | **200–234 divine names** | MediaWiki API. Cuneiform/alphabetic writings + functions. |
|
| 475 |
+
| 15 | **Ugaritic** | Sapiru Project deity lists | `sapiru.wordpress.com` | ~60–80 per list (multiple lists) | Actual Ras Shamra sacrificial deity lists (~1250 BCE). |
|
| 476 |
+
| 16 | **Avestan** | Avesta.org Zoroastrian Names | `avesta.org/znames.htm` | **400+ personal names** + divine names | Single long page. Based on Bartholomae. |
|
| 477 |
+
| 17 | **Avestan** | Encyclopaedia Iranica | `iranicaonline.org` | 400+ names (article "Personal Names, Iranian ii") | Per-deity articles (Anahita, Mithra, Verethragna, Amesha Spentas). |
|
| 478 |
+
| 18 | **Etruscan** | ETP (Etruscan Texts Project, UMass) | `scholarworks.umass.edu/ces_texts/` | 200+ (from 300+ post-1990 inscriptions) | Searchable by keyword, location, date. |
|
| 479 |
+
| 19 | **Etruscan** | Godchecker Etruscan Mythology | `godchecker.com/etruscan-mythology/list-of-names/` | **89 deity names** | Static HTML list. |
|
| 480 |
+
| 20 | **Old Norse** | Nordic Names | `nordicnames.de/wiki/Category:Old_Norse_Names` | Substantial subset of 50,000+ total | MediaWiki API. Name, meaning, etymology, gender. |
|
| 481 |
+
| 21 | **Old Norse** | Eddic proper nouns (Voluspa.org / Sacred-Texts) | `voluspa.org/poeticedda.htm` | **500–800 unique** (deities, giants, dwarves, places, weapons) | Dvergatal alone lists ~70 dwarf names. Requires NLP extraction. |
|
| 482 |
+
|
| 483 |
+
#### Tier 3 Sources: Existing + Wiktionary Expansion
|
| 484 |
+
|
| 485 |
+
| Language | Source | URL | Est. Names |
|
| 486 |
+
|----------|--------|-----|------------|
|
| 487 |
+
| Hurrian | Palaeolexicon | `palaeolexicon.com` | 50+ |
|
| 488 |
+
| Urartian | Oracc eCUT | `oracc.museum.upenn.edu/ecut/` | 100+ |
|
| 489 |
+
| Lycian/Lydian/Carian | eDiAna | `ediana.gwi.uni-muenchen.de` | 50+ each |
|
| 490 |
+
| Phoenician | Wiktionary | `en.wiktionary.org` | 50+ |
|
| 491 |
+
| PIE | Wiktionary reconstructed theonyms | `en.wiktionary.org` | 30+ |
|
| 492 |
+
| Mycenaean | DAMOS | `damos.hf.uio.no` | 100+ |
|
| 493 |
+
| Sanskrit | Wiktionary proper nouns | `en.wiktionary.org` | 500+ |
|
| 494 |
+
| OCS | Wiktionary proper nouns | `en.wiktionary.org` | 100+ |
|
| 495 |
+
|
| 496 |
+
### 4.3 Per-Language Script
|
| 497 |
+
|
| 498 |
+
Create `scripts/scrape_proper_nouns.py` — a unified script with per-language configs:
|
| 499 |
+
|
| 500 |
+
```python
|
| 501 |
+
PROPER_NOUN_CONFIGS = {
|
| 502 |
+
"grc": {
|
| 503 |
+
"sources": [
|
| 504 |
+
{"type": "wiktionary_cat", "category": "Category:Ancient_Greek_proper_nouns"},
|
| 505 |
+
{"type": "theoi", "url": "https://www.theoi.com/greek-mythology/..."},
|
| 506 |
+
],
|
| 507 |
+
"iso_for_translit": "grc",
|
| 508 |
+
"tsv_filename": "grc.tsv",
|
| 509 |
+
},
|
| 510 |
+
...
|
| 511 |
+
}
|
| 512 |
+
```
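
A sketch of the driver loop that would consume these configs. `scrape_wiktionary_category`, `scrape_theoi`, and `append_proper_nouns` are hypothetical helper names, not existing functions in this repo:

```python
from pathlib import Path

def run_all(configs: dict, lexicon_dir: Path) -> None:
    """Dispatch each configured source to its scraper and append rows to the language TSV."""
    for iso, cfg in configs.items():
        rows = []
        for source in cfg["sources"]:
            if source["type"] == "wiktionary_cat":
                rows.extend(scrape_wiktionary_category(source["category"]))  # hypothetical helper
            elif source["type"] == "theoi":
                rows.extend(scrape_theoi(source["url"]))                     # hypothetical helper
        out_path = lexicon_dir / cfg["tsv_filename"]
        # hypothetical helper: writes Word/IPA/SCA rows using the per-language map
        append_proper_nouns(out_path, rows, iso_for_translit=cfg["iso_for_translit"])
```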
|
| 513 |
+
|
| 514 |
+
### 4.4 Adversarial Audit for Proper Nouns
|
| 515 |
+
|
| 516 |
+
Team B checks (in addition to standard v2):
|
| 517 |
+
- Verify 20 proper nouns are attested in the source language (not modern inventions)
|
| 518 |
+
- Verify Concept_ID tags are correct (theonym vs toponym vs anthroponym)
|
| 519 |
+
- Verify no modern-language proper nouns leaked in (e.g., English "John" in a Gothic file); a script-based leak check is sketched below
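
One way the leak check could work, assuming we flag Latin-only headwords in lexicons whose native script is non-Latin; `flag_possible_modern_leaks` is a hypothetical helper, and the Unicode-name heuristic is an assumption, not the pipeline's actual method:

```python
import re
import unicodedata

# Heuristic: in a lexicon whose expected script is non-Latin (e.g., Gothic),
# a headword written purely in ASCII Latin letters is suspicious.
LATIN_ONLY = re.compile(r"^[A-Za-z' -]+$")

def flag_possible_modern_leaks(words: list[str], expected_script: str) -> list[str]:
    """Return words that look like modern Latin-script names in a non-Latin-script lexicon."""
    suspicious = []
    for w in words:
        # First word of the Unicode character name is the script, e.g. "GOTHIC LETTER AHSA" -> "GOTHIC".
        scripts = {unicodedata.name(ch, "").split(" ")[0] for ch in w if ch.isalpha()}
        if expected_script.upper() not in scripts and LATIN_ONLY.match(w):
            suspicious.append(w)
    return suspicious
```

For example, `flag_possible_modern_leaks(words, "GOTHIC")` would flag "John" but pass a Gothic-script headword untouched.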
|
| 520 |
+
|
| 521 |
+
---
|
| 522 |
+
|
| 523 |
+
## PHASE 5: Source Quality Upgrades
|
| 524 |
+
|
| 525 |
+
**Priority:** MEDIUM — replaces weak sources with stronger ones
|
| 526 |
+
**Estimated effort:** 2 sessions
|
| 527 |
+
**Adversarial audit:** YES
|
| 528 |
+
|
| 529 |
+
### 5.1 Replace avesta.org with Bartholomae
|
| 530 |
+
|
| 531 |
+
**Problem:** avesta.org is a personal website by a non-specialist, based on a 125-year-old dictionary.
|
| 532 |
+
**Solution:** After Phase 2 restores the avesta_org data, write a SECOND script that cross-references against Bartholomae's *Altiranisches Wörterbuch* entries available via:
|
| 533 |
+
- TITUS Frankfurt digitized texts
|
| 534 |
+
- Wiktionary entries that cite Bartholomae
|
| 535 |
+
|
| 536 |
+
**Script:** `scripts/crossref_avestan_bartholomae.py` (the Wiktionary lookup step is sketched after the list)
|
| 537 |
+
- For each avesta_org entry, search Wiktionary for a matching Avestan entry with Bartholomae citation
|
| 538 |
+
- Flag entries that appear in avesta_org but NOT in any academic source
|
| 539 |
+
- Add `bartholomae_verified: true/false` to audit trail
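
A minimal sketch of the Wiktionary lookup step, using the standard MediaWiki `action=parse` API; the substring test for the string "Bartholomae" is a crude stand-in for real citation parsing, and matching the avesta_org headword to a Wiktionary page title is glossed over here:

```python
import json
import urllib.parse
import urllib.request

API = "https://en.wiktionary.org/w/api.php"
USER_AGENT = "PhaiPhon/1.0 (ancient-scripts-datasets; academic research)"

def bartholomae_cited(title: str) -> bool:
    """Return True if the Wiktionary page's wikitext mentions Bartholomae."""
    params = urllib.parse.urlencode({
        "action": "parse", "page": title, "prop": "wikitext", "format": "json",
    })
    req = urllib.request.Request(f"{API}?{params}", headers={"User-Agent": USER_AGENT})
    with urllib.request.urlopen(req, timeout=60) as resp:
        data = json.load(resp)
    wikitext = data.get("parse", {}).get("wikitext", {}).get("*", "")
    return "Bartholomae" in wikitext

# Per-entry usage (entry dict shape is illustrative):
#   entry["bartholomae_verified"] = bartholomae_cited(entry["word"])
```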
|
| 540 |
+
|
| 541 |
+
### 5.2 Cross-Reference Palaeolexicon Against eDiAna
|
| 542 |
+
|
| 543 |
+
**Problem:** Palaeolexicon (1,960 entries across 6 languages) is a volunteer project with no peer review.
|
| 544 |
+
**Solution:** For Anatolian languages where eDiAna overlaps (Lycian, Lydian, Carian, Luwian), verify Palaeolexicon entries against eDiAna.
|
| 545 |
+
|
| 546 |
+
**Script:** `scripts/crossref_palaeolexicon_ediana.py` (the matching core is sketched after the list)
|
| 547 |
+
- Load both Palaeolexicon and eDiAna entries for each Anatolian language
|
| 548 |
+
- Flag Palaeolexicon entries with no eDiAna match
|
| 549 |
+
- Log verification status to audit trail
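
A sketch of the matching core, assuming both sources have already been reduced to plain word lists; the diacritic-stripping normalisation is an assumption about how loosely the two transcription systems should be compared:

```python
import unicodedata

def normalise(form: str) -> str:
    """Case-fold and strip combining diacritics so trivially different transcriptions still match."""
    decomposed = unicodedata.normalize("NFD", form.casefold())
    return "".join(ch for ch in decomposed if not unicodedata.combining(ch))

def unmatched_entries(palaeolexicon_words: list[str], ediana_words: list[str]) -> list[str]:
    """Return Palaeolexicon forms with no (normalised) counterpart in eDiAna."""
    ediana_index = {normalise(w) for w in ediana_words}
    return [w for w in palaeolexicon_words if normalise(w) not in ediana_index]
```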
|
| 550 |
+
|
| 551 |
+
### 5.3 Upgrade ABVD Data via Lexibank 2
|
| 552 |
+
|
| 553 |
+
**Problem:** ABVD entries are ~50% orthographic (fake-IPA).
|
| 554 |
+
**Solution:** Where Lexibank 2 provides CLTS-standardized versions of ABVD languages, prefer those.
|
| 555 |
+
|
| 556 |
+
**Script:** `scripts/upgrade_abvd_lexibank.py` (the guard condition is sketched after the list)
|
| 557 |
+
- Download Lexibank 2 standardized forms for ABVD languages
|
| 558 |
+
- For each ABVD entry where Lexibank provides a CLTS-standardized IPA, update the IPA column
|
| 559 |
+
- Apply Never-Regress Rule: only update if Lexibank IPA differs from Word (i.e., is not identity)
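
The Never-Regress Rule from the last bullet as a minimal guard function (names are illustrative, not the actual script's API):

```python
def maybe_upgrade_ipa(word: str, current_ipa: str, lexibank_ipa: str | None) -> str:
    """Only adopt the Lexibank IPA when it is a real transcription:
    non-empty and not just the orthographic Word repeated (identity)."""
    if lexibank_ipa and lexibank_ipa != word:
        return lexibank_ipa
    return current_ipa
```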
|
| 560 |
+
|
| 561 |
+
---
|
| 562 |
+
|
| 563 |
+
## PHASE 6: New Language Ingestion — Tier 2
|
| 564 |
+
|
| 565 |
+
**Priority:** MEDIUM — important but less critical than Tier 1
|
| 566 |
+
**Estimated effort:** 3–4 sessions (parallelizable)
|
| 567 |
+
**Adversarial audit:** YES (full v2 pipeline per language)
|
| 568 |
+
|
| 569 |
+
### Languages
|
| 570 |
+
|
| 571 |
+
| Language | ISO | Family | Primary Source | Est. Entries |
|
| 572 |
+
|----------|-----|--------|---------------|-------------|
|
| 573 |
+
| Coptic | cop | Afroasiatic | Coptic Dictionary Online (coptic-dictionary.org) | 5,000–11,263 |
|
| 574 |
+
| Hattic | xht | Isolate | Palaeolexicon + Wiktionary | 100–300 |
|
| 575 |
+
| Pali | pli | Indo-European | PTS Dictionary (dsal.uchicago.edu), Digital Pali Dict | 5,000+ |
|
| 576 |
+
| Classical Armenian | xcl | Indo-European | Wiktionary Category:Old_Armenian_lemmas, Calfa.fr | 2,000+ |
|
| 577 |
+
| Old English | ang | Indo-European | Wiktionary Category:Old_English_lemmas | 5,000+ |
|
| 578 |
+
| Ge'ez | gez | Afroasiatic | Wiktionary + Leslau dictionary if accessible | 1,000+ |
|
| 579 |
+
| Syriac | syc | Afroasiatic | SEDRA (sedra.bethmardutho.org) + Wiktionary | 3,000+ |
|
| 580 |
+
| Aramaic (Imperial/Biblical) | arc | Afroasiatic | CAL (cal.huc.edu) + Wiktionary | 3,000+ |
|
| 581 |
+
| Biblical Hebrew | hbo | Afroasiatic | Wiktionary Category:Biblical_Hebrew_lemmas | 3,000+ |
|
| 582 |
+
|
| 583 |
+
### Per-Language Protocol
|
| 584 |
+
|
| 585 |
+
Same as Phase 3: create transliteration map → write extraction script → dry-run → adversarial audit → run live → update metadata.
|
| 586 |
+
|
| 587 |
+
Each language needs (a skeletal example follows the list):
|
| 588 |
+
1. Transliteration map in `transliteration_maps.py` with cited reference
|
| 589 |
+
2. Extraction script in `scripts/`
|
| 590 |
+
3. Entry in `language_configs.py`
|
| 591 |
+
4. Proper noun scraping (gods, places, rulers) from the same sources
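
A skeletal example of items 1 and 3, using Coptic as the stand-in; the phonetic values and the `LANGUAGE_CONFIGS` fields shown here are illustrative assumptions, since the real config schema is not specified in this PRD:

```python
# transliteration_maps.py: skeletal new-language map; every value must be backed by the cited reference
COPTIC_MAP: dict[str, str] = {
    "ⲁ": "a",   # alpha
    "ⲃ": "b",   # vida/beta (exact value depends on the cited grammar)
    # ... remaining letters, with the reference recorded in the map's comment header
}

# language_configs.py: illustrative entry only; field names may differ in the real module
LANGUAGE_CONFIGS = {
    "cop": {
        "name": "Coptic",
        "family": "Afroasiatic",
        "translit_map": "COPTIC_MAP",
        "script": "scripts/scrape_coptic_dictionary.py",
        "tsv": "cop.tsv",
    },
}
```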
|
| 592 |
+
|
| 593 |
+
---
|
| 594 |
+
|
| 595 |
+
## PHASE 7: New Language Ingestion — Tier 3 & Proto-Languages
|
| 596 |
+
|
| 597 |
+
**Priority:** LOW-MEDIUM — expansion after core is solid
|
| 598 |
+
**Estimated effort:** 4+ sessions
|
| 599 |
+
**Adversarial audit:** YES
|
| 600 |
+
|
| 601 |
+
### Tier 3 Ancient Languages
|
| 602 |
+
|
| 603 |
+
| Language | ISO | Source | Est. Entries |
|
| 604 |
+
|----------|-----|--------|-------------|
|
| 605 |
+
| Middle Persian | pal | MPCD (mpcorpus.org) | 3,000+ |
|
| 606 |
+
| Sogdian | sog | Gharib Dictionary (Internet Archive) | 1,000+ |
|
| 607 |
+
| Old Japanese | ojp | ONCOJ (oncoj.ninjal.ac.jp) | 2,000+ |
|
| 608 |
+
| Gaulish | xtg | Lexicon Leponticum | 500+ |
|
| 609 |
+
| Oscan | osc | CEIPoM (Zenodo) | 500+ |
|
| 610 |
+
| Umbrian | xum | CEIPoM | 300+ |
|
| 611 |
+
| Venetic | xve | CEIPoM | 300+ |
|
| 612 |
+
| Classical Nahuatl | nci | Wiktionary + colonial dictionaries | 2,000+ |
|
| 613 |
+
| Eblaite | xeb | Oracc/DCCLT | 1,000+ |
|
| 614 |
+
| Old Irish | sga | eDIL (dil.ie) | 5,000+ |
|
| 615 |
+
| Palaic | plq | eDiAna | 50+ |
|
| 616 |
+
|
| 617 |
+
### Reconstructed Proto-Languages
|
| 618 |
+
|
| 619 |
+
| Language | ISO | Source | Est. Entries |
|
| 620 |
+
|----------|-----|--------|-------------|
|
| 621 |
+
| Proto-Austronesian | map | ACD (acd.clld.org) | 3,000–5,000 |
|
| 622 |
+
| Proto-Uralic | urj-pro | Wiktionary + Starostin | 500+ |
|
| 623 |
+
| Proto-Bantu | bnt-pro | BLR3 (africamuseum.be) | 5,000+ |
|
| 624 |
+
| Proto-Sino-Tibetan | sit-pro | STEDT (stedt.berkeley.edu) | 1,000+ |
|
| 625 |
+
| Proto-Celtic | cel-pro | Matasovic dictionary (Internet Archive) | 1,000+ |
|
| 626 |
+
| Proto-Germanic | gem-pro | Wiktionary Category:Proto-Germanic_lemmas | 2,000+ |
|
| 627 |
+
|
| 628 |
+
---
|
| 629 |
+
|
| 630 |
+
## PHASE 8: Ongoing Quality Assurance
|
| 631 |
+
|
| 632 |
+
### 8.1 Automated Validation Suite
|
| 633 |
+
|
| 634 |
+
Write `scripts/validate_all.py` — a comprehensive validation script that runs after ANY data change:
|
| 635 |
+
|
| 636 |
+
```python
|
| 637 |
+
def validate_all():
|
| 638 |
+
for tsv in LEXICON_DIR.glob("*.tsv"):
|
| 639 |
+
# 1. Header check
|
| 640 |
+
# 2. No empty IPA
|
| 641 |
+
# 3. No duplicate Words
|
| 642 |
+
# 4. SCA matches ipa_to_sound_class(IPA) for all entries
|
| 643 |
+
# 5. No '0' in SCA (flag but don't fail — may be legitimate unknowns)
|
| 644 |
+
# 6. Source field is non-empty
|
| 645 |
+
# 7. Entry count matches languages.tsv
|
| 646 |
+
# 8. No known artifact patterns (inprogress, phoneticvalue, etc.)
|
| 647 |
+
```
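
A concrete sketch of checks 1 through 4 plus the Source check, assuming the six-column header used by the lexicon TSVs and the `ipa_to_sound_class` import path used by the ingest scripts:

```python
from pathlib import Path

from cognate_pipeline.normalise.sound_class import ipa_to_sound_class  # same import as the ingest scripts

EXPECTED_HEADER = "Word\tIPA\tSCA\tSource\tConcept_ID\tCognate_Set_ID"

def validate_tsv(tsv: Path, errors: list[str]) -> None:
    """Concrete version of checks 1-4 and 6 for one lexicon file."""
    lines = tsv.read_text(encoding="utf-8").splitlines()
    if not lines or lines[0] != EXPECTED_HEADER:             # 1. header check
        errors.append(f"{tsv.name}: unexpected header")
        return
    seen: set[str] = set()
    for n, line in enumerate(lines[1:], start=2):
        fields = line.split("\t")
        if len(fields) < 6:
            errors.append(f"{tsv.name}:{n}: expected 6 columns, got {len(fields)}")
            continue
        word, ipa, sca, source = fields[:4]
        if not ipa:                                          # 2. no empty IPA
            errors.append(f"{tsv.name}:{n}: empty IPA for {word!r}")
        if word in seen:                                     # 3. no duplicate Words
            errors.append(f"{tsv.name}:{n}: duplicate Word {word!r}")
        seen.add(word)
        if sca != ipa_to_sound_class(ipa):                   # 4. SCA must match the IPA
            errors.append(f"{tsv.name}:{n}: SCA mismatch for {word!r}")
        if not source:                                       # 6. Source must be non-empty
            errors.append(f"{tsv.name}:{n}: empty Source for {word!r}")
```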
|
| 648 |
+
|
| 649 |
+
### 8.2 Pre-Push Validation Gate
|
| 650 |
+
|
| 651 |
+
Add to the HuggingFace push workflow (a minimal gate script is sketched after the list):
|
| 652 |
+
1. Run `validate_all.py` — must pass with 0 errors
|
| 653 |
+
2. Run `reprocess_ipa.py --dry-run` — verify no regressions
|
| 654 |
+
3. Verify all TSV files have correct header
|
| 655 |
+
4. Verify `languages.tsv` entry counts match actual
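
A minimal gate runner, assuming steps 3 and 4 are folded into `validate_all.py`; how it is wired into the actual HuggingFace push workflow is left open:

```python
import subprocess
import sys

# Each step must exit 0 for the push to proceed.
GATE_STEPS = [
    [sys.executable, "scripts/validate_all.py"],
    [sys.executable, "scripts/reprocess_ipa.py", "--dry-run"],
]

def run_gate() -> int:
    """Run each gate step in order; abort on the first non-zero exit code."""
    for cmd in GATE_STEPS:
        result = subprocess.run(cmd)
        if result.returncode != 0:
            print(f"Pre-push gate FAILED at: {' '.join(cmd)}")
            return result.returncode
    print("Pre-push gate passed.")
    return 0

if __name__ == "__main__":
    sys.exit(run_gate())
```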
|
| 656 |
+
|
| 657 |
+
### 8.3 DATABASE_REFERENCE.md Auto-Update
|
| 658 |
+
|
| 659 |
+
After every phase completion, update DATABASE_REFERENCE.md with:
|
| 660 |
+
- New language entries in the Ancient Languages table
|
| 661 |
+
- Updated entry counts
|
| 662 |
+
- New source entries in the Source Registry
|
| 663 |
+
- New transliteration maps in the Map Registry
|
| 664 |
+
|
| 665 |
+
---
|
| 666 |
+
|
| 667 |
+
## Execution Order & Dependencies
|
| 668 |
+
|
| 669 |
+
```
|
| 670 |
+
PHASE 0 (Critical Bugs)
|
| 671 |
+
├── 0.1 SCA tokenizer fix
|
| 672 |
+
├── 0.2 Nasalized vowel fix
|
| 673 |
+
├── 0.3 Clean artifacts script
|
| 674 |
+
├── 0.4 Metadata update
|
| 675 |
+
└── 0.5 Presentation fixes
|
| 676 |
+
↓
|
| 677 |
+
PHASE 1 (IPA Map Fixes) ──→ reprocess_ipa.py ──→ validate_all.py
|
| 678 |
+
↓
|
| 679 |
+
PHASE 2 (Data Restoration)
|
| 680 |
+
├── 2.1 Avestan re-scrape
|
| 681 |
+
├── 2.2 Sumerogram tagging
|
| 682 |
+
└── 2.3 Contamination fix
|
| 683 |
+
↓
|
| 684 |
+
PHASE 3 (Tier 1 Languages) ←── can run 9 languages in PARALLEL
|
| 685 |
+
├── 3.1 Sumerian
|
| 686 |
+
├── 3.2 Akkadian
|
| 687 |
+
├── 3.3 Egyptian
|
| 688 |
+
├── 3.4 Sanskrit
|
| 689 |
+
├── 3.5 Ancient Greek
|
| 690 |
+
├── 3.6 Gothic
|
| 691 |
+
├── 3.7 Mycenaean Greek
|
| 692 |
+
├── 3.8 OCS
|
| 693 |
+
└── 3.9 Old Norse
|
| 694 |
+
↓
|
| 695 |
+
PHASE 4 (Proper Nouns) ←── runs AFTER Phase 3 (needs Tier 1 TSVs to exist)
|
| 696 |
+
↓
|
| 697 |
+
PHASE 5 (Source Upgrades) ←── independent, can run in parallel with Phase 4
|
| 698 |
+
↓
|
| 699 |
+
PHASE 6 (Tier 2 Languages)
|
| 700 |
+
↓
|
| 701 |
+
PHASE 7 (Tier 3 + Proto-Languages)
|
| 702 |
+
↓
|
| 703 |
+
PHASE 8 (Ongoing QA) ←── continuous after all phases
|
| 704 |
+
```
|
| 705 |
+
|
| 706 |
+
---
|
| 707 |
+
|
| 708 |
+
## Success Criteria
|
| 709 |
+
|
| 710 |
+
| Metric | Current | Target |
|
| 711 |
+
|--------|---------|--------|
|
| 712 |
+
| Ancient/reconstructed languages | 23 | 42+ (Tier 1+2) |
|
| 713 |
+
| Total ancient language entries | 17,567 | 100,000+ |
|
| 714 |
+
| Languages with >80% non-identity IPA | 10 | 30+ |
|
| 715 |
+
| Languages with 0% empty Concept_IDs | ~5 | 25+ |
|
| 716 |
+
| SCA "0" rate across all ancient langs | ~5% | <1% |
|
| 717 |
+
| Proper noun coverage per language | Variable | All languages have theonym + toponym entries |
|
| 718 |
+
| Adversarial audit pass rate | — | 100% (all phases pass v2 audit) |
|
| 719 |
+
| HuggingFace accessibility | Private | Public |
|
| 720 |
+
| License | None | CC-BY-SA-4.0 (file present) |
|
| 721 |
+
|
| 722 |
+
---
|
| 723 |
+
|
| 724 |
+
## Appendix A: Script Naming Convention
|
| 725 |
+
|
| 726 |
+
```
|
| 727 |
+
scripts/scrape_{source}_{language}.py # Single-source, single-language
|
| 728 |
+
scripts/scrape_{source}.py # Single-source, multi-language
|
| 729 |
+
scripts/scrape_proper_nouns.py # Unified proper noun scraper
|
| 730 |
+
scripts/clean_{issue}.py # Cleaning/fixing scripts
|
| 731 |
+
scripts/crossref_{source1}_{source2}.py # Cross-reference validation
|
| 732 |
+
scripts/upgrade_{source}.py # Source quality upgrades
|
| 733 |
+
scripts/validate_all.py # Comprehensive validation
|
| 734 |
+
scripts/tag_sumerograms.py # Sumerogram identification
|
| 735 |
+
```
|
| 736 |
+
|
| 737 |
+
## Appendix B: Transliteration Map Naming Convention
|
| 738 |
+
|
| 739 |
+
```python
|
| 740 |
+
# In transliteration_maps.py:
|
| 741 |
+
SUMERIAN_MAP: Dict[str, str] = { ... } # Jagersma (2010)
|
| 742 |
+
AKKADIAN_MAP: Dict[str, str] = { ... } # Huehnergard (2011)
|
| 743 |
+
EGYPTIAN_MAP: Dict[str, str] = { ... } # Allen (2014)
|
| 744 |
+
SANSKRIT_MAP: Dict[str, str] = { ... } # Whitney (1896)
|
| 745 |
+
ANCIENT_GREEK_MAP: Dict[str, str] = { ... } # Allen (1987)
|
| 746 |
+
GOTHIC_MAP: Dict[str, str] = { ... } # Wright (1910)
|
| 747 |
+
MYCENAEAN_MAP: Dict[str, str] = { ... } # Ventris & Chadwick (1973)
|
| 748 |
+
OCS_MAP: Dict[str, str] = { ... } # Lunt (2001)
|
| 749 |
+
OLD_NORSE_MAP: Dict[str, str] = { ... } # Gordon (1957)
|
| 750 |
+
```
|
| 751 |
+
|
| 752 |
+
## Appendix C: Adversarial Auditor Dispatch Template
|
| 753 |
+
|
| 754 |
+
When deploying the adversarial pipeline for any phase, spawn two parallel agents:
|
| 755 |
+
|
| 756 |
+
**Agent A (Extraction):**
|
| 757 |
+
```
|
| 758 |
+
You are Team A (Extraction Agent). Your job is to write and run a Python script
|
| 759 |
+
that scrapes {SOURCE} for {LANGUAGE} data. Follow the Iron Law: all data must
|
| 760 |
+
come from HTTP requests. Use the standard script template from DATABASE_REFERENCE.md.
|
| 761 |
+
[... phase-specific instructions ...]
|
| 762 |
+
```
|
| 763 |
+
|
| 764 |
+
**Agent B (Adversarial Auditor v2):**
|
| 765 |
+
```
|
| 766 |
+
You are Team B (Critical Adversarial Auditor v2). You have VETO POWER.
|
| 767 |
+
After Agent A completes, perform the following DEEP checks:
|
| 768 |
+
|
| 769 |
+
1. 50-WORD CROSS-REFERENCE: Select 50 random entries from the output TSV.
|
| 770 |
+
For each, construct the source URL and verify the word appears there.
|
| 771 |
+
Use WebFetch to check each URL. Report matches and mismatches.
|
| 772 |
+
|
| 773 |
+
2. IPA SPOT-CHECK: For 20 random entries, manually apply the transliteration
|
| 774 |
+
map character-by-character. Show your work. Report any mismatches.
|
| 775 |
+
|
| 776 |
+
3. SCA CONSISTENCY: For 20 random entries, verify ipa_to_sound_class(IPA) == SCA.
|
| 777 |
+
|
| 778 |
+
4. SOURCE PROVENANCE: For 10 random entries, provide the exact URL where
|
| 779 |
+
each entry can be verified. Fetch each URL and confirm.
|
| 780 |
+
|
| 781 |
+
5. CONCEPT ID ACCURACY: For 20 entries with glosses, verify the gloss matches
|
| 782 |
+
the source definition.
|
| 783 |
+
|
| 784 |
+
6. DEDUP: Count unique words. Report any duplicates.
|
| 785 |
+
|
| 786 |
+
7. ENTRY COUNT: Is the count non-round and plausible?
|
| 787 |
+
|
| 788 |
+
DO NOT perform surface-level checks (header format, encoding, file existence).
|
| 789 |
+
Only perform checks that touch REAL DATA and REAL SOURCES.
|
| 790 |
+
|
| 791 |
+
Produce a full v2 audit report. Verdict: PASS or FAIL with blocking issues.
|
| 792 |
+
```
|
| 793 |
+
|
| 794 |
+
---
|
| 795 |
+
|
| 796 |
+
*End of PRD*
|
scripts/fetch_wiktionary_raw.py
ADDED
|
@@ -0,0 +1,201 @@
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""Fetch Wiktionary category members and save as raw JSON for later processing.
|
| 3 |
+
|
| 4 |
+
Uses curl and respects the Retry-After header. Designed to handle rate limiting
|
| 5 |
+
gracefully by waiting the specified time between retries.
|
| 6 |
+
|
| 7 |
+
Iron Rule: All data comes from HTTP API responses.
|
| 8 |
+
|
| 9 |
+
Usage:
|
| 10 |
+
python scripts/fetch_wiktionary_raw.py [--language ISO]
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
from __future__ import annotations
|
| 14 |
+
|
| 15 |
+
import argparse
|
| 16 |
+
import io
|
| 17 |
+
import json
|
| 18 |
+
import logging
|
| 19 |
+
import subprocess
|
| 20 |
+
import sys
|
| 21 |
+
import time
|
| 22 |
+
from pathlib import Path
|
| 23 |
+
|
| 24 |
+
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
|
| 25 |
+
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8")
|
| 26 |
+
|
| 27 |
+
ROOT = Path(__file__).resolve().parent.parent
|
| 28 |
+
RAW_DIR = ROOT / "data" / "training" / "raw"
|
| 29 |
+
|
| 30 |
+
logger = logging.getLogger(__name__)
|
| 31 |
+
|
| 32 |
+
API_URL = "https://en.wiktionary.org/w/api.php"
|
| 33 |
+
USER_AGENT = "PhaiPhon/1.0 (ancient-scripts-datasets; academic research)"
|
| 34 |
+
|
| 35 |
+
# All categories: (category_name, namespace)
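# Namespace 0 is the main namespace; 118 is Wiktionary's "Reconstruction:" namespace,
# where proto-language lemma pages live.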
|
| 36 |
+
CATEGORIES = {
|
| 37 |
+
"cop": ("Coptic_lemmas", 0),
|
| 38 |
+
"pli": ("Pali_lemmas", 0),
|
| 39 |
+
"xcl": ("Old_Armenian_lemmas", 0),
|
| 40 |
+
"ang": ("Old_English_lemmas", 0),
|
| 41 |
+
"gez": ("Ge%27ez_lemmas", 0),
|
| 42 |
+
"hbo": ("Hebrew_lemmas", 0),
|
| 43 |
+
"xht": ("Hattic_lemmas", 0),
|
| 44 |
+
# Tier 3 + Proto-languages
|
| 45 |
+
"gem-pro": ("Proto-Germanic_lemmas", 118),
|
| 46 |
+
"cel-pro": ("Proto-Celtic_lemmas", 118),
|
| 47 |
+
"urj-pro": ("Proto-Uralic_lemmas", 118),
|
| 48 |
+
"nci": ("Classical_Nahuatl_lemmas", 0),
|
| 49 |
+
"sga": ("Old_Irish_lemmas", 0),
|
| 50 |
+
# Phase 7 additions
|
| 51 |
+
"pal": ("Middle_Persian_lemmas", 0),
|
| 52 |
+
"bnt-pro": ("Proto-Bantu_lemmas", 118),
|
| 53 |
+
"sit-pro": ("Proto-Sino-Tibetan_lemmas", 118),
|
| 54 |
+
"xtg": ("Gaulish_lemmas", 0),
|
| 55 |
+
"sog": ("Sogdian_lemmas", 0),
|
| 56 |
+
"ojp": ("Old_Japanese_lemmas", 0),
|
| 57 |
+
# Phase 8 P0 additions
|
| 58 |
+
"sla-pro": ("Proto-Slavic_lemmas", 118),
|
| 59 |
+
"trk-pro": ("Proto-Turkic_lemmas", 118),
|
| 60 |
+
"itc-pro": ("Proto-Italic_lemmas", 118),
|
| 61 |
+
"jpx-pro": ("Proto-Japonic_lemmas", 118),
|
| 62 |
+
"ira-pro": ("Proto-Iranian_lemmas", 118),
|
| 63 |
+
# Phase 8 P1 proto-languages
|
| 64 |
+
"alg-pro": ("Proto-Algonquian_lemmas", 118),
|
| 65 |
+
"sqj-pro": ("Proto-Albanian_lemmas", 118),
|
| 66 |
+
"aav-pro": ("Proto-Austroasiatic_lemmas", 118),
|
| 67 |
+
"poz-pol-pro": ("Proto-Polynesian_lemmas", 118),
|
| 68 |
+
"tai-pro": ("Proto-Tai_lemmas", 118),
|
| 69 |
+
"xto-pro": ("Proto-Tocharian_lemmas", 118),
|
| 70 |
+
"poz-oce-pro": ("Proto-Oceanic_lemmas", 118),
|
| 71 |
+
"xgn-pro": ("Proto-Mongolic_lemmas", 118),
|
| 72 |
+
# Phase 8 additional ancient languages
|
| 73 |
+
"obm": ("Moabite_lemmas", 0),
|
| 74 |
+
# Batch 3: P2 proto-languages + Iberian
|
| 75 |
+
"myn-pro": ("Proto-Mayan_lemmas", 118),
|
| 76 |
+
"afa-pro": ("Proto-Afroasiatic_lemmas", 118),
|
| 77 |
+
"xib": ("Iberian_lemmas", 0),
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def fetch_one_page(url: str) -> tuple[str, int]:
|
| 82 |
+
"""Fetch one URL via curl. Returns (body, retry_after_secs)."""
|
| 83 |
+
result = subprocess.run(
|
| 84 |
+
["curl", "-s", "-D", "-",
|
| 85 |
+
"-H", f"User-Agent: {USER_AGENT}",
|
| 86 |
+
url],
|
| 87 |
+
capture_output=True, text=True, timeout=60,
|
| 88 |
+
)
|
| 89 |
+
output = result.stdout
|
| 90 |
+
# Split headers from body
|
| 91 |
+
parts = output.split("\r\n\r\n", 1)
|
| 92 |
+
if len(parts) < 2:
|
| 93 |
+
parts = output.split("\n\n", 1)
|
| 94 |
+
|
| 95 |
+
headers = parts[0] if parts else ""
|
| 96 |
+
body = parts[1] if len(parts) > 1 else ""
|
| 97 |
+
|
| 98 |
+
# Check for 429
|
| 99 |
+
retry_after = 0
|
| 100 |
+
if "429" in headers.split("\n")[0]:
|
| 101 |
+
for line in headers.split("\n"):
|
| 102 |
+
if line.lower().startswith("retry-after:"):
|
| 103 |
+
try:
|
| 104 |
+
retry_after = int(line.split(":", 1)[1].strip())
|
| 105 |
+
except ValueError:
|
| 106 |
+
retry_after = 300 # Default 5 min
|
| 107 |
+
if retry_after == 0:
|
| 108 |
+
retry_after = 300
|
| 109 |
+
|
| 110 |
+
return body, retry_after
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def fetch_category(iso: str, category: str, namespace: int = 0) -> list[str]:
|
| 114 |
+
"""Fetch all members of a Wiktionary category, respecting rate limits."""
|
| 115 |
+
members = []
|
| 116 |
+
base = (
|
| 117 |
+
f"action=query&list=categorymembers&cmtitle=Category:{category}"
|
| 118 |
+
f"&cmtype=page&cmnamespace={namespace}&cmlimit=500&format=json"
|
| 119 |
+
)
|
| 120 |
+
extra = ""
|
| 121 |
+
page = 0
|
| 122 |
+
|
| 123 |
+
while True:
|
| 124 |
+
page += 1
|
| 125 |
+
url = f"{API_URL}?{base}{extra}"
|
| 126 |
+
|
| 127 |
+
body, retry_after = fetch_one_page(url)
|
| 128 |
+
|
| 129 |
+
if retry_after > 0:
|
| 130 |
+
logger.warning("%s: Rate limited. Retry-After=%d seconds (%.1f min). Waiting...",
|
| 131 |
+
iso, retry_after, retry_after / 60)
|
| 132 |
+
time.sleep(retry_after + 5)
|
| 133 |
+
# Retry after waiting
|
| 134 |
+
body, retry_after = fetch_one_page(url)
|
| 135 |
+
if retry_after > 0:
|
| 136 |
+
logger.error("%s: Still rate limited after waiting. Aborting.", iso)
|
| 137 |
+
return members
|
| 138 |
+
|
| 139 |
+
try:
|
| 140 |
+
data = json.loads(body)
|
| 141 |
+
except json.JSONDecodeError:
|
| 142 |
+
logger.error("%s: Invalid JSON on page %d. Body: %s", iso, page, body[:200])
|
| 143 |
+
return members
|
| 144 |
+
|
| 145 |
+
for m in data.get("query", {}).get("categorymembers", []):
|
| 146 |
+
members.append(m["title"])
|
| 147 |
+
|
| 148 |
+
cont = data.get("continue", {})
|
| 149 |
+
if "cmcontinue" in cont:
|
| 150 |
+
extra = f"&cmcontinue={cont['cmcontinue']}"
|
| 151 |
+
if page % 5 == 0:
|
| 152 |
+
logger.info(" %s page %d: %d members...", iso, page, len(members))
|
| 153 |
+
time.sleep(1.5) # Be nice
|
| 154 |
+
else:
|
| 155 |
+
break
|
| 156 |
+
|
| 157 |
+
return members
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def main():
|
| 161 |
+
parser = argparse.ArgumentParser(description="Fetch Wiktionary category raw data")
|
| 162 |
+
parser.add_argument("--language", "-l", help="Specific ISO code")
|
| 163 |
+
args = parser.parse_args()
|
| 164 |
+
|
| 165 |
+
logging.basicConfig(
|
| 166 |
+
level=logging.INFO,
|
| 167 |
+
format="%(asctime)s %(levelname)s: %(message)s",
|
| 168 |
+
datefmt="%H:%M:%S",
|
| 169 |
+
)
|
| 170 |
+
|
| 171 |
+
RAW_DIR.mkdir(parents=True, exist_ok=True)
|
| 172 |
+
|
| 173 |
+
if args.language:
|
| 174 |
+
cats = {args.language: CATEGORIES[args.language]}
|
| 175 |
+
else:
|
| 176 |
+
cats = CATEGORIES
|
| 177 |
+
|
| 178 |
+
for iso, cat_info in cats.items():
|
| 179 |
+
category, namespace = cat_info
|
| 180 |
+
raw_path = RAW_DIR / f"wiktionary_category_{iso}.json"
|
| 181 |
+
if raw_path.exists():
|
| 182 |
+
with open(raw_path, "r", encoding="utf-8") as f:
|
| 183 |
+
existing = json.load(f)
|
| 184 |
+
logger.info("%s: Already cached (%d members). Skipping.", iso, len(existing.get("members", [])))
|
| 185 |
+
continue
|
| 186 |
+
|
| 187 |
+
logger.info("%s: Fetching %s (ns=%d)...", iso, category, namespace)
|
| 188 |
+
members = fetch_category(iso, category, namespace)
|
| 189 |
+
logger.info("%s: Got %d members", iso, len(members))
|
| 190 |
+
|
| 191 |
+
if members:
|
| 192 |
+
with open(raw_path, "w", encoding="utf-8") as f:
|
| 193 |
+
json.dump({"category": category, "members": members}, f, ensure_ascii=False)
|
| 194 |
+
logger.info("%s: Saved to %s", iso, raw_path)
|
| 195 |
+
|
| 196 |
+
# Pause between languages to be polite
|
| 197 |
+
time.sleep(5)
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
if __name__ == "__main__":
|
| 201 |
+
main()
|
scripts/ingest_acd.py
ADDED
|
@@ -0,0 +1,307 @@
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""Ingest Proto-Austronesian reconstructed forms from the ACD CLDF dataset.
|
| 3 |
+
|
| 4 |
+
Source: Austronesian Comparative Dictionary (ACD) — CLDF on GitHub
|
| 5 |
+
URL: https://github.com/lexibank/acd
|
| 6 |
+
License: CC BY 4.0
|
| 7 |
+
Citation: Blust, Trussel & Smith (2023), DOI: 10.5281/zenodo.7737547
|
| 8 |
+
|
| 9 |
+
The CLDF forms.csv contains reconstructed forms for 42 proto-languages.
|
| 10 |
+
Forms use Blust notation (not IPA) — requires transliteration.
|
| 11 |
+
|
| 12 |
+
Iron Rule: Data comes from downloaded CSV files. No hardcoded word lists.
|
| 13 |
+
|
| 14 |
+
Usage:
|
| 15 |
+
python scripts/ingest_acd.py [--dry-run]
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
from __future__ import annotations
|
| 19 |
+
|
| 20 |
+
import argparse
|
| 21 |
+
import csv
|
| 22 |
+
import io
|
| 23 |
+
import json
|
| 24 |
+
import logging
|
| 25 |
+
import re
|
| 26 |
+
import sys
|
| 27 |
+
import unicodedata
|
| 28 |
+
from pathlib import Path
|
| 29 |
+
|
| 30 |
+
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
|
| 31 |
+
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8")
|
| 32 |
+
|
| 33 |
+
ROOT = Path(__file__).resolve().parent.parent
|
| 34 |
+
sys.path.insert(0, str(ROOT / "cognate_pipeline" / "src"))
|
| 35 |
+
sys.path.insert(0, str(ROOT / "scripts"))
|
| 36 |
+
|
| 37 |
+
from cognate_pipeline.normalise.sound_class import ipa_to_sound_class # noqa: E402
|
| 38 |
+
|
| 39 |
+
logger = logging.getLogger(__name__)
|
| 40 |
+
|
| 41 |
+
LEXICON_DIR = ROOT / "data" / "training" / "lexicons"
|
| 42 |
+
AUDIT_TRAIL_DIR = ROOT / "data" / "training" / "audit_trails"
|
| 43 |
+
RAW_DIR = ROOT / "data" / "training" / "raw"
|
| 44 |
+
|
| 45 |
+
ACD_DIR = RAW_DIR / "acd_cldf"
|
| 46 |
+
ACD_BASE = "https://raw.githubusercontent.com/lexibank/acd/main/cldf/"
|
| 47 |
+
|
| 48 |
+
# Blust notation → IPA mapping
|
| 49 |
+
# Reference: Blust (2009) The Austronesian Languages, Chapter 2
|
| 50 |
+
BLUST_TO_IPA = {
|
| 51 |
+
# Capital letters = special proto-phonemes
|
| 52 |
+
"C": "ts", # *C — voiceless dental/alveolar affricate
|
| 53 |
+
"N": "ŋ", # *N — velar nasal (sometimes ñ)
|
| 54 |
+
"R": "ʀ", # *R — uvular trill or retroflex
|
| 55 |
+
"S": "s", # *S — voiceless sibilant
|
| 56 |
+
"Z": "z", # *Z — voiced sibilant
|
| 57 |
+
"H": "h", # *H — laryngeal
|
| 58 |
+
"L": "ɬ", # *L — lateral fricative
|
| 59 |
+
"T": "t", # *T — voiceless dental stop
|
| 60 |
+
"D": "d", # *D — voiced dental stop
|
| 61 |
+
# Digraphs
|
| 62 |
+
"ng": "ŋ",
|
| 63 |
+
"ny": "ɲ",
|
| 64 |
+
"nj": "ɲ",
|
| 65 |
+
# Glottal
|
| 66 |
+
"q": "ʔ",
|
| 67 |
+
# Vowels with special values
|
| 68 |
+
"e": "ə", # Blust *e = schwa in PAN
|
| 69 |
+
# Subscript digits (used for homonyms) — remove
|
| 70 |
+
"₁": "", "₂": "", "₃": "", "₄": "", "₅": "",
|
| 71 |
+
"₆": "", "₇": "", "₈": "", "₉": "", "₀": "",
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def blust_to_ipa(form: str) -> str:
|
| 76 |
+
"""Convert Blust notation to approximate IPA."""
|
| 77 |
+
# Remove reconstruction asterisk
|
| 78 |
+
form = form.lstrip("*")
|
| 79 |
+
# Remove parenthetical optional segments
|
| 80 |
+
form = re.sub(r"\([^)]+\)", "", form)
|
| 81 |
+
|
| 82 |
+
# Greedy longest-match transliteration
|
| 83 |
+
keys = sorted(BLUST_TO_IPA.keys(), key=len, reverse=True)
|
| 84 |
+
result = []
|
| 85 |
+
i = 0
|
| 86 |
+
while i < len(form):
|
| 87 |
+
matched = False
|
| 88 |
+
for key in keys:
|
| 89 |
+
if form[i:i + len(key)] == key:
|
| 90 |
+
result.append(BLUST_TO_IPA[key])
|
| 91 |
+
i += len(key)
|
| 92 |
+
matched = True
|
| 93 |
+
break
|
| 94 |
+
if not matched:
|
| 95 |
+
if form[i] not in "- ": # skip hyphens and spaces
|
| 96 |
+
result.append(form[i])
|
| 97 |
+
i += 1
|
| 98 |
+
return "".join(result)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def download_if_needed():
|
| 102 |
+
"""Download ACD CLDF files if not cached."""
|
| 103 |
+
import urllib.request
|
| 104 |
+
|
| 105 |
+
ACD_DIR.mkdir(parents=True, exist_ok=True)
|
| 106 |
+
for fname in ("forms.csv", "languages.csv", "cognatesets.csv"):
|
| 107 |
+
local = ACD_DIR / fname
|
| 108 |
+
if local.exists():
|
| 109 |
+
logger.info("Cached: %s (%d bytes)", fname, local.stat().st_size)
|
| 110 |
+
continue
|
| 111 |
+
url = ACD_BASE + fname
|
| 112 |
+
logger.info("Downloading %s ...", url)
|
| 113 |
+
req = urllib.request.Request(url, headers={
|
| 114 |
+
"User-Agent": "PhaiPhon/1.0 (ancient-scripts-datasets)"
|
| 115 |
+
})
|
| 116 |
+
with urllib.request.urlopen(req, timeout=120) as resp:
|
| 117 |
+
data = resp.read()
|
| 118 |
+
with open(local, "wb") as f:
|
| 119 |
+
f.write(data)
|
| 120 |
+
logger.info("Downloaded %s (%d bytes)", fname, len(data))
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def load_proto_languages():
|
| 124 |
+
"""Load language metadata to identify proto-languages."""
|
| 125 |
+
lang_path = ACD_DIR / "languages.csv"
|
| 126 |
+
protos = {}
|
| 127 |
+
with open(lang_path, "r", encoding="utf-8") as f:
|
| 128 |
+
for row in csv.DictReader(f):
|
| 129 |
+
name = row.get("Name", "")
|
| 130 |
+
lid = row.get("ID", "")
|
| 131 |
+
# Proto-languages have names starting with "Proto-"
|
| 132 |
+
if name.startswith("Proto-"):
|
| 133 |
+
protos[lid] = name
|
| 134 |
+
return protos
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def extract_proto_forms():
|
| 138 |
+
"""Extract reconstructed forms from ACD CLDF."""
|
| 139 |
+
protos = load_proto_languages()
|
| 140 |
+
logger.info("Found %d proto-languages in ACD", len(protos))
|
| 141 |
+
|
| 142 |
+
forms_path = ACD_DIR / "forms.csv"
|
| 143 |
+
entries = {} # (proto_lang, form) -> {gloss, ...}
|
| 144 |
+
|
| 145 |
+
with open(forms_path, "r", encoding="utf-8") as f:
|
| 146 |
+
for row in csv.DictReader(f):
|
| 147 |
+
lang_id = row.get("Language_ID", "")
|
| 148 |
+
if lang_id not in protos:
|
| 149 |
+
continue
|
| 150 |
+
|
| 151 |
+
form = row.get("Form", "").strip()
|
| 152 |
+
value = row.get("Value", "").strip()
|
| 153 |
+
gloss = row.get("Description", "").strip()
|
| 154 |
+
|
| 155 |
+
if not form:
|
| 156 |
+
continue
|
| 157 |
+
# Use Form (cleaned) rather than Value (has optional segments)
|
| 158 |
+
word = form
|
| 159 |
+
|
| 160 |
+
# Strip infix angle brackets: C<in>aliS → CinaliS
|
| 161 |
+
word = re.sub(r"<([^>]+)>", r"\1", word)
|
| 162 |
+
# Strip parenthetical optional segments: (q)uNah → uNah
|
| 163 |
+
word = re.sub(r"\([^)]+\)", "", word)
|
| 164 |
+
# Remove leading asterisk
|
| 165 |
+
word = word.lstrip("*")
|
| 166 |
+
# Remove subscript digits (homonym markers)
|
| 167 |
+
word = re.sub(r"[₀₁₂₃₄₅₆₇₈₉]", "", word)
|
| 168 |
+
# Remove tilde variants: keep only first form
|
| 169 |
+
if " ~ " in word:
|
| 170 |
+
word = word.split(" ~ ")[0]
|
| 171 |
+
# Remove hyphens (prefix/suffix markers)
|
| 172 |
+
word = word.strip("-").strip()
|
| 173 |
+
|
| 174 |
+
# NFC normalize
|
| 175 |
+
word = unicodedata.normalize("NFC", word)
|
| 176 |
+
|
| 177 |
+
key = (lang_id, word)
|
| 178 |
+
if key not in entries:
|
| 179 |
+
entries[key] = {
|
| 180 |
+
"word": word,
|
| 181 |
+
"gloss": gloss,
|
| 182 |
+
"proto_lang": protos[lang_id],
|
| 183 |
+
"proto_lang_id": lang_id,
|
| 184 |
+
}
|
| 185 |
+
|
| 186 |
+
return entries
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
def load_existing_words(tsv_path: Path) -> set[str]:
|
| 190 |
+
"""Load existing Word column values."""
|
| 191 |
+
existing = set()
|
| 192 |
+
if tsv_path.exists():
|
| 193 |
+
with open(tsv_path, "r", encoding="utf-8") as f:
|
| 194 |
+
for line in f:
|
| 195 |
+
if line.startswith("Word\t"):
|
| 196 |
+
continue
|
| 197 |
+
word = line.split("\t")[0]
|
| 198 |
+
existing.add(word)
|
| 199 |
+
return existing
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def main():
|
| 203 |
+
parser = argparse.ArgumentParser(description="Ingest ACD Proto-Austronesian")
|
| 204 |
+
parser.add_argument("--dry-run", action="store_true")
|
| 205 |
+
args = parser.parse_args()
|
| 206 |
+
|
| 207 |
+
logging.basicConfig(
|
| 208 |
+
level=logging.INFO,
|
| 209 |
+
format="%(asctime)s %(levelname)s: %(message)s",
|
| 210 |
+
datefmt="%H:%M:%S",
|
| 211 |
+
)
|
| 212 |
+
|
| 213 |
+
download_if_needed()
|
| 214 |
+
|
| 215 |
+
# We ingest all proto-forms into a single map.tsv (Proto-Austronesian family)
|
| 216 |
+
tsv_path = LEXICON_DIR / "map.tsv"
|
| 217 |
+
existing = load_existing_words(tsv_path)
|
| 218 |
+
logger.info("Existing Proto-Austronesian entries: %d", len(existing))
|
| 219 |
+
|
| 220 |
+
entries = extract_proto_forms()
|
| 221 |
+
logger.info("ACD proto-forms: %d", len(entries))
|
| 222 |
+
|
| 223 |
+
# Count by proto-language
|
| 224 |
+
by_lang = {}
|
| 225 |
+
for (lid, _), info in entries.items():
|
| 226 |
+
name = info["proto_lang"]
|
| 227 |
+
by_lang[name] = by_lang.get(name, 0) + 1
|
| 228 |
+
for name, count in sorted(by_lang.items(), key=lambda x: -x[1])[:10]:
|
| 229 |
+
logger.info(" %s: %d", name, count)
|
| 230 |
+
|
| 231 |
+
# Process
|
| 232 |
+
new_entries = []
|
| 233 |
+
audit_trail = []
|
| 234 |
+
skipped = 0
|
| 235 |
+
|
| 236 |
+
for (lid, word), info in sorted(entries.items()):
|
| 237 |
+
clean_word = word.strip()
|
| 238 |
+
if not clean_word or len(clean_word) < 2 or len(clean_word) > 50:
|
| 239 |
+
skipped += 1
|
| 240 |
+
continue
|
| 241 |
+
|
| 242 |
+
if clean_word in existing:
|
| 243 |
+
skipped += 1
|
| 244 |
+
continue
|
| 245 |
+
|
| 246 |
+
# Convert Blust notation to IPA
|
| 247 |
+
ipa = blust_to_ipa(word)
|
| 248 |
+
if not ipa:
|
| 249 |
+
ipa = clean_word
|
| 250 |
+
|
| 251 |
+
try:
|
| 252 |
+
sca = ipa_to_sound_class(ipa)
|
| 253 |
+
except Exception:
|
| 254 |
+
sca = ""
|
| 255 |
+
|
| 256 |
+
new_entries.append({
|
| 257 |
+
"word": clean_word,
|
| 258 |
+
"ipa": ipa,
|
| 259 |
+
"sca": sca,
|
| 260 |
+
})
|
| 261 |
+
existing.add(clean_word)
|
| 262 |
+
|
| 263 |
+
audit_trail.append({
|
| 264 |
+
"word": clean_word,
|
| 265 |
+
"raw_form": word,
|
| 266 |
+
"ipa": ipa,
|
| 267 |
+
"gloss": info["gloss"],
|
| 268 |
+
"proto_lang": info["proto_lang"],
|
| 269 |
+
"source": "acd",
|
| 270 |
+
})
|
| 271 |
+
|
| 272 |
+
logger.info("New: %d, Skipped: %d", len(new_entries), skipped)
|
| 273 |
+
|
| 274 |
+
if args.dry_run:
|
| 275 |
+
print(f"\nDRY RUN: ACD Proto-Austronesian Ingestion:")
|
| 276 |
+
print(f" ACD proto-forms: {len(entries)}")
|
| 277 |
+
print(f" Existing: {len(existing) - len(new_entries)}")
|
| 278 |
+
print(f" New: {len(new_entries)}")
|
| 279 |
+
print(f" Total: {len(existing)}")
|
| 280 |
+
return
|
| 281 |
+
|
| 282 |
+
if new_entries:
|
| 283 |
+
LEXICON_DIR.mkdir(parents=True, exist_ok=True)
|
| 284 |
+
if not tsv_path.exists():
|
| 285 |
+
with open(tsv_path, "w", encoding="utf-8") as f:
|
| 286 |
+
f.write("Word\tIPA\tSCA\tSource\tConcept_ID\tCognate_Set_ID\n")
|
| 287 |
+
|
| 288 |
+
with open(tsv_path, "a", encoding="utf-8") as f:
|
| 289 |
+
for e in new_entries:
|
| 290 |
+
f.write(f"{e['word']}\t{e['ipa']}\t{e['sca']}\tacd\t-\t-\n")
|
| 291 |
+
|
| 292 |
+
if audit_trail:
|
| 293 |
+
AUDIT_TRAIL_DIR.mkdir(parents=True, exist_ok=True)
|
| 294 |
+
audit_path = AUDIT_TRAIL_DIR / "acd_ingest_map.jsonl"
|
| 295 |
+
with open(audit_path, "w", encoding="utf-8") as f:
|
| 296 |
+
for r in audit_trail:
|
| 297 |
+
f.write(json.dumps(r, ensure_ascii=False) + "\n")
|
| 298 |
+
|
| 299 |
+
print(f"\nACD Proto-Austronesian Ingestion:")
|
| 300 |
+
print(f" ACD proto-forms: {len(entries)}")
|
| 301 |
+
print(f" Existing: {len(existing) - len(new_entries)}")
|
| 302 |
+
print(f" New: {len(new_entries)}")
|
| 303 |
+
print(f" Total: {len(existing)}")
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
if __name__ == "__main__":
|
| 307 |
+
main()
|