Datasets:
Alvin committed on
Commit ·
26786e3
1
Parent(s): 8d2d3e2
Add complete dataset: all sources, metadata, scripts, docs, and phylo enrichment
Browse files

Syncs HuggingFace repo with full GitHub dataset:
- 1,177 lexicons (modern + ancient languages)
- 1,110 language profiles
- 102 audit trails
- 53 validation sets
- cognate_pipeline package (full Python source)
- All scripts (ingest, build, validate, scrape)
- All docs (PRDs, adversarial audits, database reference)
- Phylogenetic enrichment: phylo_pairs.tsv (386K pairs), Glottolog CLDF tree
- Additional data: gothic, iberian, ugaritic, religious_terms, linear_a, cited_sources
This view is limited to 50 files because it contains too many changes. See raw diff
- .gitignore +12 -0
- LICENSE +27 -0
- README.md +158 -0
- audit_sources.py +237 -0
- char_analysis.txt +81 -0
- char_analysis2.txt +70 -0
- cognate_pipeline/README.md +3 -0
- cognate_pipeline/alembic.ini +37 -0
- cognate_pipeline/config.example.yaml +445 -0
- cognate_pipeline/pyproject.toml +46 -0
- cognate_pipeline/src/cognate_pipeline/__init__.py +3 -0
- cognate_pipeline/src/cognate_pipeline/cli/__init__.py +0 -0
- cognate_pipeline/src/cognate_pipeline/cli/detect_cmd.py +73 -0
- cognate_pipeline/src/cognate_pipeline/cli/export_cldf_cmd.py +24 -0
- cognate_pipeline/src/cognate_pipeline/cli/export_jsonld_cmd.py +24 -0
- cognate_pipeline/src/cognate_pipeline/cli/ingest_cmd.py +63 -0
- cognate_pipeline/src/cognate_pipeline/cli/load_cmd.py +63 -0
- cognate_pipeline/src/cognate_pipeline/cli/main.py +81 -0
- cognate_pipeline/src/cognate_pipeline/cli/normalise_cmd.py +40 -0
- cognate_pipeline/src/cognate_pipeline/cognate/__init__.py +0 -0
- cognate_pipeline/src/cognate_pipeline/cognate/baseline_levenshtein.py +112 -0
- cognate_pipeline/src/cognate_pipeline/cognate/candidate_gen.py +101 -0
- cognate_pipeline/src/cognate_pipeline/cognate/clustering.py +169 -0
- cognate_pipeline/src/cognate_pipeline/cognate/family_map.json +3 -0
- cognate_pipeline/src/cognate_pipeline/cognate/lexstat_detector.py +96 -0
- cognate_pipeline/src/cognate_pipeline/cognate/models.py +90 -0
- cognate_pipeline/src/cognate_pipeline/config/__init__.py +0 -0
- cognate_pipeline/src/cognate_pipeline/config/loader.py +19 -0
- cognate_pipeline/src/cognate_pipeline/config/schema.py +109 -0
- cognate_pipeline/src/cognate_pipeline/db/__init__.py +0 -0
- cognate_pipeline/src/cognate_pipeline/db/connection.py +29 -0
- cognate_pipeline/src/cognate_pipeline/db/loader.py +171 -0
- cognate_pipeline/src/cognate_pipeline/db/migrations/__init__.py +0 -0
- cognate_pipeline/src/cognate_pipeline/db/migrations/env.py +49 -0
- cognate_pipeline/src/cognate_pipeline/db/migrations/versions/001_initial_schema.py +160 -0
- cognate_pipeline/src/cognate_pipeline/db/schema.py +191 -0
- cognate_pipeline/src/cognate_pipeline/export/__init__.py +0 -0
- cognate_pipeline/src/cognate_pipeline/export/cldf_exporter.py +152 -0
- cognate_pipeline/src/cognate_pipeline/export/jsonld_exporter.py +138 -0
- cognate_pipeline/src/cognate_pipeline/ingest/__init__.py +0 -0
- cognate_pipeline/src/cognate_pipeline/ingest/base.py +20 -0
- cognate_pipeline/src/cognate_pipeline/ingest/cldf_ingester.py +89 -0
- cognate_pipeline/src/cognate_pipeline/ingest/csv_ingester.py +164 -0
- cognate_pipeline/src/cognate_pipeline/ingest/json_ingester.py +114 -0
- cognate_pipeline/src/cognate_pipeline/ingest/language_map.json +3 -0
- cognate_pipeline/src/cognate_pipeline/ingest/language_resolver.py +75 -0
- cognate_pipeline/src/cognate_pipeline/ingest/models.py +123 -0
- cognate_pipeline/src/cognate_pipeline/ingest/wiktionary_ingester.py +94 -0
- cognate_pipeline/src/cognate_pipeline/normalise/__init__.py +0 -0
- cognate_pipeline/src/cognate_pipeline/normalise/epitran_backend.py +62 -0
.gitignore
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
| 2 |
+
*.pyc
|
| 3 |
+
.DS_Store
|
| 4 |
+
Thumbs.db
|
| 5 |
+
|
| 6 |
+
# Cloned CLDF source repositories (large, not ours to commit)
|
| 7 |
+
sources/
|
| 8 |
+
|
| 9 |
+
# Training data (too large for git, regenerated from scripts)
|
| 10 |
+
data/training/lexicons/
|
| 11 |
+
data/training/cognate_pairs/
|
| 12 |
+
# data/training/validation/ — now tracked via Git LFS
|
LICENSE
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Creative Commons Attribution-ShareAlike 4.0 International
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2026 Nacryos
|
| 4 |
+
|
| 5 |
+
This work is licensed under the Creative Commons Attribution-ShareAlike 4.0
|
| 6 |
+
International License. To view a copy of this license, visit
|
| 7 |
+
http://creativecommons.org/licenses/by-sa/4.0/ or send a letter to
|
| 8 |
+
Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
|
| 9 |
+
|
| 10 |
+
You are free to:
|
| 11 |
+
|
| 12 |
+
Share - copy and redistribute the material in any medium or format
|
| 13 |
+
Adapt - remix, transform, and build upon the material for any purpose,
|
| 14 |
+
even commercially
|
| 15 |
+
|
| 16 |
+
Under the following terms:
|
| 17 |
+
|
| 18 |
+
Attribution - You must give appropriate credit, provide a link to the
|
| 19 |
+
license, and indicate if changes were made.
|
| 20 |
+
ShareAlike - If you remix, transform, or build upon the material, you
|
| 21 |
+
must distribute your contributions under the same license.
|
| 22 |
+
|
| 23 |
+
No additional restrictions - You may not apply legal terms or technological
|
| 24 |
+
measures that legally restrict others from doing anything the license permits.
|
| 25 |
+
|
| 26 |
+
Note: Individual data sources used in this dataset may have their own licenses.
|
| 27 |
+
See docs/DATABASE_REFERENCE.md Section 5 (Source Registry) for details.
|
README.md
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Ancient Scripts Decipherment Datasets
|
| 2 |
+
|
| 3 |
+
Collated datasets for the paper:
|
| 4 |
+
|
| 5 |
+
> **Deciphering Undersegmented Ancient Scripts Using Phonetic Prior**
|
| 6 |
+
> Jiaming Luo, Frederik Hartmann, Enrico Santus, Regina Barzilay, Yuan Cao
|
| 7 |
+
> *Transactions of the Association for Computational Linguistics*, 2021
|
| 8 |
+
> [arXiv:2010.11054](https://arxiv.org/abs/2010.11054)
|
| 9 |
+
|
| 10 |
+
This repository gathers the training datasets used in the paper — both those hosted in the authors' GitHub repos and the external cited sources.
|
| 11 |
+
|
| 12 |
+
---
|
| 13 |
+
|
| 14 |
+
## Repository Structure
|
| 15 |
+
|
| 16 |
+
```
|
| 17 |
+
data/
|
| 18 |
+
├── gothic/ # Gothic language data
|
| 19 |
+
│ ├── got.pretrained.pth # Pretrained phonological embeddings (PyTorch)
|
| 20 |
+
│ ├── segments.pkl # Phonetic segment data (Python pickle)
|
| 21 |
+
│ ├── gotica.txt # Gothic Bible plain text (Wulfila project)
|
| 22 |
+
│ └── gotica.xml.zip # Gothic Bible TEI XML (Wulfila project)
|
| 23 |
+
│
|
| 24 |
+
├── ugaritic/ # Ugaritic-Hebrew cognate data
|
| 25 |
+
│ ├── uga-heb.no_spe.cog # Full cognate pairs (TSV, ~7,353 tokens)
|
| 26 |
+
│ └── uga-heb.small.no_spe.cog # Small training subset (~10% of full)
|
| 27 |
+
│
|
| 28 |
+
├── iberian/ # Iberian inscription data
|
| 29 |
+
│ └── iberian.csv # Cleaned Hesperia epigraphy (3,466 chunks)
|
| 30 |
+
│
|
| 31 |
+
├── religious_terms/ # ** CURATED SUBSET: Religious vocabulary **
|
| 32 |
+
│ ├── README.md # Methodology and category definitions
|
| 33 |
+
│ ├── ugaritic_hebrew_religious.tsv # ~170 Ug-Heb cognate pairs (deity, ritual, sacred)
|
| 34 |
+
│ ├── gothic_religious.tsv # ~65 Gothic Bible religious terms
|
| 35 |
+
│ └── iberian_religious.tsv # ~40 Iberian votive/religious elements
|
| 36 |
+
│
|
| 37 |
+
├── validation/ # Phylogenetic validation dataset (9 branches)
|
| 38 |
+
│ ├── README.md # Format, sources, concept list
|
| 39 |
+
│ ├── concepts.tsv # 40 shared concept IDs
|
| 40 |
+
│ ├── germanic.tsv # got, ang, non, goh (~160 entries)
|
| 41 |
+
│ ├── celtic.tsv # sga, cym, bre (~120 entries)
|
| 42 |
+
│ ├── balto_slavic.tsv # lit, chu, rus (~120 entries)
|
| 43 |
+
│ ├── indo_iranian.tsv # san, ave, fas (~120 entries)
|
| 44 |
+
│ ├── italic.tsv # lat, osc, xum (~120 entries)
|
| 45 |
+
│ ├── hellenic.tsv # grc, gmy (~80 entries)
|
| 46 |
+
│ ├── semitic.tsv # heb, arb, amh (~120 entries)
|
| 47 |
+
│ ├── turkic.tsv # otk, tur, aze (~120 entries)
|
| 48 |
+
│ └── uralic.tsv # fin, hun, est (~120 entries)
|
| 49 |
+
│
|
| 50 |
+
└── cited_sources/ # External datasets cited in the paper
|
| 51 |
+
├── genesis/
|
| 52 |
+
│ ├── Hebrew.xml # Hebrew Bible (Christodouloupoulos & Steedman 2015)
|
| 53 |
+
│ └── Latin.xml # Latin Bible (same corpus)
|
| 54 |
+
├── basque/
|
| 55 |
+
│ ├── Basque-NT.xml # Basque New Testament (same corpus)
|
| 56 |
+
│ └── Trask_Etymological_Dictionary_Basque.pdf # Trask's Basque etymological dictionary
|
| 57 |
+
└── iberian_names/
|
| 58 |
+
└── RodriguezRamos2014.pdf # Iberian onomastic index (personal names)
|
| 59 |
+
```
|
| 60 |
+
|
| 61 |
+
---
|
| 62 |
+
|
| 63 |
+
## Dataset Details
|
| 64 |
+
|
| 65 |
+
### Gothic (`data/gothic/`)
|
| 66 |
+
|
| 67 |
+
| File | Source | Description |
|
| 68 |
+
|---|---|---|
|
| 69 |
+
| `got.pretrained.pth` | [DecipherUnsegmented](https://github.com/j-luo93/DecipherUnsegmented) | Pretrained phonological embeddings trained on Gothic IPA data |
|
| 70 |
+
| `segments.pkl` | [DecipherUnsegmented](https://github.com/j-luo93/DecipherUnsegmented) | Serialized phonetic segment inventory |
|
| 71 |
+
| `gotica.txt` | [Wulfila Project](https://www.wulfila.be/gothic/download/) | Plain text of the Gothic Bible (4th century CE translation by Bishop Wulfila) |
|
| 72 |
+
| `gotica.xml.zip` | [Wulfila Project](https://www.wulfila.be/gothic/download/) | TEI P5 XML encoding with linguistic annotations |
|
| 73 |
+
|
| 74 |
+
The Gothic Bible is the primary source of Gothic text. The paper uses unsegmented Gothic inscriptions from the 3rd-10th century AD period.
|
| 75 |
+
|
| 76 |
+
### Ugaritic (`data/ugaritic/`)
|
| 77 |
+
|
| 78 |
+
| File | Source | Description |
|
| 79 |
+
|---|---|---|
|
| 80 |
+
| `uga-heb.no_spe.cog` | [NeuroDecipher](https://github.com/j-luo93/NeuroDecipher) | Full Ugaritic-Hebrew cognate pairs |
|
| 81 |
+
| `uga-heb.small.no_spe.cog` | [NeuroDecipher](https://github.com/j-luo93/NeuroDecipher) | ~10% training subset |
|
| 82 |
+
|
| 83 |
+
**Format:** Tab-separated values. Each row is a cognate pair. Column 1 = Ugaritic transliteration, Column 2 = Hebrew transliteration. `|` separates multiple cognates; `_` marks missing entries. Originally from Snyder et al. (2010), covering 7,353 segmented tokens from the 14th-12th century BC.
|
| 84 |
+
|
| 85 |
+
### Iberian (`data/iberian/`)
|
| 86 |
+
|
| 87 |
+
| File | Source | Description |
|
| 88 |
+
|---|---|---|
|
| 89 |
+
| `iberian.csv` | [DecipherUnsegmented](https://github.com/j-luo93/DecipherUnsegmented) | Cleaned epigraphic inscriptions |
|
| 90 |
+
|
| 91 |
+
**Format:** CSV with columns `REF. HESPERIA` (inscription reference code) and `cleaned` (transcribed text). Contains 3,466 undersegmented character chunks from the 6th-1st century BC. Sourced from the [Hesperia database](http://hesperia.ucm.es/en/proyecto_hesperia.php) and cleaned via the authors' Jupyter notebook.
|
| 92 |
+
|
| 93 |
+
### Cited Sources (`data/cited_sources/`)
|
| 94 |
+
|
| 95 |
+
These are external datasets referenced in the paper for known-language vocabularies and comparison:
|
| 96 |
+
|
| 97 |
+
| File | Citation | Usage in Paper |
|
| 98 |
+
|---|---|---|
|
| 99 |
+
| `genesis/Hebrew.xml` | Christodouloupoulos & Steedman (2015) | Hebrew vocabulary for Ugaritic comparison |
|
| 100 |
+
| `genesis/Latin.xml` | Christodouloupoulos & Steedman (2015) | Latin vocabulary for cross-linguistic comparison |
|
| 101 |
+
| `basque/Basque-NT.xml` | Christodouloupoulos & Steedman (2015) | Basque vocabulary for Iberian comparison |
|
| 102 |
+
| `basque/Trask_Etymological_Dictionary_Basque.pdf` | Trask (2008) | Basque etymological data |
|
| 103 |
+
| `iberian_names/RodriguezRamos2014.pdf` | Rodriguez Ramos (2014) | Iberian personal name lists with Latin correspondences |
|
| 104 |
+
|
| 105 |
+
The Bible texts are from the [Massively Parallel Bible Corpus](https://github.com/christos-c/bible-corpus) (CC0 licensed).
|
| 106 |
+
|
| 107 |
+
---
|
| 108 |
+
|
| 109 |
+
## Additional Data Sources (Not Included)
|
| 110 |
+
|
| 111 |
+
The following sources were cited in the paper but are not machine-readable or freely downloadable:
|
| 112 |
+
|
| 113 |
+
- **Wiktionary descendant trees** for Proto-Germanic, Old Norse, and Old English vocabularies — extracted by the authors from Wiktionary's structured data
|
| 114 |
+
- **Original Hesperia epigraphy** (`hesperia_epigraphy.csv`) — referenced in the DecipherUnsegmented README but not present in the repository
|
| 115 |
+
|
| 116 |
+
---
|
| 117 |
+
|
| 118 |
+
## Cognate Detection Pipeline
|
| 119 |
+
|
| 120 |
+
The `cognate_pipeline/` directory contains a full Python package for cross-linguistic cognate detection, built on the datasets in this repository. It provides:
|
| 121 |
+
|
| 122 |
+
- **Ingestion** of CSV/TSV/COG, CLDF, Wiktionary JSONL, and generic JSON sources
|
| 123 |
+
- **Phonetic normalisation** with transcription type tracking (IPA, transliteration, orthographic)
|
| 124 |
+
- **SCA sound class encoding** (List 2012) for phonological comparison
|
| 125 |
+
- **Family-aware cognate candidate generation** (tags `cognate_inherited` vs `similarity_only`)
|
| 126 |
+
- **Weighted Levenshtein scoring** with SCA-class-aware substitution costs
|
| 127 |
+
- **Clustering** via connected components or UPGMA
|
| 128 |
+
- **PostgreSQL/PostGIS database** with 8 normalised tables and Alembic migrations
|
| 129 |
+
- **Export** to CLDF Wordlist and JSON-LD formats
|
| 130 |
+
- **Full provenance tracking** through every pipeline stage
|
| 131 |
+
|
| 132 |
+
Supports 36 languages across 9 phylogenetic branches (Germanic, Celtic, Balto-Slavic, Indo-Iranian, Italic, Hellenic, Semitic, Turkic, Uralic) plus isolates, with Glottocode resolution and IPA transcriptions.
|
| 133 |
+
|
| 134 |
+
See `data/validation/README.md` for the phylogenetic validation dataset.
|
| 135 |
+
|
| 136 |
+
See [`cognate_pipeline/README.md`](cognate_pipeline/README.md) for installation and usage.
|
| 137 |
+
|
| 138 |
+
---
|
| 139 |
+
|
| 140 |
+
## Original Repositories
|
| 141 |
+
|
| 142 |
+
- [j-luo93/DecipherUnsegmented](https://github.com/j-luo93/DecipherUnsegmented) — main code for the paper
|
| 143 |
+
- [j-luo93/NeuroDecipher](https://github.com/j-luo93/NeuroDecipher) — predecessor (Ugaritic/Linear B decipherment)
|
| 144 |
+
- [j-luo93/xib](https://github.com/j-luo93/xib) — earlier Iberian codebase
|
| 145 |
+
|
| 146 |
+
## Paper Citation
|
| 147 |
+
|
| 148 |
+
```bibtex
|
| 149 |
+
@article{luo2021deciphering,
|
| 150 |
+
title={Deciphering Undersegmented Ancient Scripts Using Phonetic Prior},
|
| 151 |
+
author={Luo, Jiaming and Hartmann, Frederik and Santus, Enrico and Barzilay, Regina and Cao, Yuan},
|
| 152 |
+
journal={Transactions of the Association for Computational Linguistics},
|
| 153 |
+
volume={9},
|
| 154 |
+
pages={69--81},
|
| 155 |
+
year={2021},
|
| 156 |
+
doi={10.1162/tacl_a_00354}
|
| 157 |
+
}
|
| 158 |
+
```
|
audit_sources.py
ADDED
|
@@ -0,0 +1,237 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
Exhaustive audit of Source values across all lexicon TSV files.

Every row in every file is checked: each row's Source column is classified
as valid, empty, dash ('-'), whitespace-padded, or unknown, and a detailed
report is printed to stdout.

Usage:
    python audit_sources.py [lexicon_dir]
"""
import csv
import os
import sys
from collections import Counter, defaultdict

# Default scan location; override with the first command-line argument.
DEFAULT_LEXICON_DIR = r"C:\Users\alvin\hf-ancient-scripts\data\training\lexicons"

# Closed set of source identifiers considered valid.
VALID_SOURCES = {
    "abvd", "wikipron", "northeuralex", "wold", "wiktionary", "wiktionary_cat",
    "sinotibetan", "palaeolexicon", "ediana", "oracc_ecut", "tir_raetica",
    "kaikki", "kassian2010", "kassian2010_basic", "seed", "wikipedia", "avesta_org",
}


def classify_source(raw):
    """Classify one raw Source cell.

    Returns (category, padded): category is one of 'empty', 'dash',
    'valid', 'unknown'; padded is True when a non-empty, non-dash value
    carries leading/trailing whitespace.
    """
    stripped = raw.strip()
    if stripped == "":
        return "empty", False
    if stripped == "-":
        return "dash", False
    padded = stripped != raw
    return ("valid" if stripped in VALID_SOURCES else "unknown"), padded


def _new_results():
    """Fresh accumulator for one audit run."""
    return {
        "tsv_files": [],
        "total_entries": 0,
        "valid": 0,
        "empty": 0,
        "dash": 0,
        "unknown": 0,
        "whitespace": 0,
        "all_sources": Counter(),               # raw value -> occurrence count
        "unknown_sources": defaultdict(list),   # value -> [(file, row, word)]
        "files_with_empty": defaultdict(list),  # file -> [(row, word, ipa)]
        "files_with_dash": defaultdict(list),   # file -> [(row, word, ipa)]
        "files_with_unknown": defaultdict(list),  # file -> [(row, word, value)]
        "file_stats": {},                       # file -> per-file counters
        "files_missing_source_col": [],
    }


def _audit_file(fpath, fname, res):
    """Audit a single TSV file, updating the shared accumulator in place."""
    counts = {"total": 0, "valid": 0, "empty": 0,
              "dash": 0, "unknown": 0, "whitespace": 0}
    with open(fpath, "r", encoding="utf-8") as f:
        reader = csv.DictReader(f, delimiter="\t")

        if reader.fieldnames is None or "Source" not in reader.fieldnames:
            # No Source column at all: every row counts as empty.
            res["files_missing_source_col"].append(fname)
            for _ in reader:
                counts["total"] += 1
                counts["empty"] += 1
            res["total_entries"] += counts["total"]
            res["empty"] += counts["empty"]
            res["file_stats"][fname] = counts
            return

        for row_num, row in enumerate(reader, start=2):  # row 1 is the header
            counts["total"] += 1
            source = row.get("Source") or ""  # short rows yield None
            word = row.get("Word", "???")
            ipa = row.get("IPA", "???")

            res["all_sources"][source] += 1
            category, padded = classify_source(source)
            if padded:
                counts["whitespace"] += 1

            if category == "empty":
                counts["empty"] += 1
                res["files_with_empty"][fname].append((row_num, word, ipa))
            elif category == "dash":
                counts["dash"] += 1
                res["files_with_dash"][fname].append((row_num, word, ipa))
            elif category == "valid":
                counts["valid"] += 1
            else:
                # Preserve the original recording convention: whitespace-padded
                # unknowns are recorded raw, clean unknowns stripped.
                recorded = source if padded else source.strip()
                counts["unknown"] += 1
                res["unknown_sources"][recorded].append((fname, row_num, word))
                res["files_with_unknown"][fname].append((row_num, word, recorded))

    res["total_entries"] += counts["total"]
    for key in ("valid", "empty", "dash", "unknown", "whitespace"):
        res[key] += counts[key]
    res["file_stats"][fname] = counts


def audit_directory(lexicon_dir):
    """Audit every .tsv file directly under *lexicon_dir*; return the tallies."""
    res = _new_results()
    res["tsv_files"] = sorted(
        f for f in os.listdir(lexicon_dir) if f.endswith(".tsv")
    )
    for fname in res["tsv_files"]:
        _audit_file(os.path.join(lexicon_dir, fname), fname, res)
    return res


def print_report(res):
    """Print the full audit report for one accumulator to stdout."""
    tsv_files = res["tsv_files"]
    total = res["total_entries"]
    denom = total or 1  # guard: empty directory must not ZeroDivisionError

    print("=" * 80)
    print("EXHAUSTIVE SOURCE AUDIT REPORT")
    print("=" * 80)

    print(f"\nFiles scanned: {len(tsv_files)}")
    print(f"Total entries (rows): {total:,}")
    print(f"  Valid source: {res['valid']:,} ({100*res['valid']/denom:.2f}%)")
    print(f"  Empty/missing source: {res['empty']:,} ({100*res['empty']/denom:.4f}%)")
    print(f"  Dash '-' as source: {res['dash']:,} ({100*res['dash']/denom:.4f}%)")
    print(f"  Unknown source value: {res['unknown']:,} ({100*res['unknown']/denom:.4f}%)")
    print(f"  (Whitespace-padded): {res['whitespace']:,}")

    # Files missing the Source column entirely.
    if res["files_missing_source_col"]:
        print(f"\n{'='*80}")
        print(f"FILES MISSING 'Source' COLUMN ENTIRELY: {len(res['files_missing_source_col'])}")
        print("=" * 80)
        for f in res["files_missing_source_col"]:
            print(f"  - {f}")

    # Every distinct raw Source value seen, most frequent first.
    print(f"\n{'='*80}")
    print("ALL UNIQUE SOURCE VALUES (across entire database):")
    print("=" * 80)
    for src, count in sorted(res["all_sources"].items(), key=lambda x: -x[1]):
        s = src.strip()
        if s == "":
            marker = " <-- EMPTY"
        elif s == "-":
            marker = " <-- DASH"
        elif s not in VALID_SOURCES:
            marker = " <-- UNKNOWN"
        else:
            marker = ""
        print(f"  '{src}' : {count:,} entries{marker}")

    # Unknown source values with sample occurrences.
    if res["unknown_sources"]:
        print(f"\n{'='*80}")
        print("UNKNOWN SOURCE VALUES (not in valid list):")
        print("=" * 80)
        for src_val, occurrences in sorted(res["unknown_sources"].items(),
                                           key=lambda x: -len(x[1])):
            print(f"\n  Source value: '{src_val}' ({len(occurrences)} occurrences)")
            for fname, row_num, word in occurrences[:10]:
                print(f"    - {fname} row {row_num}: word='{word}'")
            if len(occurrences) > 10:
                print(f"    ... and {len(occurrences)-10} more")

    # Files containing empty sources; list everything when >1% of the file.
    if res["files_with_empty"]:
        print(f"\n{'='*80}")
        print("FILES WITH EMPTY SOURCE ENTRIES:")
        print("=" * 80)
        for fname, entries in sorted(res["files_with_empty"].items()):
            stats = res["file_stats"][fname]
            pct = 100 * len(entries) / stats["total"] if stats["total"] > 0 else 0
            print(f"\n  {fname}: {len(entries)} empty / {stats['total']} total ({pct:.1f}%)")
            if pct > 1.0:
                print("    ** >1% empty — listing all entries:")
                for row_num, word, ipa in entries:
                    print(f"    Row {row_num}: Word='{word}', IPA='{ipa}'")
            else:
                for row_num, word, ipa in entries[:5]:
                    print(f"    Row {row_num}: Word='{word}', IPA='{ipa}'")
                if len(entries) > 5:
                    print(f"    ... and {len(entries)-5} more")

    # Files using a bare dash as the source.
    if res["files_with_dash"]:
        print(f"\n{'='*80}")
        print(f"FILES WITH DASH '-' AS SOURCE ({len(res['files_with_dash'])} files):")
        print("=" * 80)
        for fname, entries in sorted(res["files_with_dash"].items()):
            stats = res["file_stats"][fname]
            pct = 100 * len(entries) / stats["total"] if stats["total"] > 0 else 0
            print(f"  {fname}: {len(entries)} dash / {stats['total']} total ({pct:.1f}%)")
            if len(entries) <= 5:
                for row_num, word, ipa in entries:
                    print(f"    Row {row_num}: Word='{word}', IPA='{ipa}'")

    # Files with unknown source values.
    if res["files_with_unknown"]:
        print(f"\n{'='*80}")
        print(f"FILES WITH UNKNOWN SOURCE VALUES ({len(res['files_with_unknown'])} files):")
        print("=" * 80)
        for fname, entries in sorted(res["files_with_unknown"].items()):
            stats = res["file_stats"][fname]
            print(f"\n  {fname}: {len(entries)} unknown / {stats['total']} total")
            for row_num, word, src_val in entries[:10]:
                print(f"    Row {row_num}: Word='{word}', Source='{src_val}'")
            if len(entries) > 10:
                print(f"    ... and {len(entries)-10} more")

    # Roll-up of every file that had at least one problem.
    problem_files = set()
    problem_files.update(res["files_with_empty"])
    problem_files.update(res["files_with_dash"])
    problem_files.update(res["files_with_unknown"])
    problem_files.update(res["files_missing_source_col"])

    print(f"\n{'='*80}")
    print("SUMMARY")
    print("=" * 80)
    print(f"Total files: {len(tsv_files)}")
    print(f"Files with problems: {len(problem_files)}")
    print(f"  - Missing Source column: {len(res['files_missing_source_col'])}")
    print(f"  - With empty sources: {len(res['files_with_empty'])}")
    print(f"  - With dash sources: {len(res['files_with_dash'])}")
    print(f"  - With unknown sources: {len(res['files_with_unknown'])}")
    print(f"Clean files: {len(tsv_files) - len(problem_files)}")

    if not problem_files:
        print("\n*** ALL FILES PASS: Every row in every file has a valid source. ***")
    else:
        print(f"\n*** {len(problem_files)} FILES HAVE ISSUES — see details above. ***")


def main():
    """CLI entry point: audit argv[1] if given, otherwise the default dir."""
    lexicon_dir = sys.argv[1] if len(sys.argv) > 1 else DEFAULT_LEXICON_DIR
    res = audit_directory(lexicon_dir)
    print(f"Found {len(res['tsv_files'])} TSV files to audit.\n")
    print_report(res)


if __name__ == "__main__":
    main()
|
char_analysis.txt
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Total words: 266
|
| 2 |
+
Characters covered by map keys: ['a', 'b', 'd', 'e', 'g', 'h', 'i', 'k', 'l', 'm', 'n', 'p', 'r', 's', 't', 'u', 'w', 'y', 'z']
|
| 3 |
+
|
| 4 |
+
All unique characters in Word column (37):
|
| 5 |
+
' ' U+0020 char_occurrences= 2 UNMAPPED
|
| 6 |
+
'.' U+002E char_occurrences= 2 UNMAPPED
|
| 7 |
+
'6' U+0036 char_occurrences= 1 UNMAPPED
|
| 8 |
+
'a' U+0061 char_occurrences= 343 MAPPED
|
| 9 |
+
'b' U+0062 char_occurrences= 3 MAPPED
|
| 10 |
+
'd' U+0064 char_occurrences= 22 MAPPED
|
| 11 |
+
'e' U+0065 char_occurrences= 67 MAPPED
|
| 12 |
+
'g' U+0067 char_occurrences= 17 MAPPED
|
| 13 |
+
'h' U+0068 char_occurrences= 22 MAPPED
|
| 14 |
+
'i' U+0069 char_occurrences= 167 MAPPED
|
| 15 |
+
'j' U+006A char_occurrences= 2 UNMAPPED
|
| 16 |
+
'k' U+006B char_occurrences= 71 MAPPED
|
| 17 |
+
'l' U+006C char_occurrences= 52 MAPPED
|
| 18 |
+
'm' U+006D char_occurrences= 37 MAPPED
|
| 19 |
+
'n' U+006E char_occurrences= 95 MAPPED
|
| 20 |
+
'p' U+0070 char_occurrences= 58 MAPPED
|
| 21 |
+
'r' U+0072 char_occurrences= 85 MAPPED
|
| 22 |
+
's' U+0073 char_occurrences= 82 MAPPED
|
| 23 |
+
't' U+0074 char_occurrences= 61 MAPPED
|
| 24 |
+
'u' U+0075 char_occurrences= 141 MAPPED
|
| 25 |
+
'w' U+0077 char_occurrences= 43 MAPPED
|
| 26 |
+
'x' U+0078 char_occurrences= 1 UNMAPPED
|
| 27 |
+
'y' U+0079 char_occurrences= 16 MAPPED
|
| 28 |
+
'z' U+007A char_occurrences= 89 MAPPED
|
| 29 |
+
'à' U+00E0 char_occurrences= 1 UNMAPPED
|
| 30 |
+
'í' U+00ED char_occurrences= 1 UNMAPPED
|
| 31 |
+
'ú' U+00FA char_occurrences= 1 UNMAPPED
|
| 32 |
+
'ā' U+0101 char_occurrences= 22 UNMAPPED
|
| 33 |
+
'ē' U+0113 char_occurrences= 8 UNMAPPED
|
| 34 |
+
'ī' U+012B char_occurrences= 4 UNMAPPED
|
| 35 |
+
'š' U+0161 char_occurrences= 97 UNMAPPED
|
| 36 |
+
'ū' U+016B char_occurrences= 3 UNMAPPED
|
| 37 |
+
'́' U+0301 char_occurrences= 2 UNMAPPED
|
| 38 |
+
'̩' U+0329 char_occurrences= 1 UNMAPPED
|
| 39 |
+
'͡' U+0361 char_occurrences= 1 UNMAPPED
|
| 40 |
+
'ḫ' U+1E2B char_occurrences= 46 UNMAPPED
|
| 41 |
+
'ṣ' U+1E63 char_occurrences= 1 UNMAPPED
|
| 42 |
+
|
| 43 |
+
=== UNMAPPED CHARACTERS (entries affected) ===
|
| 44 |
+
'š' U+0161 entries= 77 char_occurrences= 97
|
| 45 |
+
'ḫ' U+1E2B entries= 37 char_occurrences= 46
|
| 46 |
+
'ā' U+0101 entries= 22 char_occurrences= 22
|
| 47 |
+
'ē' U+0113 entries= 8 char_occurrences= 8
|
| 48 |
+
'ī' U+012B entries= 4 char_occurrences= 4
|
| 49 |
+
'ū' U+016B entries= 3 char_occurrences= 3
|
| 50 |
+
'j' U+006A entries= 2 char_occurrences= 2
|
| 51 |
+
'́' U+0301 entries= 2 char_occurrences= 2
|
| 52 |
+
'.' U+002E entries= 2 char_occurrences= 2
|
| 53 |
+
'à' U+00E0 entries= 1 char_occurrences= 1
|
| 54 |
+
'ú' U+00FA entries= 1 char_occurrences= 1
|
| 55 |
+
'6' U+0036 entries= 1 char_occurrences= 1
|
| 56 |
+
'͡' U+0361 entries= 1 char_occurrences= 1
|
| 57 |
+
' ' U+0020 entries= 1 char_occurrences= 2
|
| 58 |
+
'̩' U+0329 entries= 1 char_occurrences= 1
|
| 59 |
+
'í' U+00ED entries= 1 char_occurrences= 1
|
| 60 |
+
'x' U+0078 entries= 1 char_occurrences= 1
|
| 61 |
+
'ṣ' U+1E63 entries= 1 char_occurrences= 1
|
| 62 |
+
|
| 63 |
+
=== EXAMPLE WORDS FOR EACH UNMAPPED CHARACTER ===
|
| 64 |
+
'š' U+0161: ['addaaš', 'alpuš', 'ammiyaanteeššar', 'annaš', 'antuwaaḫḫaaš']
|
| 65 |
+
'ḫ' U+1E2B: ['antuwaaḫḫaaš', 'arḫaš', 'eešḫaaḫru', 'eešḫar', 'eešḫarnuuzzi']
|
| 66 |
+
'ā' U+0101: ['apiyā', 'arāwanzi', 'arāš', 'awārna', 'ašāwar']
|
| 67 |
+
'ē' U+0113: ['alēl', 'nēpis', 'nēwas', 'nēšaš', 'pēr']
|
| 68 |
+
'ī' U+012B: ['aurīš', 'lalatīna', 'tītan', 'īṣā́']
|
| 69 |
+
'ū' U+016B: ['zūwaš', 'ūḫḫi', 'ḫūmanz']
|
| 70 |
+
'j' U+006A: ['jā́nu', 'sjeus']
|
| 71 |
+
'́' U+0301: ['jā́nu', 'īṣā́']
|
| 72 |
+
'.' U+002E: ['munus.lugalaš', 'munus.lugalraaš']
|
| 73 |
+
'à' U+00E0: ['šàer']
|
| 74 |
+
'ú' U+00FA: ['úul']
|
| 75 |
+
'6' U+0036: ['ku6']
|
| 76 |
+
'͡' U+0361: ['marsant͡s']
|
| 77 |
+
' ' U+0020: ['ninnudiš maš galgal']
|
| 78 |
+
'̩' U+0329: ['iwr̩']
|
| 79 |
+
'í' U+00ED: ['sígaš']
|
| 80 |
+
'x' U+0078: ['xšap']
|
| 81 |
+
'ṣ' U+1E63: ['īṣā́']
|
char_analysis2.txt
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
=== 'š' (sh (s-caron)) U+0161 ===
|
| 3 |
+
Word=addaaš IPA=atːaːš
|
| 4 |
+
Word=alpuš IPA=alpuš
|
| 5 |
+
Word=ammiyaanteeššar IPA=ammijaːnteːššar
|
| 6 |
+
Word=annaš IPA=annaš
|
| 7 |
+
Word=antuwaaḫḫaaš IPA=antuwaːḫḫaːš
|
| 8 |
+
Word=araaaš IPA=araːaš
|
| 9 |
+
Word=arauwaaš IPA=arauwaːš
|
| 10 |
+
Word=ariyašeššar IPA=arijašeššar
|
| 11 |
+
|
| 12 |
+
=== 'ḫ' (h-under (h with breve below)) U+1E2B ===
|
| 13 |
+
Word=antuwaaḫḫaaš IPA=antuwaːḫḫaːš
|
| 14 |
+
Word=arḫaš IPA=arxaš
|
| 15 |
+
Word=eešḫaaḫru IPA=eːšḫaːḫru
|
| 16 |
+
Word=eešḫar IPA=eːšḫar
|
| 17 |
+
Word=eešḫarnuuzzi IPA=eːšḫarnuːtsːi
|
| 18 |
+
Word=išḫaaššaraaš IPA=išḫaːššaraːš
|
| 19 |
+
Word=išḫimaaaš IPA=išḫimaːaš
|
| 20 |
+
Word=išḫāš IPA=išxaːš
|
| 21 |
+
|
| 22 |
+
=== 'ā' (a-macron) U+0101 ===
|
| 23 |
+
Word=apiyā IPA=apijā
|
| 24 |
+
Word=arāwanzi IPA=arāwantsi
|
| 25 |
+
Word=arāš IPA=araːš
|
| 26 |
+
Word=awārna IPA=awaːrna
|
| 27 |
+
Word=ašāwar IPA=ašaːwar
|
| 28 |
+
Word=išḫāš IPA=išxaːš
|
| 29 |
+
Word=jā́nu IPA=jaː́nu
|
| 30 |
+
Word=paddāi IPA=patːāi
|
| 31 |
+
|
| 32 |
+
=== 'ē' (e-macron) U+0113 ===
|
| 33 |
+
Word=alēl IPA=aleːl
|
| 34 |
+
Word=nēpis IPA=neːpis
|
| 35 |
+
Word=nēwas IPA=neːwas
|
| 36 |
+
Word=nēšaš IPA=neːšaš
|
| 37 |
+
Word=pēr IPA=peːr
|
| 38 |
+
Word=tēnawaš IPA=teːnawaš
|
| 39 |
+
Word=ēzzzzi IPA=ētsːtsːi
|
| 40 |
+
Word=ēšzi IPA=ēštsi
|
| 41 |
+
|
| 42 |
+
=== 'ī' (i-macron) U+012B ===
|
| 43 |
+
Word=aurīš IPA=auriːš
|
| 44 |
+
Word=lalatīna IPA=lalatiːna
|
| 45 |
+
Word=tītan IPA=tiːtan
|
| 46 |
+
Word=īṣā́ IPA=iːṣaː́
|
| 47 |
+
|
| 48 |
+
=== 'ū' (u-macron) U+016B ===
|
| 49 |
+
Word=zūwaš IPA=tsūwaš
|
| 50 |
+
Word=ūḫḫi IPA=uːxːi
|
| 51 |
+
Word=ḫūmanz IPA=ḫūmants
|
| 52 |
+
|
| 53 |
+
=== 'j' (j) U+006A ===
|
| 54 |
+
Word=jā́nu IPA=jaː́nu
|
| 55 |
+
Word=sjeus IPA=sjeus
|
| 56 |
+
|
| 57 |
+
=== 'à' (a-grave) U+00E0 ===
|
| 58 |
+
Word=šàer IPA=šaer
|
| 59 |
+
|
| 60 |
+
=== 'í' (i-acute) U+00ED ===
|
| 61 |
+
Word=sígaš IPA=síkaš
|
| 62 |
+
|
| 63 |
+
=== 'ú' (u-acute) U+00FA ===
|
| 64 |
+
Word=úul IPA=úul
|
| 65 |
+
|
| 66 |
+
=== 'ṣ' (s-dot-below) U+1E63 ===
|
| 67 |
+
Word=īṣā́ IPA=iːṣaː́
|
| 68 |
+
|
| 69 |
+
=== 'x' (x) U+0078 ===
|
| 70 |
+
Word=xšap IPA=xšap
|
cognate_pipeline/README.md
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Cognate Pipeline
|
| 2 |
+
|
| 3 |
+
Cross-linguistic cognate detection pipeline with provenance tracking.
|
cognate_pipeline/alembic.ini
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[alembic]
|
| 2 |
+
script_location = src/cognate_pipeline/db/migrations
|
| 3 |
+
prepend_sys_path = .
|
| 4 |
+
sqlalchemy.url = postgresql+psycopg://postgres@localhost:5432/cognate_db
|
| 5 |
+
|
| 6 |
+
[loggers]
|
| 7 |
+
keys = root,sqlalchemy,alembic
|
| 8 |
+
|
| 9 |
+
[handlers]
|
| 10 |
+
keys = console
|
| 11 |
+
|
| 12 |
+
[formatters]
|
| 13 |
+
keys = generic
|
| 14 |
+
|
| 15 |
+
[logger_root]
|
| 16 |
+
level = WARN
|
| 17 |
+
handlers = console
|
| 18 |
+
|
| 19 |
+
[logger_sqlalchemy]
|
| 20 |
+
level = WARN
|
| 21 |
+
handlers =
|
| 22 |
+
qualname = sqlalchemy.engine
|
| 23 |
+
|
| 24 |
+
[logger_alembic]
|
| 25 |
+
level = INFO
|
| 26 |
+
handlers =
|
| 27 |
+
qualname = alembic
|
| 28 |
+
|
| 29 |
+
[handler_console]
|
| 30 |
+
class = StreamHandler
|
| 31 |
+
args = (sys.stderr,)
|
| 32 |
+
level = NOTSET
|
| 33 |
+
formatter = generic
|
| 34 |
+
|
| 35 |
+
[formatter_generic]
|
| 36 |
+
format = %(levelname)-5.5s [%(name)s] %(message)s
|
| 37 |
+
datefmt = %H:%M:%S
|
cognate_pipeline/config.example.yaml
ADDED
|
@@ -0,0 +1,445 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
staging_dir: staging
|
| 2 |
+
log_level: INFO
|
| 3 |
+
batch_size: 5000
|
| 4 |
+
|
| 5 |
+
# Glottolog languoid CSV (optional — needed for family-aware cognate detection)
|
| 6 |
+
# glottolog_data_dir: /path/to/glottolog-data
|
| 7 |
+
|
| 8 |
+
sources:
|
| 9 |
+
- name: ugaritic_hebrew
|
| 10 |
+
path: ../ancient-scripts-datasets/data/ugaritic/uga-heb.small.no_spe.cog
|
| 11 |
+
format: cog
|
| 12 |
+
license: "Research / Fair Use"
|
| 13 |
+
license_url: ""
|
| 14 |
+
citation_bibtex: |
|
| 15 |
+
@article{luo2021deciphering,
|
| 16 |
+
title={Deciphering Undersegmented Ancient Scripts Using Phonetic Prior},
|
| 17 |
+
author={Luo, Jiaming and Hartmann, Frederik and Santus, Enrico and Barzilay, Regina and Cao, Yuan},
|
| 18 |
+
journal={Transactions of the Association for Computational Linguistics},
|
| 19 |
+
year={2021}
|
| 20 |
+
}
|
| 21 |
+
column_mapping:
|
| 22 |
+
language: null
|
| 23 |
+
form: null
|
| 24 |
+
concept: null
|
| 25 |
+
delimiter: "\t"
|
| 26 |
+
extra:
|
| 27 |
+
lang_a: uga
|
| 28 |
+
lang_b: heb
|
| 29 |
+
header_a: "uga-no_spe"
|
| 30 |
+
header_b: "heb-no_spe"
|
| 31 |
+
|
| 32 |
+
- name: religious_terms
|
| 33 |
+
path: ../ancient-scripts-datasets/data/religious_terms/ugaritic_hebrew_religious.tsv
|
| 34 |
+
format: tsv
|
| 35 |
+
license: "Research / Fair Use"
|
| 36 |
+
column_mapping:
|
| 37 |
+
language: null
|
| 38 |
+
form: ugaritic_form
|
| 39 |
+
concept: english_meaning
|
| 40 |
+
ipa: null
|
| 41 |
+
delimiter: "\t"
|
| 42 |
+
extra:
|
| 43 |
+
lang_a_col: ugaritic_form
|
| 44 |
+
lang_b_col: hebrew_cognate
|
| 45 |
+
|
| 46 |
+
# --- Validation branches (9 phylogenetic families) ---
|
| 47 |
+
|
| 48 |
+
- name: germanic
|
| 49 |
+
path: ../ancient-scripts-datasets/data/validation/germanic.tsv
|
| 50 |
+
format: tsv
|
| 51 |
+
license: "Research / Fair Use"
|
| 52 |
+
column_mapping:
|
| 53 |
+
language: Language_ID
|
| 54 |
+
form: Form
|
| 55 |
+
concept: Parameter_ID
|
| 56 |
+
ipa: IPA
|
| 57 |
+
glottocode: Glottocode
|
| 58 |
+
|
| 59 |
+
- name: celtic
|
| 60 |
+
path: ../ancient-scripts-datasets/data/validation/celtic.tsv
|
| 61 |
+
format: tsv
|
| 62 |
+
license: "Research / Fair Use"
|
| 63 |
+
column_mapping:
|
| 64 |
+
language: Language_ID
|
| 65 |
+
form: Form
|
| 66 |
+
concept: Parameter_ID
|
| 67 |
+
ipa: IPA
|
| 68 |
+
glottocode: Glottocode
|
| 69 |
+
|
| 70 |
+
- name: balto_slavic
|
| 71 |
+
path: ../ancient-scripts-datasets/data/validation/balto_slavic.tsv
|
| 72 |
+
format: tsv
|
| 73 |
+
license: "Research / Fair Use"
|
| 74 |
+
column_mapping:
|
| 75 |
+
language: Language_ID
|
| 76 |
+
form: Form
|
| 77 |
+
concept: Parameter_ID
|
| 78 |
+
ipa: IPA
|
| 79 |
+
glottocode: Glottocode
|
| 80 |
+
|
| 81 |
+
- name: indo_iranian
|
| 82 |
+
path: ../ancient-scripts-datasets/data/validation/indo_iranian.tsv
|
| 83 |
+
format: tsv
|
| 84 |
+
license: "Research / Fair Use"
|
| 85 |
+
column_mapping:
|
| 86 |
+
language: Language_ID
|
| 87 |
+
form: Form
|
| 88 |
+
concept: Parameter_ID
|
| 89 |
+
ipa: IPA
|
| 90 |
+
glottocode: Glottocode
|
| 91 |
+
|
| 92 |
+
- name: italic
|
| 93 |
+
path: ../ancient-scripts-datasets/data/validation/italic.tsv
|
| 94 |
+
format: tsv
|
| 95 |
+
license: "Research / Fair Use"
|
| 96 |
+
column_mapping:
|
| 97 |
+
language: Language_ID
|
| 98 |
+
form: Form
|
| 99 |
+
concept: Parameter_ID
|
| 100 |
+
ipa: IPA
|
| 101 |
+
glottocode: Glottocode
|
| 102 |
+
|
| 103 |
+
- name: hellenic
|
| 104 |
+
path: ../ancient-scripts-datasets/data/validation/hellenic.tsv
|
| 105 |
+
format: tsv
|
| 106 |
+
license: "Research / Fair Use"
|
| 107 |
+
column_mapping:
|
| 108 |
+
language: Language_ID
|
| 109 |
+
form: Form
|
| 110 |
+
concept: Parameter_ID
|
| 111 |
+
ipa: IPA
|
| 112 |
+
glottocode: Glottocode
|
| 113 |
+
|
| 114 |
+
- name: semitic
|
| 115 |
+
path: ../ancient-scripts-datasets/data/validation/semitic.tsv
|
| 116 |
+
format: tsv
|
| 117 |
+
license: "Research / Fair Use"
|
| 118 |
+
column_mapping:
|
| 119 |
+
language: Language_ID
|
| 120 |
+
form: Form
|
| 121 |
+
concept: Parameter_ID
|
| 122 |
+
ipa: IPA
|
| 123 |
+
glottocode: Glottocode
|
| 124 |
+
|
| 125 |
+
- name: turkic
|
| 126 |
+
path: ../ancient-scripts-datasets/data/validation/turkic.tsv
|
| 127 |
+
format: tsv
|
| 128 |
+
license: "Research / Fair Use"
|
| 129 |
+
column_mapping:
|
| 130 |
+
language: Language_ID
|
| 131 |
+
form: Form
|
| 132 |
+
concept: Parameter_ID
|
| 133 |
+
ipa: IPA
|
| 134 |
+
glottocode: Glottocode
|
| 135 |
+
|
| 136 |
+
- name: uralic
|
| 137 |
+
path: ../ancient-scripts-datasets/data/validation/uralic.tsv
|
| 138 |
+
format: tsv
|
| 139 |
+
license: "Research / Fair Use"
|
| 140 |
+
column_mapping:
|
| 141 |
+
language: Language_ID
|
| 142 |
+
form: Form
|
| 143 |
+
concept: Parameter_ID
|
| 144 |
+
ipa: IPA
|
| 145 |
+
glottocode: Glottocode
|
| 146 |
+
|
| 147 |
+
# --- Cross-family names subset (deity, proper, place names) ---
|
| 148 |
+
|
| 149 |
+
- name: names
|
| 150 |
+
path: ../ancient-scripts-datasets/data/validation/names.tsv
|
| 151 |
+
format: tsv
|
| 152 |
+
license: "Research / Fair Use"
|
| 153 |
+
column_mapping:
|
| 154 |
+
language: Language_ID
|
| 155 |
+
form: Form
|
| 156 |
+
concept: Parameter_ID
|
| 157 |
+
ipa: IPA
|
| 158 |
+
glottocode: Glottocode
|
| 159 |
+
|
| 160 |
+
# --- Expanded validation branches (original langs + CLDF additions) ---
|
| 161 |
+
|
| 162 |
+
- name: germanic_expanded
|
| 163 |
+
path: ../ancient-scripts-datasets/data/validation/germanic_expanded.tsv
|
| 164 |
+
format: tsv
|
| 165 |
+
license: "CC-BY / Research"
|
| 166 |
+
column_mapping: &expanded_cols
|
| 167 |
+
language: Language_ID
|
| 168 |
+
form: Form
|
| 169 |
+
concept: Parameter_ID
|
| 170 |
+
ipa: IPA
|
| 171 |
+
glottocode: Glottocode
|
| 172 |
+
|
| 173 |
+
- name: celtic_expanded
|
| 174 |
+
path: ../ancient-scripts-datasets/data/validation/celtic_expanded.tsv
|
| 175 |
+
format: tsv
|
| 176 |
+
license: "CC-BY / Research"
|
| 177 |
+
column_mapping: *expanded_cols
|
| 178 |
+
|
| 179 |
+
- name: balto_slavic_expanded
|
| 180 |
+
path: ../ancient-scripts-datasets/data/validation/balto_slavic_expanded.tsv
|
| 181 |
+
format: tsv
|
| 182 |
+
license: "CC-BY / Research"
|
| 183 |
+
column_mapping: *expanded_cols
|
| 184 |
+
|
| 185 |
+
- name: indo_iranian_expanded
|
| 186 |
+
path: ../ancient-scripts-datasets/data/validation/indo_iranian_expanded.tsv
|
| 187 |
+
format: tsv
|
| 188 |
+
license: "CC-BY / Research"
|
| 189 |
+
column_mapping: *expanded_cols
|
| 190 |
+
|
| 191 |
+
- name: italic_expanded
|
| 192 |
+
path: ../ancient-scripts-datasets/data/validation/italic_expanded.tsv
|
| 193 |
+
format: tsv
|
| 194 |
+
license: "CC-BY / Research"
|
| 195 |
+
column_mapping: *expanded_cols
|
| 196 |
+
|
| 197 |
+
- name: hellenic_expanded
|
| 198 |
+
path: ../ancient-scripts-datasets/data/validation/hellenic_expanded.tsv
|
| 199 |
+
format: tsv
|
| 200 |
+
license: "CC-BY / Research"
|
| 201 |
+
column_mapping: *expanded_cols
|
| 202 |
+
|
| 203 |
+
- name: semitic_expanded
|
| 204 |
+
path: ../ancient-scripts-datasets/data/validation/semitic_expanded.tsv
|
| 205 |
+
format: tsv
|
| 206 |
+
license: "CC-BY / Research"
|
| 207 |
+
column_mapping: *expanded_cols
|
| 208 |
+
|
| 209 |
+
- name: turkic_expanded
|
| 210 |
+
path: ../ancient-scripts-datasets/data/validation/turkic_expanded.tsv
|
| 211 |
+
format: tsv
|
| 212 |
+
license: "CC-BY / Research"
|
| 213 |
+
column_mapping: *expanded_cols
|
| 214 |
+
|
| 215 |
+
- name: uralic_expanded
|
| 216 |
+
path: ../ancient-scripts-datasets/data/validation/uralic_expanded.tsv
|
| 217 |
+
format: tsv
|
| 218 |
+
license: "CC-BY / Research"
|
| 219 |
+
column_mapping: *expanded_cols
|
| 220 |
+
|
| 221 |
+
# --- New language families ---
|
| 222 |
+
|
| 223 |
+
- name: albanian
|
| 224 |
+
path: ../ancient-scripts-datasets/data/validation/albanian.tsv
|
| 225 |
+
format: tsv
|
| 226 |
+
license: "CC-BY / Research"
|
| 227 |
+
column_mapping: *expanded_cols
|
| 228 |
+
|
| 229 |
+
- name: armenian
|
| 230 |
+
path: ../ancient-scripts-datasets/data/validation/armenian.tsv
|
| 231 |
+
format: tsv
|
| 232 |
+
license: "CC-BY / Research"
|
| 233 |
+
column_mapping: *expanded_cols
|
| 234 |
+
|
| 235 |
+
- name: dravidian
|
| 236 |
+
path: ../ancient-scripts-datasets/data/validation/dravidian.tsv
|
| 237 |
+
format: tsv
|
| 238 |
+
license: "CC-BY / Research"
|
| 239 |
+
column_mapping: *expanded_cols
|
| 240 |
+
|
| 241 |
+
- name: kartvelian
|
| 242 |
+
path: ../ancient-scripts-datasets/data/validation/kartvelian.tsv
|
| 243 |
+
format: tsv
|
| 244 |
+
license: "CC-BY / Research"
|
| 245 |
+
column_mapping: *expanded_cols
|
| 246 |
+
|
| 247 |
+
- name: austronesian
|
| 248 |
+
path: ../ancient-scripts-datasets/data/validation/austronesian.tsv
|
| 249 |
+
format: tsv
|
| 250 |
+
license: "CC-BY / Research"
|
| 251 |
+
column_mapping: *expanded_cols
|
| 252 |
+
|
| 253 |
+
- name: sino_tibetan
|
| 254 |
+
path: ../ancient-scripts-datasets/data/validation/sino_tibetan.tsv
|
| 255 |
+
format: tsv
|
| 256 |
+
license: "CC-BY / Research"
|
| 257 |
+
column_mapping: *expanded_cols
|
| 258 |
+
|
| 259 |
+
- name: mongolic
|
| 260 |
+
path: ../ancient-scripts-datasets/data/validation/mongolic.tsv
|
| 261 |
+
format: tsv
|
| 262 |
+
license: "CC-BY / Research"
|
| 263 |
+
column_mapping: *expanded_cols
|
| 264 |
+
|
| 265 |
+
- name: tungusic
|
| 266 |
+
path: ../ancient-scripts-datasets/data/validation/tungusic.tsv
|
| 267 |
+
format: tsv
|
| 268 |
+
license: "CC-BY / Research"
|
| 269 |
+
column_mapping: *expanded_cols
|
| 270 |
+
|
| 271 |
+
- name: japonic
|
| 272 |
+
path: ../ancient-scripts-datasets/data/validation/japonic.tsv
|
| 273 |
+
format: tsv
|
| 274 |
+
license: "CC-BY / Research"
|
| 275 |
+
column_mapping: *expanded_cols
|
| 276 |
+
|
| 277 |
+
- name: koreanic
|
| 278 |
+
path: ../ancient-scripts-datasets/data/validation/koreanic.tsv
|
| 279 |
+
format: tsv
|
| 280 |
+
license: "CC-BY / Research"
|
| 281 |
+
column_mapping: *expanded_cols
|
| 282 |
+
|
| 283 |
+
- name: northeast_caucasian
|
| 284 |
+
path: ../ancient-scripts-datasets/data/validation/northeast_caucasian.tsv
|
| 285 |
+
format: tsv
|
| 286 |
+
license: "CC-BY / Research"
|
| 287 |
+
column_mapping: *expanded_cols
|
| 288 |
+
|
| 289 |
+
- name: northwest_caucasian
|
| 290 |
+
path: ../ancient-scripts-datasets/data/validation/northwest_caucasian.tsv
|
| 291 |
+
format: tsv
|
| 292 |
+
license: "CC-BY / Research"
|
| 293 |
+
column_mapping: *expanded_cols
|
| 294 |
+
|
| 295 |
+
- name: eskimo_aleut
|
| 296 |
+
path: ../ancient-scripts-datasets/data/validation/eskimo_aleut.tsv
|
| 297 |
+
format: tsv
|
| 298 |
+
license: "CC-BY / Research"
|
| 299 |
+
column_mapping: *expanded_cols
|
| 300 |
+
|
| 301 |
+
- name: isolates
|
| 302 |
+
path: ../ancient-scripts-datasets/data/validation/isolates.tsv
|
| 303 |
+
format: tsv
|
| 304 |
+
license: "CC-BY / Research"
|
| 305 |
+
column_mapping: *expanded_cols
|
| 306 |
+
|
| 307 |
+
- name: afroasiatic_berber
|
| 308 |
+
path: ../ancient-scripts-datasets/data/validation/afroasiatic_berber.tsv
|
| 309 |
+
format: tsv
|
| 310 |
+
license: "CC-BY / Research"
|
| 311 |
+
column_mapping: *expanded_cols
|
| 312 |
+
|
| 313 |
+
- name: afroasiatic_chadic
|
| 314 |
+
path: ../ancient-scripts-datasets/data/validation/afroasiatic_chadic.tsv
|
| 315 |
+
format: tsv
|
| 316 |
+
license: "CC-BY / Research"
|
| 317 |
+
column_mapping: *expanded_cols
|
| 318 |
+
|
| 319 |
+
- name: afroasiatic_cushitic
|
| 320 |
+
path: ../ancient-scripts-datasets/data/validation/afroasiatic_cushitic.tsv
|
| 321 |
+
format: tsv
|
| 322 |
+
license: "CC-BY / Research"
|
| 323 |
+
column_mapping: *expanded_cols
|
| 324 |
+
|
| 325 |
+
- name: niger_congo_bantu
|
| 326 |
+
path: ../ancient-scripts-datasets/data/validation/niger_congo_bantu.tsv
|
| 327 |
+
format: tsv
|
| 328 |
+
license: "CC-BY / Research"
|
| 329 |
+
column_mapping: *expanded_cols
|
| 330 |
+
|
| 331 |
+
- name: tai_kadai
|
| 332 |
+
path: ../ancient-scripts-datasets/data/validation/tai_kadai.tsv
|
| 333 |
+
format: tsv
|
| 334 |
+
license: "CC-BY / Research"
|
| 335 |
+
column_mapping: *expanded_cols
|
| 336 |
+
|
| 337 |
+
- name: austroasiatic
|
| 338 |
+
path: ../ancient-scripts-datasets/data/validation/austroasiatic.tsv
|
| 339 |
+
format: tsv
|
| 340 |
+
license: "CC-BY / Research"
|
| 341 |
+
column_mapping: *expanded_cols
|
| 342 |
+
|
| 343 |
+
- name: mayan
|
| 344 |
+
path: ../ancient-scripts-datasets/data/validation/mayan.tsv
|
| 345 |
+
format: tsv
|
| 346 |
+
license: "CC-BY / Research"
|
| 347 |
+
column_mapping: *expanded_cols
|
| 348 |
+
|
| 349 |
+
- name: uto_aztecan
|
| 350 |
+
path: ../ancient-scripts-datasets/data/validation/uto_aztecan.tsv
|
| 351 |
+
format: tsv
|
| 352 |
+
license: "CC-BY / Research"
|
| 353 |
+
column_mapping: *expanded_cols
|
| 354 |
+
|
| 355 |
+
- name: quechuan
|
| 356 |
+
path: ../ancient-scripts-datasets/data/validation/quechuan.tsv
|
| 357 |
+
format: tsv
|
| 358 |
+
license: "CC-BY / Research"
|
| 359 |
+
column_mapping: *expanded_cols
|
| 360 |
+
|
| 361 |
+
- name: hmong_mien
|
| 362 |
+
path: ../ancient-scripts-datasets/data/validation/hmong_mien.tsv
|
| 363 |
+
format: tsv
|
| 364 |
+
license: "CC-BY / Research"
|
| 365 |
+
column_mapping: *expanded_cols
|
| 366 |
+
|
| 367 |
+
- name: chukotko_kamchatkan
|
| 368 |
+
path: ../ancient-scripts-datasets/data/validation/chukotko_kamchatkan.tsv
|
| 369 |
+
format: tsv
|
| 370 |
+
license: "CC-BY / Research"
|
| 371 |
+
column_mapping: *expanded_cols
|
| 372 |
+
|
| 373 |
+
- name: yukaghir
|
| 374 |
+
path: ../ancient-scripts-datasets/data/validation/yukaghir.tsv
|
| 375 |
+
format: tsv
|
| 376 |
+
license: "CC-BY / Research"
|
| 377 |
+
column_mapping: *expanded_cols
|
| 378 |
+
|
| 379 |
+
- name: saharan
|
| 380 |
+
path: ../ancient-scripts-datasets/data/validation/saharan.tsv
|
| 381 |
+
format: tsv
|
| 382 |
+
license: "CC-BY / Research"
|
| 383 |
+
column_mapping: *expanded_cols
|
| 384 |
+
|
| 385 |
+
- name: pama_nyungan
|
| 386 |
+
path: ../ancient-scripts-datasets/data/validation/pama_nyungan.tsv
|
| 387 |
+
format: tsv
|
| 388 |
+
license: "CC-BY / Research"
|
| 389 |
+
column_mapping: *expanded_cols
|
| 390 |
+
|
| 391 |
+
# --- Training data (full-dictionary scale) ---
|
| 392 |
+
|
| 393 |
+
- name: training_lexicons
|
| 394 |
+
path: ../ancient-scripts-datasets/data/training/lexicons/
|
| 395 |
+
format: tsv
|
| 396 |
+
license: "CC-BY / Research / WikiPron"
|
| 397 |
+
column_mapping:
|
| 398 |
+
language: null
|
| 399 |
+
form: Word
|
| 400 |
+
concept: Concept_ID
|
| 401 |
+
ipa: IPA
|
| 402 |
+
extra:
|
| 403 |
+
description: "Per-language lexicon TSVs from WikiPron + CLDF sources"
|
| 404 |
+
entries_target: 4000000
|
| 405 |
+
|
| 406 |
+
- name: training_cognate_pairs
|
| 407 |
+
path: ../ancient-scripts-datasets/data/training/cognate_pairs/
|
| 408 |
+
format: tsv
|
| 409 |
+
license: "CC-BY / Research"
|
| 410 |
+
column_mapping:
|
| 411 |
+
language: null
|
| 412 |
+
form: null
|
| 413 |
+
concept: Concept_ID
|
| 414 |
+
extra:
|
| 415 |
+
description: "Cognate pair files (inherited, similarity, borrowing)"
|
| 416 |
+
|
| 417 |
+
database:
|
| 418 |
+
host: localhost
|
| 419 |
+
port: 5432
|
| 420 |
+
name: cognate_db
|
| 421 |
+
user: postgres
|
| 422 |
+
password: ""
|
| 423 |
+
schema_name: public
|
| 424 |
+
|
| 425 |
+
normalisation:
|
| 426 |
+
unicode_form: NFC
|
| 427 |
+
strip_suprasegmentals: false
|
| 428 |
+
strip_whitespace: true
|
| 429 |
+
ipa_backend_priority:
|
| 430 |
+
- attested
|
| 431 |
+
- epitran
|
| 432 |
+
- phonemizer
|
| 433 |
+
transliteration_passthrough: true
|
| 434 |
+
|
| 435 |
+
cognate:
|
| 436 |
+
method: baseline_lev
|
| 437 |
+
clustering: connected_components
|
| 438 |
+
threshold: 0.5
|
| 439 |
+
family_aware: true
|
| 440 |
+
batch_size: 10000
|
| 441 |
+
|
| 442 |
+
export:
|
| 443 |
+
cldf_output_dir: export/cldf
|
| 444 |
+
jsonld_output_dir: export/jsonld
|
| 445 |
+
include_provenance: true
|
cognate_pipeline/pyproject.toml
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[build-system]
|
| 2 |
+
requires = ["hatchling"]
|
| 3 |
+
build-backend = "hatchling.build"
|
| 4 |
+
|
| 5 |
+
[project]
|
| 6 |
+
name = "cognate-pipeline"
|
| 7 |
+
version = "0.1.0"
|
| 8 |
+
description = "Cross-linguistic cognate detection pipeline with provenance tracking"
|
| 9 |
+
readme = "README.md"
|
| 10 |
+
requires-python = ">=3.11"
|
| 11 |
+
license = "MIT"
|
| 12 |
+
dependencies = [
|
| 13 |
+
"typer>=0.12,<1",
|
| 14 |
+
"pydantic>=2.5,<3",
|
| 15 |
+
"pyyaml>=6,<7",
|
| 16 |
+
"orjson>=3.9,<4",
|
| 17 |
+
"sqlalchemy>=2.0,<3",
|
| 18 |
+
"geoalchemy2>=0.14,<1",
|
| 19 |
+
"psycopg[binary]>=3.1,<4",
|
| 20 |
+
"alembic>=1.13,<2",
|
| 21 |
+
]
|
| 22 |
+
|
| 23 |
+
[project.optional-dependencies]
|
| 24 |
+
lingpy = ["lingpy>=2.6.12,<3"]
|
| 25 |
+
epitran = ["epitran>=1.24,<2"]
|
| 26 |
+
cldf = ["pycldf>=1.35,<2", "clldutils>=3.5,<4"]
|
| 27 |
+
phonemizer = ["phonemizer>=3.2,<4"]
|
| 28 |
+
all = [
|
| 29 |
+
"cognate-pipeline[lingpy,epitran,cldf,phonemizer]",
|
| 30 |
+
]
|
| 31 |
+
dev = [
|
| 32 |
+
"pytest>=8,<9",
|
| 33 |
+
"pytest-cov>=5,<6",
|
| 34 |
+
]
|
| 35 |
+
|
| 36 |
+
[project.scripts]
|
| 37 |
+
cognate-pipeline = "cognate_pipeline.cli.main:app"
|
| 38 |
+
|
| 39 |
+
[tool.hatch.build.targets.wheel]
|
| 40 |
+
packages = ["src/cognate_pipeline"]
|
| 41 |
+
|
| 42 |
+
[tool.pytest.ini_options]
|
| 43 |
+
testpaths = ["tests"]
|
| 44 |
+
markers = [
|
| 45 |
+
"e2e: end-to-end tests requiring external services",
|
| 46 |
+
]
|
cognate_pipeline/src/cognate_pipeline/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Cognate Pipeline — cross-linguistic cognate detection with provenance."""
|
| 2 |
+
|
| 3 |
+
__version__ = "0.1.0"
|
cognate_pipeline/src/cognate_pipeline/cli/__init__.py
ADDED
|
File without changes
|
cognate_pipeline/src/cognate_pipeline/cli/detect_cmd.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""CLI handler for the detect-cognates subcommand."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
|
| 8 |
+
import orjson
|
| 9 |
+
|
| 10 |
+
from cognate_pipeline.config.loader import load_config
|
| 11 |
+
from cognate_pipeline.config.schema import CognateMethod
|
| 12 |
+
from cognate_pipeline.cognate.candidate_gen import generate_candidates
|
| 13 |
+
from cognate_pipeline.cognate.baseline_levenshtein import BaselineLevenshtein
|
| 14 |
+
from cognate_pipeline.cognate.lexstat_detector import LexStatDetector
|
| 15 |
+
from cognate_pipeline.cognate.clustering import cluster_links
|
| 16 |
+
from cognate_pipeline.normalise.models import NormalisedLexeme
|
| 17 |
+
from cognate_pipeline.utils.logging_setup import setup_logging
|
| 18 |
+
|
| 19 |
+
logger = logging.getLogger(__name__)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def run_detect(config_path: str, method_override: str | None) -> None:
|
| 23 |
+
cfg = load_config(config_path)
|
| 24 |
+
setup_logging(cfg.log_level)
|
| 25 |
+
|
| 26 |
+
method = CognateMethod(method_override) if method_override else cfg.cognate.method
|
| 27 |
+
|
| 28 |
+
norm_dir = cfg.staging_dir / "normalised"
|
| 29 |
+
cog_dir = cfg.staging_dir / "cognate"
|
| 30 |
+
cog_dir.mkdir(parents=True, exist_ok=True)
|
| 31 |
+
|
| 32 |
+
# Load all normalised lexemes
|
| 33 |
+
lexemes: list[NormalisedLexeme] = []
|
| 34 |
+
for jsonl_path in sorted(norm_dir.glob("*.jsonl")):
|
| 35 |
+
if jsonl_path.name.startswith("_"):
|
| 36 |
+
continue
|
| 37 |
+
with jsonl_path.open("rb") as fh:
|
| 38 |
+
for line in fh:
|
| 39 |
+
lexemes.append(NormalisedLexeme.from_dict(orjson.loads(line)))
|
| 40 |
+
logger.info("Loaded %d normalised lexemes", len(lexemes))
|
| 41 |
+
|
| 42 |
+
# Generate candidate pairs
|
| 43 |
+
pairs = generate_candidates(lexemes, family_aware=cfg.cognate.family_aware)
|
| 44 |
+
logger.info("Generated %d candidate pairs", len(pairs))
|
| 45 |
+
|
| 46 |
+
# Score pairs
|
| 47 |
+
if method == CognateMethod.BASELINE_LEV:
|
| 48 |
+
scorer = BaselineLevenshtein()
|
| 49 |
+
links = scorer.score_pairs(pairs, threshold=cfg.cognate.threshold)
|
| 50 |
+
elif method == CognateMethod.LEXSTAT:
|
| 51 |
+
detector = LexStatDetector()
|
| 52 |
+
links = detector.detect(lexemes, threshold=cfg.cognate.threshold)
|
| 53 |
+
else:
|
| 54 |
+
raise ValueError(f"Unknown method: {method}")
|
| 55 |
+
logger.info("Scored %d cognate links above threshold", len(links))
|
| 56 |
+
|
| 57 |
+
# Cluster
|
| 58 |
+
sets = cluster_links(links, algorithm=cfg.cognate.clustering)
|
| 59 |
+
logger.info("Formed %d cognate sets", len(sets))
|
| 60 |
+
|
| 61 |
+
# Write links
|
| 62 |
+
links_path = cog_dir / "cognate_links.jsonl"
|
| 63 |
+
with links_path.open("wb") as fh:
|
| 64 |
+
for link in links:
|
| 65 |
+
fh.write(orjson.dumps(link.to_dict()) + b"\n")
|
| 66 |
+
|
| 67 |
+
# Write sets
|
| 68 |
+
sets_path = cog_dir / "cognate_sets.jsonl"
|
| 69 |
+
with sets_path.open("wb") as fh:
|
| 70 |
+
for cset in sets:
|
| 71 |
+
fh.write(orjson.dumps(cset.to_dict()) + b"\n")
|
| 72 |
+
|
| 73 |
+
logger.info("Cognate results written to %s", cog_dir)
|
cognate_pipeline/src/cognate_pipeline/cli/export_cldf_cmd.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""CLI handler for the export-cldf subcommand."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
|
| 7 |
+
from cognate_pipeline.config.loader import load_config
|
| 8 |
+
from cognate_pipeline.db.connection import get_engine
|
| 9 |
+
from cognate_pipeline.export.cldf_exporter import CldfExporter
|
| 10 |
+
from cognate_pipeline.utils.logging_setup import setup_logging
|
| 11 |
+
|
| 12 |
+
logger = logging.getLogger(__name__)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def run_export_cldf(config_path: str) -> None:
|
| 16 |
+
cfg = load_config(config_path)
|
| 17 |
+
setup_logging(cfg.log_level)
|
| 18 |
+
|
| 19 |
+
engine = get_engine(cfg.database)
|
| 20 |
+
exporter = CldfExporter(engine, cfg.export)
|
| 21 |
+
out_dir = cfg.export.cldf_output_dir
|
| 22 |
+
out_dir.mkdir(parents=True, exist_ok=True)
|
| 23 |
+
exporter.export(out_dir)
|
| 24 |
+
logger.info("CLDF export written to %s", out_dir)
|
cognate_pipeline/src/cognate_pipeline/cli/export_jsonld_cmd.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""CLI handler for the export-jsonld subcommand."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
|
| 7 |
+
from cognate_pipeline.config.loader import load_config
|
| 8 |
+
from cognate_pipeline.db.connection import get_engine
|
| 9 |
+
from cognate_pipeline.export.jsonld_exporter import JsonLdExporter
|
| 10 |
+
from cognate_pipeline.utils.logging_setup import setup_logging
|
| 11 |
+
|
| 12 |
+
logger = logging.getLogger(__name__)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def run_export_jsonld(config_path: str) -> None:
|
| 16 |
+
cfg = load_config(config_path)
|
| 17 |
+
setup_logging(cfg.log_level)
|
| 18 |
+
|
| 19 |
+
engine = get_engine(cfg.database)
|
| 20 |
+
exporter = JsonLdExporter(engine, cfg.export)
|
| 21 |
+
out_dir = cfg.export.jsonld_output_dir
|
| 22 |
+
out_dir.mkdir(parents=True, exist_ok=True)
|
| 23 |
+
exporter.export(out_dir)
|
| 24 |
+
logger.info("JSON-LD export written to %s", out_dir)
|
cognate_pipeline/src/cognate_pipeline/cli/ingest_cmd.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""CLI handler for the ingest-sources subcommand."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
|
| 8 |
+
import orjson
|
| 9 |
+
|
| 10 |
+
from cognate_pipeline.config.loader import load_config
|
| 11 |
+
from cognate_pipeline.config.schema import SourceFormat
|
| 12 |
+
from cognate_pipeline.ingest.base import SourceIngester
|
| 13 |
+
from cognate_pipeline.ingest.csv_ingester import CsvIngester
|
| 14 |
+
from cognate_pipeline.ingest.cldf_ingester import CldfIngester
|
| 15 |
+
from cognate_pipeline.ingest.json_ingester import JsonIngester
|
| 16 |
+
from cognate_pipeline.ingest.wiktionary_ingester import WiktionaryIngester
|
| 17 |
+
from cognate_pipeline.provenance.license_registry import LicenseRegistry
|
| 18 |
+
from cognate_pipeline.utils.logging_setup import setup_logging
|
| 19 |
+
|
| 20 |
+
logger = logging.getLogger(__name__)
|
| 21 |
+
|
| 22 |
+
# Dispatch table: configured source format -> ingester class that handles it.
# All delimiter-based formats (CSV/TSV/COG) share CsvIngester; JSON and NDJSON
# share JsonIngester.
_FORMAT_TO_INGESTER: dict[SourceFormat, type[SourceIngester]] = {
    SourceFormat.CLDF: CldfIngester,
    SourceFormat.CSV: CsvIngester,
    SourceFormat.TSV: CsvIngester,
    SourceFormat.COG: CsvIngester,
    SourceFormat.JSON: JsonIngester,
    SourceFormat.NDJSON: JsonIngester,
    SourceFormat.WIKTIONARY: WiktionaryIngester,
}
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def run_ingest(config_path: str, output_dir: str | None) -> None:
    """Ingest every configured source into staging JSONL files.

    Also records licensing/citation metadata for each source and saves the
    resulting license registry alongside the staged lexemes.
    """
    cfg = load_config(config_path)
    setup_logging(cfg.log_level)
    registry = LicenseRegistry()

    staging = Path(output_dir) if output_dir else cfg.staging_dir / "ingest"
    staging.mkdir(parents=True, exist_ok=True)

    for source_def in cfg.sources:
        logger.info("Ingesting source: %s (%s)", source_def.name, source_def.format)
        # License metadata is registered even when no ingester exists for the
        # format, so provenance stays complete.
        registry.register(
            source_def.name,
            source_def.license,
            source_def.license_url,
            source_def.citation_bibtex,
        )
        ingester_cls = _FORMAT_TO_INGESTER.get(source_def.format)
        if ingester_cls is None:
            logger.error("No ingester for format %s", source_def.format)
            continue

        out_path = staging / f"{source_def.name}.jsonl"
        written = 0
        with out_path.open("wb") as fh:
            for lexeme in ingester_cls(source_def).ingest():
                fh.write(orjson.dumps(lexeme.to_dict()) + b"\n")
                written += 1
        logger.info(" Wrote %d lexemes to %s", written, out_path)

    reg_path = staging / "_license_registry.json"
    reg_path.write_bytes(orjson.dumps(registry.to_dict()))
    logger.info("License registry saved to %s", reg_path)
|
cognate_pipeline/src/cognate_pipeline/cli/load_cmd.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""CLI handler for the load-db subcommand."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
|
| 7 |
+
import orjson
|
| 8 |
+
|
| 9 |
+
from cognate_pipeline.config.loader import load_config
|
| 10 |
+
from cognate_pipeline.db.connection import get_engine, get_session
|
| 11 |
+
from cognate_pipeline.db.loader import BatchLoader
|
| 12 |
+
from cognate_pipeline.utils.logging_setup import setup_logging
|
| 13 |
+
|
| 14 |
+
logger = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def _read_jsonl(path) -> list:
    """Read a JSONL file into a list of parsed records (one dict per line)."""
    with path.open("rb") as fh:
        return [orjson.loads(line) for line in fh]


def run_load(config_path: str) -> None:
    """Handle the load-db subcommand: push staged artefacts into PostgreSQL.

    Loads, in order: the license registry (source metadata), normalised
    lexemes, pairwise cognate links, and cognate sets. Each stage is skipped
    silently when its staging artefact is absent.
    """
    cfg = load_config(config_path)
    setup_logging(cfg.log_level)

    engine = get_engine(cfg.database)
    loader = BatchLoader(engine, batch_size=cfg.batch_size)

    # Load license registry
    reg_path = cfg.staging_dir / "ingest" / "_license_registry.json"
    if reg_path.exists():
        loader.load_sources(orjson.loads(reg_path.read_bytes()))
        logger.info("Loaded source metadata")

    # Load normalised lexemes (skip "_"-prefixed bookkeeping files).
    norm_dir = cfg.staging_dir / "normalised"
    for jsonl_path in sorted(norm_dir.glob("*.jsonl")):
        if jsonl_path.name.startswith("_"):
            continue
        logger.info("Loading lexemes from %s", jsonl_path.name)
        loader.load_lexemes(_read_jsonl(jsonl_path))

    # Load cognate data
    cog_dir = cfg.staging_dir / "cognate"
    links_path = cog_dir / "cognate_links.jsonl"
    if links_path.exists():
        links = _read_jsonl(links_path)
        loader.load_cognate_links(links)
        logger.info("Loaded %d cognate links", len(links))

    sets_path = cog_dir / "cognate_sets.jsonl"
    if sets_path.exists():
        sets_data = _read_jsonl(sets_path)
        loader.load_cognate_sets(sets_data)
        logger.info("Loaded %d cognate sets", len(sets_data))

    logger.info("Database load complete")
|
cognate_pipeline/src/cognate_pipeline/cli/main.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Main Typer application with 6 subcommands."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import typer
|
| 6 |
+
|
| 7 |
+
# Top-level Typer application. `no_args_is_help` makes a bare invocation print
# the usage screen instead of erroring.
app = typer.Typer(
    name="cognate-pipeline",
    help="Cross-linguistic cognate detection pipeline with provenance tracking.",
    no_args_is_help=True,
)

# NOTE: each command imports its handler lazily inside the function body so
# that `--help` stays fast and a heavy/optional dependency of one pipeline
# stage cannot break the others.


@app.command()
def ingest_sources(
    config: str = typer.Option(..., "--config", "-c", help="Path to config YAML"),
    output_dir: str = typer.Option(
        None, "--output-dir", "-o", help="Override staging output directory"
    ),
) -> None:
    """Ingest lexical sources into staging JSONL files."""
    from .ingest_cmd import run_ingest

    run_ingest(config, output_dir)


@app.command()
def normalise_ipa(
    config: str = typer.Option(..., "--config", "-c", help="Path to config YAML"),
) -> None:
    """Normalise IPA and compute sound classes for staged lexemes."""
    from .normalise_cmd import run_normalise

    run_normalise(config)


@app.command()
def detect_cognates(
    config: str = typer.Option(..., "--config", "-c", help="Path to config YAML"),
    method: str = typer.Option(
        None, "--method", "-m", help="Override cognate detection method"
    ),
) -> None:
    """Detect cognate candidates and cluster into cognate sets."""
    from .detect_cmd import run_detect

    run_detect(config, method)


@app.command()
def load_db(
    config: str = typer.Option(..., "--config", "-c", help="Path to config YAML"),
) -> None:
    """Load staged data into PostgreSQL."""
    from .load_cmd import run_load

    run_load(config)


@app.command()
def export_cldf(
    config: str = typer.Option(..., "--config", "-c", help="Path to config YAML"),
) -> None:
    """Export database contents as CLDF Wordlist."""
    from .export_cldf_cmd import run_export_cldf

    run_export_cldf(config)


@app.command()
def export_jsonld(
    config: str = typer.Option(..., "--config", "-c", help="Path to config YAML"),
) -> None:
    """Export cognate links as JSON-LD."""
    from .export_jsonld_cmd import run_export_jsonld

    run_export_jsonld(config)


if __name__ == "__main__":
    app()
|
cognate_pipeline/src/cognate_pipeline/cli/normalise_cmd.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""CLI handler for the normalise-ipa subcommand."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
|
| 8 |
+
import orjson
|
| 9 |
+
|
| 10 |
+
from cognate_pipeline.config.loader import load_config
|
| 11 |
+
from cognate_pipeline.ingest.models import RawLexeme
|
| 12 |
+
from cognate_pipeline.normalise.ipa_normaliser import IpaNormaliser
|
| 13 |
+
from cognate_pipeline.utils.logging_setup import setup_logging
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def run_normalise(config_path: str) -> None:
    """Normalise IPA for every staged ingest file and write results to staging."""
    cfg = load_config(config_path)
    setup_logging(cfg.log_level)

    src_dir = cfg.staging_dir / "ingest"
    dst_dir = cfg.staging_dir / "normalised"
    dst_dir.mkdir(parents=True, exist_ok=True)

    normaliser = IpaNormaliser(cfg.normalisation)

    for src_path in sorted(src_dir.glob("*.jsonl")):
        # "_"-prefixed files are bookkeeping artefacts, not lexeme data.
        if src_path.name.startswith("_"):
            continue
        logger.info("Normalising %s", src_path.name)
        dst_path = dst_dir / src_path.name
        written = 0
        with src_path.open("rb") as fin, dst_path.open("wb") as fout:
            for line in fin:
                lexeme = normaliser.normalise(RawLexeme.from_dict(orjson.loads(line)))
                fout.write(orjson.dumps(lexeme.to_dict()) + b"\n")
                written += 1
        logger.info(" Wrote %d normalised lexemes to %s", written, dst_path)
|
cognate_pipeline/src/cognate_pipeline/cognate/__init__.py
ADDED
|
File without changes
|
cognate_pipeline/src/cognate_pipeline/cognate/baseline_levenshtein.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Weighted Levenshtein scorer using SCA sound classes."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
from cognate_pipeline.cognate.models import CognateLink
|
| 7 |
+
from cognate_pipeline.normalise.models import NormalisedLexeme
|
| 8 |
+
|
| 9 |
+
logger = logging.getLogger(__name__)
|
| 10 |
+
|
| 11 |
+
# Substitution costs between SCA classes (lower = more similar)
|
| 12 |
+
# Same class = 0, vowels among themselves = 0.3, related consonants = 0.5, unrelated = 1.0
|
| 13 |
+
_VOWELS = set("AEIOU")
|
| 14 |
+
_LABIALS = {"P", "B", "M"}
|
| 15 |
+
_CORONALS = {"T", "D", "N", "S", "L", "R"}
|
| 16 |
+
_VELARS = {"K", "G"}
|
| 17 |
+
_LARYNGEALS = {"H"}
|
| 18 |
+
_GLIDES = {"W", "Y"}
|
| 19 |
+
|
| 20 |
+
_NATURAL_CLASSES = [_VOWELS, _LABIALS, _CORONALS, _VELARS, _LARYNGEALS, _GLIDES]
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def _substitution_cost(a: str, b: str) -> float:
|
| 24 |
+
"""Compute substitution cost between two SCA class characters."""
|
| 25 |
+
if a == b:
|
| 26 |
+
return 0.0
|
| 27 |
+
# Check if in same natural class
|
| 28 |
+
for cls in _NATURAL_CLASSES:
|
| 29 |
+
if a in cls and b in cls:
|
| 30 |
+
return 0.3
|
| 31 |
+
return 1.0
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def weighted_levenshtein(s1: str, s2: str) -> float:
|
| 35 |
+
"""Compute weighted Levenshtein distance using SCA-aware costs.
|
| 36 |
+
|
| 37 |
+
Insertion/deletion cost = 0.5, substitution cost varies by class.
|
| 38 |
+
"""
|
| 39 |
+
n, m = len(s1), len(s2)
|
| 40 |
+
if n == 0:
|
| 41 |
+
return m * 0.5
|
| 42 |
+
if m == 0:
|
| 43 |
+
return n * 0.5
|
| 44 |
+
|
| 45 |
+
dp = [[0.0] * (m + 1) for _ in range(n + 1)]
|
| 46 |
+
for i in range(n + 1):
|
| 47 |
+
dp[i][0] = i * 0.5
|
| 48 |
+
for j in range(m + 1):
|
| 49 |
+
dp[0][j] = j * 0.5
|
| 50 |
+
|
| 51 |
+
for i in range(1, n + 1):
|
| 52 |
+
for j in range(1, m + 1):
|
| 53 |
+
sub_cost = _substitution_cost(s1[i - 1], s2[j - 1])
|
| 54 |
+
dp[i][j] = min(
|
| 55 |
+
dp[i - 1][j] + 0.5, # deletion
|
| 56 |
+
dp[i][j - 1] + 0.5, # insertion
|
| 57 |
+
dp[i - 1][j - 1] + sub_cost, # substitution
|
| 58 |
+
)
|
| 59 |
+
return dp[n][m]
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def normalised_similarity(s1: str, s2: str) -> float:
|
| 63 |
+
"""Compute normalised similarity (0-1) from weighted Levenshtein.
|
| 64 |
+
|
| 65 |
+
1.0 = identical, 0.0 = maximally different.
|
| 66 |
+
"""
|
| 67 |
+
if not s1 and not s2:
|
| 68 |
+
return 1.0
|
| 69 |
+
max_len = max(len(s1), len(s2))
|
| 70 |
+
# Maximum possible distance is max_len * 1.0 (all substitutions at full cost)
|
| 71 |
+
dist = weighted_levenshtein(s1, s2)
|
| 72 |
+
return 1.0 - (dist / max_len) if max_len > 0 else 1.0
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
class BaselineLevenshtein:
    """Baseline cognate detector: weighted Levenshtein over SCA sound classes."""

    def score_pairs(
        self,
        pairs: list[tuple[NormalisedLexeme, NormalisedLexeme, str]],
        threshold: float = 0.5,
    ) -> list[CognateLink]:
        """Score all candidate pairs and return links above threshold.

        Each pair is (lexeme_a, lexeme_b, relationship_type).
        """
        links: list[CognateLink] = []
        for lex_a, lex_b, rel_type in pairs:
            classes_a, classes_b = lex_a.sound_class, lex_b.sound_class
            similarity = normalised_similarity(classes_a, classes_b)
            if similarity < threshold:
                continue
            # Canonical ordering: lexicographically smaller lexeme id first.
            first, second = sorted((lex_a.id, lex_b.id))
            links.append(
                CognateLink(
                    lexeme_id_a=first,
                    lexeme_id_b=second,
                    concept_id=lex_a.concept_id,
                    relationship_type=rel_type,
                    score=round(similarity, 4),
                    method="baseline_lev",
                    threshold_used=threshold,
                    evidence={
                        "sound_class_a": classes_a,
                        "sound_class_b": classes_b,
                        "distance": round(weighted_levenshtein(classes_a, classes_b), 4),
                    },
                )
            )
        logger.info("Scored %d pairs, %d above threshold %.2f", len(pairs), len(links), threshold)
        return links
|
cognate_pipeline/src/cognate_pipeline/cognate/candidate_gen.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Generate cognate candidate pairs from normalised lexemes."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import json
|
| 6 |
+
import logging
|
| 7 |
+
from collections import defaultdict
|
| 8 |
+
from itertools import combinations
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
|
| 11 |
+
from cognate_pipeline.ingest.language_resolver import LanguageResolver
|
| 12 |
+
from cognate_pipeline.normalise.models import NormalisedLexeme
|
| 13 |
+
|
| 14 |
+
logger = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
# Load family map from JSON (generated by scripts/convert_cldf_to_tsv.py).
|
| 17 |
+
# Falls back to a minimal hardcoded map if the JSON is missing.
|
| 18 |
+
_FAMILY_MAP_PATH = Path(__file__).parent / "family_map.json"
|
| 19 |
+
|
| 20 |
+
_FAMILY_MAP_FALLBACK: dict[str, str] = {
|
| 21 |
+
"uga": "semitic", "heb": "semitic", "akk": "semitic", "arc": "semitic",
|
| 22 |
+
"phn": "semitic", "syc": "semitic", "arb": "semitic", "amh": "semitic",
|
| 23 |
+
"got": "germanic", "ang": "germanic", "non": "germanic", "goh": "germanic",
|
| 24 |
+
"lat": "italic", "osc": "italic", "xum": "italic",
|
| 25 |
+
"grc": "hellenic", "gmy": "hellenic",
|
| 26 |
+
"sga": "celtic", "cym": "celtic", "bre": "celtic",
|
| 27 |
+
"lit": "balto_slavic", "chu": "balto_slavic", "rus": "balto_slavic",
|
| 28 |
+
"san": "indo_iranian", "ave": "indo_iranian", "fas": "indo_iranian",
|
| 29 |
+
"otk": "turkic", "tur": "turkic", "aze": "turkic",
|
| 30 |
+
"fin": "uralic", "hun": "uralic", "est": "uralic",
|
| 31 |
+
"sux": "sumerian", "egy": "egyptian", "hit": "anatolian",
|
| 32 |
+
"xib": "iberian", "eus": "basque",
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
if _FAMILY_MAP_PATH.exists():
|
| 36 |
+
with open(_FAMILY_MAP_PATH, encoding="utf-8") as _f:
|
| 37 |
+
_FAMILY_MAP: dict[str, str] = json.load(_f)
|
| 38 |
+
logger.debug("Loaded %d family mappings from %s", len(_FAMILY_MAP), _FAMILY_MAP_PATH)
|
| 39 |
+
else:
|
| 40 |
+
_FAMILY_MAP = _FAMILY_MAP_FALLBACK
|
| 41 |
+
logger.debug("Using fallback family map (%d entries)", len(_FAMILY_MAP))
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def _get_family(language_id: str, glottocode: str) -> str:
|
| 45 |
+
"""Resolve a language to its family label."""
|
| 46 |
+
lid = language_id.lower()
|
| 47 |
+
if lid in _FAMILY_MAP:
|
| 48 |
+
return _FAMILY_MAP[lid]
|
| 49 |
+
return f"unknown_{lid}"
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def generate_candidates(
    lexemes: list[NormalisedLexeme],
    family_aware: bool = False,
) -> list[tuple[NormalisedLexeme, NormalisedLexeme, str]]:
    """Generate candidate pairs conditioned on shared concept_id.

    Within each concept group, all cross-language pairs are generated.
    If family_aware is True, each pair is tagged with a relationship_type:
    - "cognate_inherited": both languages in the same family
    - "similarity_only": languages in different families

    Returns:
        List of (lexeme_a, lexeme_b, relationship_type) tuples
        with lexeme_a.id < lexeme_b.id.
    """
    # Bucket lexemes by concept; lexemes without a concept are ignored.
    by_concept: dict[str, list[NormalisedLexeme]] = defaultdict(list)
    for lexeme in lexemes:
        if lexeme.concept_id:
            by_concept[lexeme.concept_id].append(lexeme)

    pairs: list[tuple[NormalisedLexeme, NormalisedLexeme, str]] = []
    for group in by_concept.values():
        if len(group) < 2:
            continue
        for left, right in combinations(group, 2):
            # Same-language pairs are never cognate candidates.
            if left.language_id == right.language_id:
                continue
            # Canonical ordering by lexeme id.
            if left.id > right.id:
                left, right = right, left

            if not family_aware:
                rel_type = "cognate_candidate"
            elif _get_family(left.language_id, left.glottocode) == _get_family(
                right.language_id, right.glottocode
            ):
                rel_type = "cognate_inherited"
            else:
                rel_type = "similarity_only"

            pairs.append((left, right, rel_type))

    logger.info(
        "Generated %d candidate pairs from %d concepts",
        len(pairs),
        len(by_concept),
    )
    return pairs
|
cognate_pipeline/src/cognate_pipeline/cognate/clustering.py
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Clustering algorithms for cognate set formation."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
import uuid
|
| 7 |
+
from collections import defaultdict
|
| 8 |
+
|
| 9 |
+
from cognate_pipeline.cognate.models import CognateLink, CognateSet, CognateSetMember
|
| 10 |
+
from cognate_pipeline.config.schema import ClusteringAlgorithm
|
| 11 |
+
|
| 12 |
+
logger = logging.getLogger(__name__)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def cluster_links(
    links: list[CognateLink],
    algorithm: ClusteringAlgorithm = ClusteringAlgorithm.CONNECTED_COMPONENTS,
) -> list[CognateSet]:
    """Cluster pairwise CognateLinks into CognateSets."""
    # Dispatch table instead of an if/elif chain.
    dispatch = {
        ClusteringAlgorithm.CONNECTED_COMPONENTS: _connected_components,
        ClusteringAlgorithm.UPGMA: _upgma,
    }
    try:
        cluster_fn = dispatch[algorithm]
    except KeyError:
        raise ValueError(f"Unknown clustering algorithm: {algorithm}") from None
    return cluster_fn(links)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def _connected_components(links: list[CognateLink]) -> list[CognateSet]:
    """Form cognate sets via connected components on the link graph."""
    if not links:
        return []

    # Cognate sets never span concepts, so cluster each concept separately.
    by_concept: dict[str, list[CognateLink]] = defaultdict(list)
    for link in links:
        by_concept[link.concept_id].append(link)

    cognate_sets: list[CognateSet] = []

    for concept_id, concept_links in by_concept.items():
        # Union-find over lexeme ids within this concept.
        parent: dict[str, str] = {}

        def find(node: str) -> str:
            # Walk to the root, then compress the traversed path.
            root = node
            while parent.get(root, root) != root:
                root = parent[root]
            while node != root:
                parent[node], node = root, parent[node]
            return root

        for link in concept_links:
            parent.setdefault(link.lexeme_id_a, link.lexeme_id_a)
            parent.setdefault(link.lexeme_id_b, link.lexeme_id_b)
            root_a, root_b = find(link.lexeme_id_a), find(link.lexeme_id_b)
            if root_a != root_b:
                parent[root_a] = root_b

        # Group nodes by their representative root.
        components: dict[str, list[str]] = defaultdict(list)
        for node in parent:
            components[find(node)].append(node)

        for member_ids in components.values():
            if len(member_ids) < 2:
                continue
            cognate_sets.append(
                CognateSet(
                    id=f"cs_{uuid.uuid4().hex[:12]}",
                    concept_id=concept_id,
                    method="connected_components",
                    members=[CognateSetMember(lexeme_id=m) for m in sorted(member_ids)],
                    quality={"size": len(member_ids)},
                )
            )

    logger.info("Formed %d cognate sets via connected components", len(cognate_sets))
    return cognate_sets
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def _upgma(links: list[CognateLink]) -> list[CognateSet]:
    """Form cognate sets via UPGMA hierarchical clustering.

    Uses a simple agglomerative approach: iteratively merge the closest
    pair of clusters based on average link score.
    """
    if not links:
        return []

    # Sets never cross concepts; cluster each concept independently.
    by_concept: dict[str, list[CognateLink]] = defaultdict(list)
    for link in links:
        by_concept[link.concept_id].append(link)

    sets: list[CognateSet] = []

    for concept_id, concept_links in by_concept.items():
        # Build distance matrix — stored sparsely as a (id_a, id_b) -> score
        # map with keys in sorted order; absent pairs contribute nothing to
        # the averages below.
        nodes: set[str] = set()
        score_map: dict[tuple[str, str], float] = {}
        for link in concept_links:
            nodes.add(link.lexeme_id_a)
            nodes.add(link.lexeme_id_b)
            key = (
                min(link.lexeme_id_a, link.lexeme_id_b),
                max(link.lexeme_id_a, link.lexeme_id_b),
            )
            score_map[key] = link.score

        if len(nodes) < 2:
            continue

        # Initialize clusters: every lexeme starts as its own singleton.
        clusters: dict[str, list[str]] = {n: [n] for n in nodes}

        while len(clusters) > 1:
            # Find best merge (highest average pairwise score = UPGMA
            # average linkage over the sparse score map).
            best_score = -1.0
            best_pair = None
            cluster_ids = list(clusters.keys())
            for i in range(len(cluster_ids)):
                for j in range(i + 1, len(cluster_ids)):
                    ci, cj = cluster_ids[i], cluster_ids[j]
                    # Average score between all cross-cluster member pairs
                    # that actually have a link.
                    total = 0.0
                    count = 0
                    for a in clusters[ci]:
                        for b in clusters[cj]:
                            key = (min(a, b), max(a, b))
                            if key in score_map:
                                total += score_map[key]
                                count += 1
                    avg = total / count if count > 0 else 0.0
                    if avg > best_score:
                        best_score = avg
                        best_pair = (ci, cj)

            # Stop when no remaining cluster pair shares a positive-score link.
            if best_pair is None or best_score <= 0:
                break

            # Merge the winning pair under a composite cluster id.
            ci, cj = best_pair
            merged = clusters[ci] + clusters[cj]
            new_id = f"{ci}+{cj}"
            del clusters[ci]
            del clusters[cj]
            clusters[new_id] = merged

        # Emit only multi-member clusters as cognate sets.
        for members in clusters.values():
            if len(members) < 2:
                continue
            set_id = f"cs_{uuid.uuid4().hex[:12]}"
            sets.append(
                CognateSet(
                    id=set_id,
                    concept_id=concept_id,
                    method="upgma",
                    members=[
                        CognateSetMember(lexeme_id=m) for m in sorted(members)
                    ],
                    quality={"size": len(members)},
                )
            )

    logger.info("Formed %d cognate sets via UPGMA", len(sets))
    return sets
|
cognate_pipeline/src/cognate_pipeline/cognate/family_map.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:474d5577e087aaa03ffc54be9abcb4ba5792d4ea2caa9adca2b8131abc8fcf9e
|
| 3 |
+
size 26191
|
cognate_pipeline/src/cognate_pipeline/cognate/lexstat_detector.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""LingPy LexStat wrapper for cognate detection."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
import tempfile
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
|
| 9 |
+
from cognate_pipeline.cognate.models import CognateLink, CognateSet, CognateSetMember
|
| 10 |
+
from cognate_pipeline.normalise.models import NormalisedLexeme
|
| 11 |
+
|
| 12 |
+
logger = logging.getLogger(__name__)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class LexStatDetector:
    """Wraps LingPy's LexStat for cognate detection.

    Requires the `lingpy` optional dependency.
    """

    def detect(
        self,
        lexemes: list[NormalisedLexeme],
        threshold: float = 0.5,
    ) -> list[CognateLink]:
        """Run LexStat cognate detection on the given lexemes.

        Writes a temporary LingPy-format TSV wordlist, runs the LexStat
        scorer and clustering, then expands each cognate-id group into
        pairwise CognateLink records. Score is fixed at 1.0 because LexStat
        reports hard cluster membership, not pair confidences.

        Raises:
            ImportError: if the optional `lingpy` dependency is missing.
        """
        try:
            from lingpy import LexStat
        except ImportError as exc:
            # Chain the original error so the underlying import failure
            # (e.g. a broken transitive dependency) remains visible.
            raise ImportError(
                "lingpy is required for LexStat detection. "
                "Install with: pip install cognate-pipeline[lingpy]"
            ) from exc

        from collections import defaultdict
        from itertools import combinations

        # Build LingPy-format TSV. delete=False so LexStat can reopen the
        # file by path after this handle is closed.
        with tempfile.NamedTemporaryFile(
            mode="w", suffix=".tsv", delete=False, encoding="utf-8"
        ) as tmp:
            tmp.write("ID\tDOCULECT\tCONCEPT\tIPA\tTOKENS\n")
            # Map LingPy's 1-based row index back to our lexeme ids.
            id_map: dict[int, str] = {}
            for idx, lex in enumerate(lexemes, start=1):
                # Fall back to the orthographic form when no canonical IPA exists.
                tokens = " ".join(list(lex.ipa_canonical)) if lex.ipa_canonical else lex.form
                tmp.write(
                    f"{idx}\t{lex.language_id}\t{lex.concept_id}\t"
                    f"{lex.ipa_canonical or lex.form}\t{tokens}\n"
                )
                id_map[idx] = lex.id
            tmp_path = tmp.name

        try:
            lex_stat = LexStat(tmp_path)
            lex_stat.get_scorer(runs=100)
            lex_stat.cluster(method="lexstat", threshold=threshold)

            # Extract cognate links from clustering results. LingPy names the
            # cluster column differently across versions ("cogid" vs
            # "lexstatid").
            links: list[CognateLink] = []
            cogid_col = lex_stat.header.get("cogid", lex_stat.header.get("lexstatid"))
            if cogid_col is None:
                logger.warning("No cognate ID column found in LexStat output")
                return links

            # Group row indices by cognate id to recover the clusters.
            cogid_groups: dict[int, list[int]] = defaultdict(list)
            for idx in lex_stat:
                cogid_groups[lex_stat[idx, cogid_col]].append(idx)

            for cogid, members in cogid_groups.items():
                if len(members) < 2:
                    continue
                concept = lex_stat[members[0], "concept"]
                for a_idx, b_idx in combinations(members, 2):
                    id_a, id_b = id_map[a_idx], id_map[b_idx]
                    if id_a > id_b:
                        id_a, id_b = id_b, id_a  # canonical ordering
                    links.append(
                        CognateLink(
                            lexeme_id_a=id_a,
                            lexeme_id_b=id_b,
                            concept_id=concept,
                            relationship_type="cognate_candidate",
                            score=1.0,
                            method="lexstat",
                            threshold_used=threshold,
                            evidence={"cogid": cogid},
                        )
                    )

            return links
        finally:
            # Always clean up the temporary wordlist file.
            Path(tmp_path).unlink(missing_ok=True)
|
cognate_pipeline/src/cognate_pipeline/cognate/models.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Data models for cognate detection and clustering."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from dataclasses import dataclass, field
|
| 6 |
+
from typing import Any
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@dataclass
class CognateLink:
    """A scored pairwise cognate relationship between two lexemes."""

    lexeme_id_a: str
    lexeme_id_b: str
    concept_id: str
    # cognate_inherited | similarity_only | cognate_candidate
    relationship_type: str = "cognate_candidate"
    score: float = 0.0
    method: str = ""
    threshold_used: float = 0.0
    evidence: dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> dict[str, Any]:
        """Serialise to a plain dict; the evidence dict is shared, not copied."""
        names = (
            "lexeme_id_a",
            "lexeme_id_b",
            "concept_id",
            "relationship_type",
            "score",
            "method",
            "threshold_used",
            "evidence",
        )
        return {name: getattr(self, name) for name in names}

    @classmethod
    def from_dict(cls, d: dict[str, Any]) -> "CognateLink":
        """Rebuild a link from a dict; missing optional keys fall back to defaults."""
        return cls(
            d["lexeme_id_a"],
            d["lexeme_id_b"],
            d.get("concept_id", ""),
            relationship_type=d.get("relationship_type", "cognate_candidate"),
            score=d.get("score", 0.0),
            method=d.get("method", ""),
            threshold_used=d.get("threshold_used", 0.0),
            evidence=d.get("evidence", {}),
        )
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
@dataclass
class CognateSetMember:
    """A member of a cognate set."""

    lexeme_id: str
    role: str = "member"  # member | proto | reflex

    def to_dict(self) -> dict[str, Any]:
        """Serialise to a plain dict."""
        return dict(lexeme_id=self.lexeme_id, role=self.role)

    @classmethod
    def from_dict(cls, d: dict[str, Any]) -> "CognateSetMember":
        """Rebuild a member from a dict; role defaults to 'member'."""
        role = d.get("role", "member")
        return cls(d["lexeme_id"], role)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
@dataclass
class CognateSet:
    """A cluster of cognate lexemes sharing a common etymon."""

    id: str
    concept_id: str
    method: str
    members: list[CognateSetMember] = field(default_factory=list)
    quality: dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> dict[str, Any]:
        """Serialise the set, recursively serialising each member."""
        out: dict[str, Any] = {
            "id": self.id,
            "concept_id": self.concept_id,
            "method": self.method,
        }
        out["members"] = [member.to_dict() for member in self.members]
        out["quality"] = self.quality
        return out

    @classmethod
    def from_dict(cls, d: dict[str, Any]) -> "CognateSet":
        """Rebuild a set from a dict; only the 'id' key is required."""
        members = [CognateSetMember.from_dict(m) for m in d.get("members", [])]
        return cls(
            d["id"],
            d.get("concept_id", ""),
            d.get("method", ""),
            members,
            d.get("quality", {}),
        )
|
cognate_pipeline/src/cognate_pipeline/config/__init__.py
ADDED
|
File without changes
|
cognate_pipeline/src/cognate_pipeline/config/loader.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Load and validate pipeline configuration from YAML."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
|
| 7 |
+
import yaml
|
| 8 |
+
|
| 9 |
+
from .schema import PipelineConfig
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def load_config(path: Path | str) -> PipelineConfig:
    """Read a YAML file and return a validated PipelineConfig.

    An empty file (safe_load returns None) validates as an all-defaults config.
    """
    cfg_file = Path(path)
    raw = yaml.safe_load(cfg_file.read_text(encoding="utf-8"))
    return PipelineConfig.model_validate(raw if raw is not None else {})
|
cognate_pipeline/src/cognate_pipeline/config/schema.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Pydantic v2 configuration models for the cognate pipeline."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from enum import Enum
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from typing import Any
|
| 8 |
+
|
| 9 |
+
from pydantic import BaseModel, Field
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class CognateMethod(str, Enum):
    """Cognate-detection backends selectable in config (values are the config strings)."""

    BASELINE_LEV = "baseline_lev"
    LEXSTAT = "lexstat"
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class ClusteringAlgorithm(str, Enum):
    """How pairwise cognate links are grouped into sets."""

    UPGMA = "upgma"
    CONNECTED_COMPONENTS = "connected_components"
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class SourceFormat(str, Enum):
    """Supported input formats for lexical data sources."""

    CLDF = "cldf"
    CSV = "csv"
    TSV = "tsv"
    COG = "cog"
    JSON = "json"
    NDJSON = "ndjson"
    WIKTIONARY = "wiktionary"
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class DatabaseConfig(BaseModel):
    """PostgreSQL connection settings.

    `schema_name` is stored but not part of the URL; it is up to callers
    to apply it (e.g. via search_path).
    """

    host: str = "localhost"
    port: int = 5432
    name: str = "cognate_db"
    user: str = "postgres"
    password: str = ""
    schema_name: str = "public"

    @property
    def url(self) -> str:
        """SQLAlchemy connection URL for the psycopg (v3) driver.

        User and password are percent-encoded so that reserved URL
        characters in credentials (e.g. '@', ':', '/') cannot corrupt
        the URL, per SQLAlchemy's database-URL escaping rules.
        """
        from urllib.parse import quote  # local import keeps module deps unchanged

        user = quote(self.user, safe="")
        password = quote(self.password, safe="")
        return (
            f"postgresql+psycopg://{user}:{password}"
            f"@{self.host}:{self.port}/{self.name}"
        )
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class ColumnMapping(BaseModel):
    """Maps source columns to internal fields."""

    # Defaults match common CLDF wordlist column names — override per source.
    language: str = "Language_ID"
    form: str = "Form"
    concept: str = "Parameter_ID"
    ipa: str | None = None  # optional pronunciation column; None = not present
    glottocode: str | None = None
    source_id: str | None = None
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class SourceDef(BaseModel):
    """Definition of a single data source."""

    name: str
    path: Path
    format: SourceFormat
    license: str = "unknown"
    license_url: str = ""
    citation_bibtex: str = ""
    column_mapping: ColumnMapping = Field(default_factory=ColumnMapping)
    # presumably only used for delimited formats (csv/tsv) — confirm in ingest code
    delimiter: str | None = None
    encoding: str = "utf-8"
    extra: dict[str, Any] = Field(default_factory=dict)  # free-form per-source options
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class NormalisationConfig(BaseModel):
    """Options controlling orthographic/IPA normalisation."""

    unicode_form: str = "NFC"  # Unicode normalisation form name
    strip_suprasegmentals: bool = False
    strip_whitespace: bool = True
    # NOTE(review): presumably backends are tried in this order until one
    # yields a transcription — confirm against the normalisation code.
    ipa_backend_priority: list[str] = Field(
        default_factory=lambda: ["attested", "epitran", "phonemizer"]
    )
    transliteration_passthrough: bool = True
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
class CognateConfig(BaseModel):
    """Settings for cognate detection and clustering."""

    method: CognateMethod = CognateMethod.BASELINE_LEV
    clustering: ClusteringAlgorithm = ClusteringAlgorithm.CONNECTED_COMPONENTS
    threshold: float = 0.5  # detector-specific score/distance threshold
    # NOTE(review): presumably restricts candidate pairs to the same family — confirm
    family_aware: bool = True
    batch_size: int = 10000
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
class ExportConfig(BaseModel):
    """Output locations and options for CLDF / JSON-LD exports."""

    cldf_output_dir: Path = Path("export/cldf")
    jsonld_output_dir: Path = Path("export/jsonld")
    include_provenance: bool = True
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
class PipelineConfig(BaseModel):
    """Top-level pipeline configuration."""

    staging_dir: Path = Path("staging")
    sources: list[SourceDef] = Field(default_factory=list)
    database: DatabaseConfig = Field(default_factory=DatabaseConfig)
    normalisation: NormalisationConfig = Field(default_factory=NormalisationConfig)
    cognate: CognateConfig = Field(default_factory=CognateConfig)
    export: ExportConfig = Field(default_factory=ExportConfig)
    glottolog_data_dir: Path | None = None  # local Glottolog checkout, if available
    # NOTE(review): CognateConfig also defines batch_size (10000) — confirm
    # which value each pipeline stage reads.
    batch_size: int = 5000
    log_level: str = "INFO"
|
cognate_pipeline/src/cognate_pipeline/db/__init__.py
ADDED
|
File without changes
|
cognate_pipeline/src/cognate_pipeline/db/connection.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Database engine and session factory."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from sqlalchemy import create_engine, Engine
|
| 6 |
+
from sqlalchemy.orm import Session, sessionmaker
|
| 7 |
+
|
| 8 |
+
from cognate_pipeline.config.schema import DatabaseConfig
|
| 9 |
+
|
| 10 |
+
# Engines keyed by connection URL so repeated get_engine() calls reuse one pool per database.
_engine_cache: dict[str, Engine] = {}
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def get_engine(config: DatabaseConfig) -> Engine:
    """Return the cached SQLAlchemy engine for *config*, creating it on first use.

    pool_pre_ping guards against stale pooled connections.
    """
    url = config.url
    engine = _engine_cache.get(url)
    if engine is None:
        engine = create_engine(
            url,
            pool_size=5,
            max_overflow=10,
            pool_pre_ping=True,
        )
        _engine_cache[url] = engine
    return engine
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def get_session(engine: Engine) -> Session:
    """Create a new ORM session bound to *engine*; the caller owns its lifecycle."""
    return sessionmaker(bind=engine)()
|
cognate_pipeline/src/cognate_pipeline/db/loader.py
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Batch data loader for PostgreSQL."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
from typing import Any
|
| 7 |
+
|
| 8 |
+
from sqlalchemy import Engine, select, text
|
| 9 |
+
from sqlalchemy.orm import Session, sessionmaker
|
| 10 |
+
|
| 11 |
+
from cognate_pipeline.db.schema import (
|
| 12 |
+
Base,
|
| 13 |
+
CognateLink as CognateLinkTable,
|
| 14 |
+
CognateSetMember,
|
| 15 |
+
CognateSetTable,
|
| 16 |
+
Language,
|
| 17 |
+
Lexeme,
|
| 18 |
+
Source,
|
| 19 |
+
)
|
| 20 |
+
from cognate_pipeline.utils.batching import batched
|
| 21 |
+
|
| 22 |
+
logger = logging.getLogger(__name__)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class BatchLoader:
    """Loads staged data into PostgreSQL in batches.

    Call order matters: load sources/lexemes first, then cognate links
    and sets — the link/set loaders resolve lexeme external IDs against
    rows already committed to the database.
    """

    def __init__(self, engine: Engine, batch_size: int = 5000) -> None:
        self.engine = engine
        self.batch_size = batch_size
        self._session_factory = sessionmaker(bind=engine)
        # Ensure tables exist
        Base.metadata.create_all(engine)

    def _session(self) -> Session:
        # Fresh session per load operation; callers use it as a context manager.
        return self._session_factory()

    def load_sources(self, registry: dict[str, Any]) -> None:
        """Load source metadata from license registry dict."""
        with self._session() as session:
            for source_data in registry.get("sources", {}).values():
                # Idempotent: skip sources already present (source_name is unique).
                existing = session.execute(
                    select(Source).where(Source.source_name == source_data["source_name"])
                ).scalar_one_or_none()
                if existing is None:
                    session.add(Source(
                        source_name=source_data["source_name"],
                        license=source_data.get("license", "unknown"),
                        license_url=source_data.get("license_url", ""),
                        citation_bibtex=source_data.get("citation_bibtex", ""),
                    ))
            session.commit()

    def load_lexemes(self, records: list[dict[str, Any]]) -> None:
        """Load normalised lexeme records into the database."""
        with self._session() as session:
            # Pre-load language and source caches
            lang_cache: dict[str, int] = {}
            source_cache: dict[str, int] = {}

            for batch in batched(records, self.batch_size):
                for rec in batch:
                    # Ensure language exists
                    lang_id_str = rec.get("language_id", "")
                    # Fall back to the raw language ID when no glottocode is given.
                    glottocode = rec.get("glottocode", "") or lang_id_str
                    if glottocode not in lang_cache:
                        lang = session.execute(
                            select(Language).where(Language.glottocode == glottocode)
                        ).scalar_one_or_none()
                        if lang is None:
                            lang = Language(glottocode=glottocode, name=lang_id_str)
                            session.add(lang)
                            session.flush()  # flush so the generated PK is available
                        lang_cache[glottocode] = lang.id

                    # Ensure source exists
                    source_name = rec.get("source_name", "unknown")
                    if source_name not in source_cache:
                        src = session.execute(
                            select(Source).where(Source.source_name == source_name)
                        ).scalar_one_or_none()
                        if src is None:
                            src = Source(source_name=source_name)
                            session.add(src)
                            session.flush()
                        source_cache[source_name] = src.id

                    # NOTE(review): migration 001 defines ipa_raw/ipa_canonical and
                    # has no transcription_type column — confirm the ORM Lexeme model
                    # actually accepts phonetic_raw/phonetic_canonical/transcription_type.
                    session.add(Lexeme(
                        external_id=rec.get("id", ""),
                        language_id=lang_cache[glottocode],
                        source_id=source_cache[source_name],
                        concept_id=rec.get("concept_id", ""),
                        lemma=rec.get("form", ""),
                        orthography=rec.get("form", ""),
                        phonetic_raw=rec.get("phonetic_raw", rec.get("ipa_raw", "")),
                        phonetic_canonical=rec.get("phonetic_canonical", rec.get("ipa_canonical", "")),
                        transcription_type=rec.get("transcription_type", "unknown"),
                        sound_class=rec.get("sound_class", ""),
                        confidence=rec.get("confidence", 1.0),
                        provenance=rec.get("provenance"),
                    ))
                # Commit once per batch to bound transaction size.
                session.commit()
                logger.debug("Committed batch of %d lexemes", len(batch))

    def load_cognate_links(self, links: list[dict[str, Any]]) -> None:
        """Load cognate link records.

        Note: This requires lexemes to be loaded first, as it needs to
        resolve external IDs to database IDs.
        """
        with self._session() as session:
            # Build external_id -> db_id mapping
            id_map: dict[str, int] = {}
            for lex in session.execute(select(Lexeme)).scalars():
                id_map[lex.external_id] = lex.id

            # Get default source
            default_source = session.execute(select(Source)).scalars().first()
            source_id = default_source.id if default_source else 1

            for batch in batched(links, self.batch_size):
                for link_data in batch:
                    db_id_a = id_map.get(link_data["lexeme_id_a"])
                    db_id_b = id_map.get(link_data["lexeme_id_b"])
                    # Skip links whose endpoints were never loaded.
                    if db_id_a is None or db_id_b is None:
                        continue
                    # Ensure ordering (migration enforces lexeme_id_a < lexeme_id_b)
                    if db_id_a > db_id_b:
                        db_id_a, db_id_b = db_id_b, db_id_a
                    session.add(CognateLinkTable(
                        lexeme_id_a=db_id_a,
                        lexeme_id_b=db_id_b,
                        source_id=source_id,
                        concept_id=link_data.get("concept_id", ""),
                        relationship_type=link_data.get("relationship_type", "cognate_candidate"),
                        score=link_data.get("score", 0.0),
                        method=link_data.get("method", ""),
                        threshold_used=link_data.get("threshold_used", 0.0),
                        evidence=link_data.get("evidence"),
                    ))
                session.commit()

    def load_cognate_sets(self, sets_data: list[dict[str, Any]]) -> None:
        """Load cognate set records."""
        with self._session() as session:
            # Build external_id -> db_id mapping
            id_map: dict[str, int] = {}
            for lex in session.execute(select(Lexeme)).scalars():
                id_map[lex.external_id] = lex.id

            for batch in batched(sets_data, self.batch_size):
                for set_data in batch:
                    cs = CognateSetTable(
                        external_id=set_data.get("id", ""),
                        concept_id=set_data.get("concept_id", ""),
                        method=set_data.get("method", ""),
                        quality=set_data.get("quality"),
                    )
                    session.add(cs)
                    session.flush()  # flush so cs.id is available for member rows

                    for member_data in set_data.get("members", []):
                        db_id = id_map.get(member_data["lexeme_id"])
                        if db_id is None:
                            continue
                        session.add(CognateSetMember(
                            cognate_set_id=cs.id,
                            lexeme_id=db_id,
                            role=member_data.get("role", "member"),
                        ))
                session.commit()
|
cognate_pipeline/src/cognate_pipeline/db/migrations/__init__.py
ADDED
|
File without changes
|
cognate_pipeline/src/cognate_pipeline/db/migrations/env.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Alembic environment configuration."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from logging.config import fileConfig
|
| 6 |
+
|
| 7 |
+
from alembic import context
|
| 8 |
+
from sqlalchemy import engine_from_config, pool
|
| 9 |
+
|
| 10 |
+
from cognate_pipeline.db.schema import Base
|
| 11 |
+
|
| 12 |
+
# Alembic Config object, populated from alembic.ini.
config = context.config

# Configure Python logging from the ini file, if one was supplied.
if config.config_file_name is not None:
    fileConfig(config.config_file_name)

# Autogenerate target: all ORM models registered on the shared declarative Base.
target_metadata = Base.metadata
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def run_migrations_offline() -> None:
    """Run migrations in 'offline' mode.

    Emits SQL to stdout using literal binds instead of connecting
    to a live database.
    """
    url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=url,
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
    )
    with context.begin_transaction():
        context.run_migrations()
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def run_migrations_online() -> None:
    """Run migrations in 'online' mode.

    Builds an engine from the [alembic] ini section and applies
    migrations over a live connection (NullPool: one short-lived
    connection, no pooling).
    """
    connectable = engine_from_config(
        config.get_section(config.config_ini_section, {}),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )
    with connectable.connect() as connection:
        context.configure(connection=connection, target_metadata=target_metadata)
        with context.begin_transaction():
            context.run_migrations()
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
# Entry point: Alembic executes this module and selects the mode itself.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
|
cognate_pipeline/src/cognate_pipeline/db/migrations/versions/001_initial_schema.py
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Initial schema with all 8 tables.
|
| 2 |
+
|
| 3 |
+
Revision ID: 001
|
| 4 |
+
Create Date: 2024-01-01 00:00:00.000000
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from __future__ import annotations
|
| 8 |
+
|
| 9 |
+
from alembic import op
|
| 10 |
+
import sqlalchemy as sa
|
| 11 |
+
from sqlalchemy.dialects import postgresql
|
| 12 |
+
|
| 13 |
+
# Alembic revision identifiers.
revision = "001"
down_revision = None  # first migration in the chain
branch_labels = None
depends_on = None
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def upgrade() -> None:
    """Create all eight core tables, their indexes, and PostGIS geometry columns."""
    # Language table
    op.create_table(
        "language",
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("glottocode", sa.String(8), nullable=False),
        sa.Column("name", sa.String(256), nullable=False, server_default=""),
        sa.Column("iso639_3", sa.String(3), nullable=True),
        sa.Column("family_glottocode", sa.String(8), nullable=True),
        sa.Column("classification_path", postgresql.ARRAY(sa.Text()), nullable=True),
        sa.Column("metadata", postgresql.JSONB(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("glottocode"),
    )
    # NOTE(review): AddGeometryColumn requires the PostGIS extension — confirm it is installed.
    op.execute("SELECT AddGeometryColumn('language', 'location', 4326, 'POINT', 2)")

    # Source table
    op.create_table(
        "source",
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("source_name", sa.String(256), nullable=False),
        sa.Column("license", sa.String(128), nullable=False, server_default="unknown"),
        sa.Column("license_url", sa.Text(), nullable=False, server_default=""),
        sa.Column("citation_bibtex", sa.Text(), nullable=False, server_default=""),
        sa.Column("retrieved_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("source_name"),
    )

    # Lexeme table
    op.create_table(
        "lexeme",
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("external_id", sa.String(256), nullable=False, server_default=""),
        sa.Column("language_id", sa.Integer(), nullable=False),
        sa.Column("source_id", sa.Integer(), nullable=False),
        sa.Column("concept_id", sa.String(256), nullable=False, server_default=""),
        sa.Column("lemma", sa.String(512), nullable=False),
        sa.Column("orthography", sa.String(512), nullable=False, server_default=""),
        sa.Column("ipa_raw", sa.Text(), nullable=False, server_default=""),
        sa.Column("ipa_canonical", sa.Text(), nullable=False, server_default=""),
        sa.Column("sound_class", sa.String(512), nullable=False, server_default=""),
        sa.Column("confidence", sa.Float(), nullable=False, server_default="1.0"),
        sa.Column("provenance", postgresql.JSONB(), nullable=True),
        sa.ForeignKeyConstraint(["language_id"], ["language.id"]),
        sa.ForeignKeyConstraint(["source_id"], ["source.id"]),
        sa.PrimaryKeyConstraint("id"),
        sa.CheckConstraint("confidence >= 0 AND confidence <= 1", name="ck_lexeme_confidence"),
    )
    op.create_index("ix_lexeme_concept", "lexeme", ["concept_id"])
    op.create_index("ix_lexeme_language", "lexeme", ["language_id"])

    # Name entity table
    op.create_table(
        "name_entity",
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("entity_type", sa.String(64), nullable=False),
        sa.Column("external_ids", postgresql.JSONB(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
        sa.CheckConstraint(
            "entity_type IN ('place', 'person', 'deity', 'ethnonym', 'other')",
            name="ck_entity_type",
        ),
    )
    op.execute("SELECT AddGeometryColumn('name_entity', 'location', 4326, 'POINT', 2)")

    # Name form table
    op.create_table(
        "name_form",
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("name_entity_id", sa.Integer(), nullable=False),
        sa.Column("language_id", sa.Integer(), nullable=False),
        sa.Column("source_id", sa.Integer(), nullable=False),
        sa.Column("name_string", sa.String(512), nullable=False),
        sa.Column("ipa_raw", sa.Text(), nullable=False, server_default=""),
        sa.Column("ipa_canonical", sa.Text(), nullable=False, server_default=""),
        sa.Column("provenance", postgresql.JSONB(), nullable=True),
        sa.ForeignKeyConstraint(["name_entity_id"], ["name_entity.id"]),
        sa.ForeignKeyConstraint(["language_id"], ["language.id"]),
        sa.ForeignKeyConstraint(["source_id"], ["source.id"]),
        sa.PrimaryKeyConstraint("id"),
    )

    # Cognate link table
    op.create_table(
        "cognate_link",
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("lexeme_id_a", sa.Integer(), nullable=False),
        sa.Column("lexeme_id_b", sa.Integer(), nullable=False),
        sa.Column("source_id", sa.Integer(), nullable=False),
        sa.Column("concept_id", sa.String(256), nullable=False, server_default=""),
        sa.Column("relationship_type", sa.String(64), nullable=False, server_default="cognate_candidate"),
        sa.Column("score", sa.Float(), nullable=False, server_default="0.0"),
        sa.Column("method", sa.String(64), nullable=False, server_default=""),
        sa.Column("threshold_used", sa.Float(), nullable=False, server_default="0.0"),
        sa.Column("evidence", postgresql.JSONB(), nullable=True),
        sa.ForeignKeyConstraint(["lexeme_id_a"], ["lexeme.id"]),
        sa.ForeignKeyConstraint(["lexeme_id_b"], ["lexeme.id"]),
        sa.ForeignKeyConstraint(["source_id"], ["source.id"]),
        sa.PrimaryKeyConstraint("id"),
        # Canonical pair ordering: loaders must store the smaller lexeme id first.
        sa.CheckConstraint("lexeme_id_a < lexeme_id_b", name="ck_link_ordering"),
        sa.CheckConstraint(
            "relationship_type IN ('cognate_inherited', 'similarity_only', 'cognate_candidate', 'borrowing')",
            name="ck_relationship_type",
        ),
        sa.UniqueConstraint("lexeme_id_a", "lexeme_id_b", "method", name="uq_link_pair_method"),
    )
    op.create_index("ix_cognate_link_concept", "cognate_link", ["concept_id"])

    # Cognate set table
    op.create_table(
        "cognate_set",
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("external_id", sa.String(256), nullable=False, server_default=""),
        sa.Column("concept_id", sa.String(256), nullable=False, server_default=""),
        sa.Column("method", sa.String(64), nullable=False, server_default=""),
        sa.Column("quality", postgresql.JSONB(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index("ix_cognate_set_concept", "cognate_set", ["concept_id"])

    # Cognate set member table
    op.create_table(
        "cognate_set_member",
        sa.Column("cognate_set_id", sa.Integer(), nullable=False),
        sa.Column("lexeme_id", sa.Integer(), nullable=False),
        sa.Column("role", sa.String(32), nullable=False, server_default="member"),
        sa.ForeignKeyConstraint(["cognate_set_id"], ["cognate_set.id"]),
        sa.ForeignKeyConstraint(["lexeme_id"], ["lexeme.id"]),
        sa.PrimaryKeyConstraint("cognate_set_id", "lexeme_id"),
    )
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def downgrade() -> None:
    """Drop all tables in reverse dependency order (children before parents)."""
    for table_name in (
        "cognate_set_member",
        "cognate_set",
        "cognate_link",
        "name_form",
        "name_entity",
        "lexeme",
        "source",
        "language",
    ):
        op.drop_table(table_name)
|
cognate_pipeline/src/cognate_pipeline/db/schema.py
ADDED
|
@@ -0,0 +1,191 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""SQLAlchemy 2.0 ORM models for the cognate pipeline database.
|
| 2 |
+
|
| 3 |
+
8 tables:
|
| 4 |
+
- language
|
| 5 |
+
- source
|
| 6 |
+
- lexeme
|
| 7 |
+
- name_entity
|
| 8 |
+
- name_form
|
| 9 |
+
- cognate_link
|
| 10 |
+
- cognate_set
|
| 11 |
+
- cognate_set_member
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
from __future__ import annotations
|
| 15 |
+
|
| 16 |
+
import uuid
|
| 17 |
+
from datetime import datetime, timezone
|
| 18 |
+
|
| 19 |
+
from sqlalchemy import (
|
| 20 |
+
CheckConstraint,
|
| 21 |
+
Column,
|
| 22 |
+
DateTime,
|
| 23 |
+
Float,
|
| 24 |
+
ForeignKey,
|
| 25 |
+
Index,
|
| 26 |
+
Integer,
|
| 27 |
+
String,
|
| 28 |
+
Text,
|
| 29 |
+
UniqueConstraint,
|
| 30 |
+
)
|
| 31 |
+
from sqlalchemy.dialects.postgresql import ARRAY, JSONB, UUID
|
| 32 |
+
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, relationship
|
| 33 |
+
from geoalchemy2 import Geometry
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class Base(DeclarativeBase):
    """Shared declarative base; all ORM models register on its metadata."""
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class Language(Base):
    """A languoid keyed by its Glottolog code.

    One row per language/dialect; ``lexeme`` and ``name_form`` rows
    reference it via ``language_id``.
    """

    __tablename__ = "language"

    # Surrogate key; glottocode is the natural unique identifier.
    id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
    glottocode: Mapped[str] = mapped_column(String(8), unique=True, nullable=False)
    name: Mapped[str] = mapped_column(String(256), nullable=False, default="")
    iso639_3: Mapped[str | None] = mapped_column(String(3), nullable=True)
    family_glottocode: Mapped[str | None] = mapped_column(String(8), nullable=True)
    # Classification chain as an array of text entries (e.g. family -> ... -> languoid).
    classification_path = mapped_column(ARRAY(Text), nullable=True)
    # Representative point location (SRID 4326 / WGS 84).
    location = mapped_column(Geometry("POINT", srid=4326), nullable=True)
    # SQL column is named "metadata"; the trailing underscore avoids clashing
    # with the reserved SQLAlchemy `Base.metadata` attribute.
    metadata_ = mapped_column("metadata", JSONB, nullable=True)

    lexemes: Mapped[list[Lexeme]] = relationship(back_populates="language")
    name_forms: Mapped[list[NameForm]] = relationship(back_populates="language")
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class Source(Base):
    """Provenance record for an upstream dataset: license, citation, retrieval time."""

    __tablename__ = "source"

    id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
    source_name: Mapped[str] = mapped_column(String(256), unique=True, nullable=False)
    license: Mapped[str] = mapped_column(String(128), nullable=False, default="unknown")
    license_url: Mapped[str] = mapped_column(Text, nullable=False, default="")
    citation_bibtex: Mapped[str] = mapped_column(Text, nullable=False, default="")
    # Timezone-aware; defaults to the moment of insertion (UTC).
    retrieved_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, default=lambda: datetime.now(timezone.utc)
    )

    lexemes: Mapped[list[Lexeme]] = relationship(back_populates="source")
    name_forms: Mapped[list[NameForm]] = relationship(back_populates="source")
    cognate_links: Mapped[list[CognateLink]] = relationship(back_populates="source")
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class Lexeme(Base):
    """A single word form for one concept in one language, from one source."""

    __tablename__ = "lexeme"

    id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
    # Identifier carried over from the upstream dataset ("" if none).
    external_id: Mapped[str] = mapped_column(String(256), nullable=False, default="")
    language_id: Mapped[int] = mapped_column(ForeignKey("language.id"), nullable=False)
    source_id: Mapped[int] = mapped_column(ForeignKey("source.id"), nullable=False)
    concept_id: Mapped[str] = mapped_column(String(256), nullable=False, default="")
    lemma: Mapped[str] = mapped_column(String(512), nullable=False)
    orthography: Mapped[str] = mapped_column(String(512), nullable=False, default="")
    # Transcription as received vs. after normalisation.
    phonetic_raw: Mapped[str] = mapped_column(Text, nullable=False, default="")
    phonetic_canonical: Mapped[str] = mapped_column(Text, nullable=False, default="")
    # Presumably mirrors ingest TranscriptionType values ("ipa",
    # "transliteration", "unknown") — confirm against the ingesters.
    transcription_type: Mapped[str] = mapped_column(
        String(32), nullable=False, default="unknown"
    )
    sound_class: Mapped[str] = mapped_column(String(512), nullable=False, default="")
    # Constrained to [0, 1] by ck_lexeme_confidence below.
    confidence: Mapped[float] = mapped_column(Float, nullable=False, default=1.0)
    provenance = mapped_column(JSONB, nullable=True)

    language: Mapped[Language] = relationship(back_populates="lexemes")
    source: Mapped[Source] = relationship(back_populates="lexemes")

    __table_args__ = (
        Index("ix_lexeme_concept", "concept_id"),
        Index("ix_lexeme_language", "language_id"),
        CheckConstraint("confidence >= 0 AND confidence <= 1", name="ck_lexeme_confidence"),
    )
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
class NameEntity(Base):
    """A named referent (place, person, deity, ethnonym, other).

    May have attested forms (``NameForm``) in several languages.
    """

    __tablename__ = "name_entity"

    id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
    # Restricted to the closed set enforced by ck_entity_type below.
    entity_type: Mapped[str] = mapped_column(String(64), nullable=False)
    # Identifiers in external registries; free-form JSON.
    external_ids = mapped_column(JSONB, nullable=True)
    # Point location (SRID 4326 / WGS 84).
    location = mapped_column(Geometry("POINT", srid=4326), nullable=True)

    name_forms: Mapped[list[NameForm]] = relationship(back_populates="name_entity")

    __table_args__ = (
        CheckConstraint(
            "entity_type IN ('place', 'person', 'deity', 'ethnonym', 'other')",
            name="ck_entity_type",
        ),
    )
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
class NameForm(Base):
    """One attested form of a NameEntity in a particular language and source."""

    __tablename__ = "name_form"

    id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
    name_entity_id: Mapped[int] = mapped_column(ForeignKey("name_entity.id"), nullable=False)
    language_id: Mapped[int] = mapped_column(ForeignKey("language.id"), nullable=False)
    source_id: Mapped[int] = mapped_column(ForeignKey("source.id"), nullable=False)
    name_string: Mapped[str] = mapped_column(String(512), nullable=False)
    # IPA as received vs. after normalisation.
    ipa_raw: Mapped[str] = mapped_column(Text, nullable=False, default="")
    ipa_canonical: Mapped[str] = mapped_column(Text, nullable=False, default="")
    provenance = mapped_column(JSONB, nullable=True)

    name_entity: Mapped[NameEntity] = relationship(back_populates="name_forms")
    language: Mapped[Language] = relationship(back_populates="name_forms")
    source: Mapped[Source] = relationship(back_populates="name_forms")
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
class CognateLink(Base):
    """A scored pairwise relation between two lexemes.

    Pairs are stored once in canonical order (``lexeme_id_a < lexeme_id_b``,
    enforced by ``ck_link_ordering``) and are unique per detection method
    (``uq_link_pair_method``).
    """

    __tablename__ = "cognate_link"

    id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
    lexeme_id_a: Mapped[int] = mapped_column(ForeignKey("lexeme.id"), nullable=False)
    lexeme_id_b: Mapped[int] = mapped_column(ForeignKey("lexeme.id"), nullable=False)
    source_id: Mapped[int] = mapped_column(ForeignKey("source.id"), nullable=False)
    concept_id: Mapped[str] = mapped_column(String(256), nullable=False, default="")
    # Restricted to the closed set enforced by ck_relationship_type below.
    relationship_type: Mapped[str] = mapped_column(String(64), nullable=False, default="cognate_candidate")
    # Detector-assigned similarity score and the decision threshold in force.
    score: Mapped[float] = mapped_column(Float, nullable=False, default=0.0)
    method: Mapped[str] = mapped_column(String(64), nullable=False, default="")
    threshold_used: Mapped[float] = mapped_column(Float, nullable=False, default=0.0)
    # Free-form supporting detail for the link (JSON).
    evidence = mapped_column(JSONB, nullable=True)

    source: Mapped[Source] = relationship(back_populates="cognate_links")

    __table_args__ = (
        CheckConstraint("lexeme_id_a < lexeme_id_b", name="ck_link_ordering"),
        CheckConstraint(
            "relationship_type IN ('cognate_inherited', 'similarity_only', 'cognate_candidate', 'borrowing')",
            name="ck_relationship_type",
        ),
        UniqueConstraint("lexeme_id_a", "lexeme_id_b", "method", name="uq_link_pair_method"),
        Index("ix_cognate_link_concept", "concept_id"),
    )
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
class CognateSetTable(Base):
    """A cluster of lexemes grouped as cognates (SQL table ``cognate_set``)."""

    __tablename__ = "cognate_set"

    id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
    external_id: Mapped[str] = mapped_column(String(256), nullable=False, default="")
    concept_id: Mapped[str] = mapped_column(String(256), nullable=False, default="")
    # Name of the detection/clustering method that produced this set.
    method: Mapped[str] = mapped_column(String(64), nullable=False, default="")
    # Free-form quality metrics for the set (JSON).
    quality = mapped_column(JSONB, nullable=True)

    members: Mapped[list[CognateSetMember]] = relationship(back_populates="cognate_set")

    __table_args__ = (
        Index("ix_cognate_set_concept", "concept_id"),
    )
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
class CognateSetMember(Base):
    """Association row linking one lexeme into one cognate set.

    Composite primary key (cognate_set_id, lexeme_id) prevents duplicates.
    """

    __tablename__ = "cognate_set_member"

    cognate_set_id: Mapped[int] = mapped_column(
        ForeignKey("cognate_set.id"), primary_key=True
    )
    lexeme_id: Mapped[int] = mapped_column(
        ForeignKey("lexeme.id"), primary_key=True
    )
    # Role of the lexeme within the set; defaults to "member"
    # (not constrained at the DB level).
    role: Mapped[str] = mapped_column(String(32), nullable=False, default="member")

    cognate_set: Mapped[CognateSetTable] = relationship(back_populates="members")
|
cognate_pipeline/src/cognate_pipeline/export/__init__.py
ADDED
|
File without changes
|
cognate_pipeline/src/cognate_pipeline/export/cldf_exporter.py
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Export database contents as CLDF Wordlist."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from typing import Any
|
| 8 |
+
|
| 9 |
+
from sqlalchemy import Engine, select
|
| 10 |
+
from sqlalchemy.orm import Session, sessionmaker
|
| 11 |
+
|
| 12 |
+
from cognate_pipeline.config.schema import ExportConfig
|
| 13 |
+
from cognate_pipeline.db.schema import (
|
| 14 |
+
CognateSetMember,
|
| 15 |
+
CognateSetTable,
|
| 16 |
+
Language,
|
| 17 |
+
Lexeme,
|
| 18 |
+
Source,
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
logger = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class CldfExporter:
    """Exports database contents as a CLDF Wordlist dataset.

    Uses pycldf when available; otherwise falls back to writing plain CSV
    files with the same column layout.
    """

    def __init__(self, engine: Engine, config: ExportConfig) -> None:
        self.engine = engine
        self.config = config
        self._session_factory = sessionmaker(bind=engine)

    def export(self, output_dir: Path) -> None:
        """Export CLDF files to the given directory."""
        try:
            from pycldf import Wordlist
        except ImportError:
            # Fallback: write CSV manually
            self._export_csv_fallback(output_dir)
            return

        ds = Wordlist.in_dir(output_dir)
        ds.add_component("LanguageTable")
        ds.add_component("ParameterTable")
        ds.add_component("CognateTable")

        with self._session_factory() as session:
            # Languages. Build an id -> glottocode map once, instead of
            # issuing one session.get() per lexeme below (avoids N+1 queries).
            glotto_by_id: dict[int, str] = {}
            for lang in session.execute(select(Language)).scalars():
                glotto_by_id[lang.id] = lang.glottocode
                ds.add_language(
                    ID=lang.glottocode,
                    Name=lang.name,
                    Glottocode=lang.glottocode,
                    ISO639P3code=lang.iso639_3 or "",
                )

            # Lexemes -> Forms + Parameters
            concepts_seen: set[str] = set()
            for lex in session.execute(select(Lexeme)).scalars():
                if lex.concept_id and lex.concept_id not in concepts_seen:
                    ds.add_concept(ID=lex.concept_id, Name=lex.concept_id)
                    concepts_seen.add(lex.concept_id)

                # NOTE(review): list() over a string yields one segment per
                # character — assumes phonetic_canonical stores one symbol per
                # character; confirm against the normaliser's output format.
                segments = list(lex.phonetic_canonical) if lex.phonetic_canonical else []
                ds.add_form(
                    ID=str(lex.id),
                    Language_ID=glotto_by_id.get(lex.language_id, ""),
                    Parameter_ID=lex.concept_id,
                    Form=lex.lemma,
                    Segments=segments,
                )

            # Cognate sets
            for cs in session.execute(select(CognateSetTable)).scalars():
                for member in self._set_members(session, cs.id):
                    ds.add_cognate(
                        ID=f"cog_{cs.id}_{member.lexeme_id}",
                        Form_ID=str(member.lexeme_id),
                        Cognateset_ID=str(cs.id),
                    )

        ds.write()
        logger.info("CLDF export complete: %s", output_dir)

    @staticmethod
    def _set_members(session: Session, cognate_set_id: int):
        """Return an iterator over the members of one cognate set."""
        return session.execute(
            select(CognateSetMember).where(
                CognateSetMember.cognate_set_id == cognate_set_id
            )
        ).scalars()

    def _export_csv_fallback(self, output_dir: Path) -> None:
        """Export as plain CSV when pycldf is not available."""
        import csv

        output_dir.mkdir(parents=True, exist_ok=True)

        with self._session_factory() as session:
            # Languages (also collect an id -> glottocode map for the forms
            # pass, avoiding one session.get() per lexeme).
            glotto_by_id: dict[int, str] = {}
            with (output_dir / "languages.csv").open("w", newline="", encoding="utf-8") as f:
                writer = csv.writer(f)
                writer.writerow(["ID", "Name", "Glottocode", "ISO639P3code"])
                for lang in session.execute(select(Language)).scalars():
                    glotto_by_id[lang.id] = lang.glottocode
                    writer.writerow([lang.glottocode, lang.name, lang.glottocode, lang.iso639_3 or ""])

            # Forms
            concepts: set[str] = set()
            with (output_dir / "forms.csv").open("w", newline="", encoding="utf-8") as f:
                writer = csv.writer(f)
                writer.writerow(["ID", "Language_ID", "Parameter_ID", "Form", "Segments"])
                for lex in session.execute(select(Lexeme)).scalars():
                    # str.join iterates characters directly; equivalent to the
                    # CLDF path's list() segmentation, joined with spaces.
                    segments = " ".join(lex.phonetic_canonical) if lex.phonetic_canonical else ""
                    writer.writerow([
                        lex.id,
                        glotto_by_id.get(lex.language_id, ""),
                        lex.concept_id,
                        lex.lemma,
                        segments,
                    ])
                    if lex.concept_id:
                        concepts.add(lex.concept_id)

            # Parameters
            with (output_dir / "parameters.csv").open("w", newline="", encoding="utf-8") as f:
                writer = csv.writer(f)
                writer.writerow(["ID", "Name"])
                for c in sorted(concepts):
                    writer.writerow([c, c])

            # Cognates
            with (output_dir / "cognates.csv").open("w", newline="", encoding="utf-8") as f:
                writer = csv.writer(f)
                writer.writerow(["ID", "Form_ID", "Cognateset_ID"])
                for cs in session.execute(select(CognateSetTable)).scalars():
                    for member in self._set_members(session, cs.id):
                        writer.writerow([f"cog_{cs.id}_{member.lexeme_id}", member.lexeme_id, cs.id])

        logger.info("CSV fallback export complete: %s", output_dir)
|
cognate_pipeline/src/cognate_pipeline/export/jsonld_exporter.py
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Export cognate links as JSON-LD."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from typing import Any
|
| 8 |
+
|
| 9 |
+
import orjson
|
| 10 |
+
from sqlalchemy import Engine, select
|
| 11 |
+
from sqlalchemy.orm import Session, sessionmaker
|
| 12 |
+
|
| 13 |
+
from cognate_pipeline.config.schema import ExportConfig
|
| 14 |
+
from cognate_pipeline.db.schema import (
|
| 15 |
+
CognateLink,
|
| 16 |
+
CognateSetMember,
|
| 17 |
+
CognateSetTable,
|
| 18 |
+
Language,
|
| 19 |
+
Lexeme,
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
logger = logging.getLogger(__name__)
|
| 23 |
+
|
| 24 |
+
# JSON-LD @context mapping our compact terms onto OntoLex-Lemon, LexInfo,
# Glottolog, and Concepticon IRIs. The "cognate" namespace is a placeholder
# (example.org) — NOTE(review): replace with a real vocabulary IRI before
# publishing the export.
_CONTEXT = {
    "@context": {
        "ontolex": "http://www.w3.org/ns/lemon/ontolex#",
        "lexinfo": "http://www.lexinfo.net/ontology/3.0/lexinfo#",
        "glottolog": "https://glottolog.org/resource/languoid/id/",
        "concepticon": "https://concepticon.clld.org/parameters/",
        "cognate": "http://example.org/cognate#",
        "lexeme": "cognate:lexeme",
        "form": "ontolex:writtenRep",
        "language": "lexinfo:language",
        "glottocode": "glottolog:",
        "concept": "concepticon:",
        "ipa": "ontolex:phoneticRep",
        "soundClass": "cognate:soundClass",
        "cognateLink": "cognate:CognateLink",
        "score": "cognate:score",
        "method": "cognate:method",
        "evidence": "cognate:evidence",
    }
}
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class JsonLdExporter:
    """Exports lexemes, cognate links, and cognate sets as one JSON-LD document."""

    def __init__(self, engine: Engine, config: ExportConfig) -> None:
        self.engine = engine
        self.config = config
        self._session_factory = sessionmaker(bind=engine)

    def export(self, output_dir: Path) -> None:
        """Write ``cognates.jsonld`` (context + @graph) into *output_dir*."""
        output_dir.mkdir(parents=True, exist_ok=True)

        with self._session_factory() as session:
            # Build lexeme nodes
            lexeme_nodes = self._build_lexeme_nodes(session)

            # Build cognate link edges
            link_edges = self._build_link_edges(session)

            # Build cognate sets
            set_nodes = self._build_set_nodes(session)

            doc: dict[str, Any] = {
                **_CONTEXT,
                "@graph": {
                    "lexemes": lexeme_nodes,
                    "cognateLinks": link_edges,
                    "cognateSets": set_nodes,
                },
            }

        out_path = output_dir / "cognates.jsonld"
        out_path.write_bytes(orjson.dumps(doc, option=orjson.OPT_INDENT_2))
        logger.info("JSON-LD export written to %s", out_path)

    def _build_lexeme_nodes(self, session: Session) -> list[dict]:
        """One node per lexeme; provenance included only when configured."""
        # Prefetch id -> glottocode once instead of one session.get() per
        # lexeme (avoids N+1 queries).
        glotto_by_id = {
            lang.id: lang.glottocode
            for lang in session.execute(select(Language)).scalars()
        }
        nodes = []
        for lex in session.execute(select(Lexeme)).scalars():
            node: dict[str, Any] = {
                "@id": f"lexeme:{lex.id}",
                "@type": "lexeme",
                "externalId": lex.external_id,
                "form": lex.lemma,
                "language": glotto_by_id.get(lex.language_id, ""),
                "concept": lex.concept_id,
                "phoneticCanonical": lex.phonetic_canonical,
                "transcriptionType": lex.transcription_type,
                "soundClass": lex.sound_class,
                "confidence": lex.confidence,
            }
            if self.config.include_provenance and lex.provenance:
                node["provenance"] = lex.provenance
            nodes.append(node)
        return nodes

    def _build_link_edges(self, session: Session) -> list[dict]:
        """One edge per cognate_link row; evidence attached only when present."""
        edges = []
        for link in session.execute(select(CognateLink)).scalars():
            edge: dict[str, Any] = {
                "@type": "cognateLink",
                "lexemeA": f"lexeme:{link.lexeme_id_a}",
                "lexemeB": f"lexeme:{link.lexeme_id_b}",
                "concept": link.concept_id,
                "relationshipType": link.relationship_type,
                "score": link.score,
                "method": link.method,
            }
            if link.evidence:
                edge["evidence"] = link.evidence
            edges.append(edge)
        return edges

    def _build_set_nodes(self, session: Session) -> list[dict]:
        """One node per cognate set, with its member lexeme references."""
        sets = []
        for cs in session.execute(select(CognateSetTable)).scalars():
            members = list(
                session.execute(
                    select(CognateSetMember).where(
                        CognateSetMember.cognate_set_id == cs.id
                    )
                ).scalars()
            )
            sets.append({
                "@id": f"cognateSet:{cs.id}",
                "concept": cs.concept_id,
                "method": cs.method,
                "members": [
                    {"lexeme": f"lexeme:{m.lexeme_id}", "role": m.role}
                    for m in members
                ],
                "quality": cs.quality,
            })
        return sets
|
cognate_pipeline/src/cognate_pipeline/ingest/__init__.py
ADDED
|
File without changes
|
cognate_pipeline/src/cognate_pipeline/ingest/base.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Base protocol for source ingesters."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from collections.abc import Iterator
|
| 6 |
+
from typing import Protocol, runtime_checkable
|
| 7 |
+
|
| 8 |
+
from cognate_pipeline.config.schema import SourceDef
|
| 9 |
+
from cognate_pipeline.ingest.models import RawLexeme
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@runtime_checkable
class SourceIngester(Protocol):
    """Structural protocol that all source ingesters must implement.

    ``@runtime_checkable`` allows ``isinstance(obj, SourceIngester)`` checks
    against any class that provides this constructor and ``ingest`` method.
    """

    def __init__(self, source_def: SourceDef) -> None: ...

    def ingest(self) -> Iterator[RawLexeme]:
        """Yield RawLexeme objects from the source."""
        ...
|
cognate_pipeline/src/cognate_pipeline/ingest/cldf_ingester.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""CLDF FormTable ingester using pycldf."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
from collections.abc import Iterator
|
| 7 |
+
|
| 8 |
+
from cognate_pipeline.config.schema import SourceDef
|
| 9 |
+
from cognate_pipeline.ingest.models import RawLexeme, TranscriptionType
|
| 10 |
+
from cognate_pipeline.provenance.tracker import ProvenanceRecord
|
| 11 |
+
|
| 12 |
+
logger = logging.getLogger(__name__)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class CldfIngester:
    """Ingests CLDF Wordlist datasets via pycldf."""

    # Conventional CLDF metadata filenames, tried in order.
    _METADATA_CANDIDATES = ("Wordlist-metadata.json", "cldf-metadata.json")

    def __init__(self, source_def: SourceDef) -> None:
        self.source_def = source_def

    def ingest(self) -> Iterator[RawLexeme]:
        """Yield one RawLexeme per non-empty form in the dataset's FormTable.

        Raises:
            ImportError: if pycldf is not installed.
            FileNotFoundError: if no CLDF metadata file is found under the
                configured source path.
        """
        try:
            from pycldf import Dataset
        except ImportError as err:
            # Chain the cause so the original import failure stays visible.
            raise ImportError(
                "pycldf is required for CLDF ingestion. "
                "Install with: pip install cognate-pipeline[cldf]"
            ) from err

        path = self.source_def.path
        # Find metadata file: accept either conventional filename.
        for candidate in self._METADATA_CANDIDATES:
            metadata_path = path / candidate
            if metadata_path.exists():
                break
        else:
            raise FileNotFoundError(
                f"No CLDF metadata found in {path}. "
                "Expected Wordlist-metadata.json or cldf-metadata.json"
            )

        ds = Dataset.from_metadata(metadata_path)

        # Build language_id -> glottocode mapping
        lang_map: dict[str, str] = {}
        if "LanguageTable" in ds:
            for lang in ds["LanguageTable"]:
                lang_map[lang.get("ID", "")] = lang.get("Glottocode", "")

        # Read forms
        for form in ds["FormTable"]:
            form_id = str(form.get("ID", ""))
            language_id = str(form.get("Language_ID", ""))
            concept_id = str(form.get("Parameter_ID", ""))
            value = str(form.get("Form", ""))

            # "_" is the dataset convention for a null/missing form.
            if not value or value == "_":
                continue

            glottocode = lang_map.get(language_id, "")

            # Segments in CLDF are typically IPA tokenisations
            segments = form.get("Segments", [])
            phonetic_raw = ""
            transcription_type = TranscriptionType.UNKNOWN
            if segments:
                if isinstance(segments, list):
                    phonetic_raw = " ".join(segments)
                else:
                    phonetic_raw = str(segments)
                transcription_type = TranscriptionType.IPA

            yield RawLexeme(
                id=form_id or f"{self.source_def.name}_{language_id}_{concept_id}",
                language_id=language_id,
                glottocode=glottocode,
                concept_id=concept_id,
                form=value,
                phonetic_raw=phonetic_raw,
                transcription_type=transcription_type,
                source_name=self.source_def.name,
                provenance=ProvenanceRecord(
                    source_name=self.source_def.name,
                    source_format="cldf",
                    original_id=form_id,
                ).add_step("ingest", {"dataset": str(path)}),
            )
|
cognate_pipeline/src/cognate_pipeline/ingest/csv_ingester.py
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""CSV/TSV/COG file ingester with configurable column mapping."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import csv
|
| 6 |
+
import logging
|
| 7 |
+
from collections.abc import Iterator
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
|
| 10 |
+
from cognate_pipeline.config.schema import SourceDef, SourceFormat
|
| 11 |
+
from cognate_pipeline.ingest.models import RawLexeme, TranscriptionType
|
| 12 |
+
from cognate_pipeline.provenance.tracker import ProvenanceRecord
|
| 13 |
+
|
| 14 |
+
logger = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class CsvIngester:
|
| 18 |
+
"""Ingests CSV, TSV, and .cog (cognate pair) files."""
|
| 19 |
+
|
| 20 |
+
def __init__(self, source_def: SourceDef) -> None:
    # Keep the full source definition; format/delimiter/encoding/column
    # mapping are all read lazily at ingest time.
    self.source_def = source_def
|
| 22 |
+
|
| 23 |
+
def ingest(self) -> Iterator[RawLexeme]:
    """Dispatch to the parser matching the configured source format."""
    source_format = self.source_def.format

    # TSV and .cog default to tab-separated; everything else to comma.
    # An explicitly configured delimiter always wins.
    fallback = "\t" if source_format in (SourceFormat.COG, SourceFormat.TSV) else ","
    delim = self.source_def.delimiter or fallback

    source_path = Path(self.source_def.path)
    if source_format == SourceFormat.COG:
        parser = self._ingest_cog
    else:
        parser = self._ingest_tabular
    yield from parser(source_path, delim)
|
| 35 |
+
|
| 36 |
+
def _ingest_cog(self, path: Path, delimiter: str) -> Iterator[RawLexeme]:
    """Ingest .cog format: two-column cognate pair file.

    Each row has (lang_a_form, lang_b_form).
    lang_b_form may contain pipe-separated alternatives.
    `_` represents null / missing.

    These files use consonantal transliteration, NOT IPA.
    """
    extra = self.source_def.extra
    # Language labels for the two columns, configurable via `extra`.
    lang_a = extra.get("lang_a", "lang_a")
    lang_b = extra.get("lang_b", "lang_b")

    with path.open("r", encoding=self.source_def.encoding) as fh:
        reader = csv.reader(fh, delimiter=delimiter)
        header = next(reader, None)  # discard header row; None means empty file
        if header is None:
            return

        # row_idx starts at 1 for the first data row (header excluded).
        for row_idx, row in enumerate(reader, start=1):
            if len(row) < 2:
                continue
            form_a = row[0].strip()
            form_b_raw = row[1].strip()

            # Skip null entries
            if form_a == "_" or not form_a:
                continue

            # Generate a concept_id from the pair (the cognate pair itself)
            concept_id = f"pair_{row_idx}"

            # Emit lang_a form
            yield RawLexeme(
                id=f"{self.source_def.name}_{lang_a}_{row_idx}",
                language_id=lang_a,
                glottocode="",
                concept_id=concept_id,
                form=form_a,
                transcription_type=TranscriptionType.TRANSLITERATION,
                source_name=self.source_def.name,
                provenance=ProvenanceRecord(
                    source_name=self.source_def.name,
                    source_format="cog",
                    original_id=f"row_{row_idx}_col_a",
                ).add_step("ingest", {"file": path.name, "row": row_idx}),
            )

            # Emit lang_b forms (pipe-separated alternatives)
            if form_b_raw == "_" or not form_b_raw:
                continue
            alternatives = [f.strip() for f in form_b_raw.split("|") if f.strip() and f.strip() != "_"]
            if not alternatives:
                continue

            # First alternative becomes the primary form; the rest travel
            # alongside it in `alternatives`.
            primary = alternatives[0]
            rest = alternatives[1:] if len(alternatives) > 1 else []
            yield RawLexeme(
                id=f"{self.source_def.name}_{lang_b}_{row_idx}",
                language_id=lang_b,
                glottocode="",
                concept_id=concept_id,
                form=primary,
                alternatives=rest,
                transcription_type=TranscriptionType.TRANSLITERATION,
                source_name=self.source_def.name,
                provenance=ProvenanceRecord(
                    source_name=self.source_def.name,
                    source_format="cog",
                    original_id=f"row_{row_idx}_col_b",
                ).add_step("ingest", {"file": path.name, "row": row_idx}),
            )
|
| 108 |
+
|
| 109 |
+
def _ingest_tabular(self, path: Path, delimiter: str) -> Iterator[RawLexeme]:
    """Ingest a standard CSV/TSV file using the source's column mapping.

    Args:
        path: File to read (opened with the source's configured encoding).
        delimiter: Field delimiter, e.g. "," or "\t".

    Yields:
        One RawLexeme per usable data row. Rows whose form is empty or the
        "_" null marker are skipped. A pipe-separated form ("a|b|c") is
        split into a primary form plus alternatives.
    """
    mapping = self.source_def.column_mapping

    with path.open("r", encoding=self.source_def.encoding) as fh:
        reader = csv.DictReader(fh, delimiter=delimiter)
        for row_idx, row in enumerate(reader, start=1):
            form_col = mapping.form or "Form"
            # csv.DictReader fills missing trailing fields with None (the
            # .get default only applies when the *key* is absent), so
            # coerce with `or ""` before stripping.
            form = (row.get(form_col) or "").strip()
            if not form or form == "_":
                continue

            lang_col = mapping.language or "Language_ID"
            language_id = (row.get(lang_col) or "").strip()

            concept_col = mapping.concept or "Parameter_ID"
            concept_id = (row.get(concept_col) or "").strip()

            glottocode = ""
            if mapping.glottocode:
                glottocode = (row.get(mapping.glottocode) or "").strip()

            phonetic_raw = ""
            transcription_type = TranscriptionType.UNKNOWN
            if mapping.ipa:
                phonetic_raw = (row.get(mapping.ipa) or "").strip()
                if phonetic_raw:
                    transcription_type = TranscriptionType.IPA

            # Handle pipe-separated alternatives in the form column.
            alternatives = []
            if "|" in form:
                parts = [p.strip() for p in form.split("|") if p.strip()]
                if not parts:
                    # Form consisted only of pipes/whitespace; nothing usable.
                    continue
                form = parts[0]
                alternatives = parts[1:]

            source_id = ""
            if mapping.source_id:
                source_id = (row.get(mapping.source_id) or "").strip()

            yield RawLexeme(
                id=source_id or f"{self.source_def.name}_{row_idx}",
                language_id=language_id,
                glottocode=glottocode,
                concept_id=concept_id,
                form=form,
                phonetic_raw=phonetic_raw,
                transcription_type=transcription_type,
                alternatives=alternatives,
                source_name=self.source_def.name,
                provenance=ProvenanceRecord(
                    source_name=self.source_def.name,
                    source_format=self.source_def.format.value,
                    original_id=source_id or f"row_{row_idx}",
                ).add_step("ingest", {"file": path.name, "row": row_idx}),
            )
|
cognate_pipeline/src/cognate_pipeline/ingest/json_ingester.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Generic JSON/NDJSON ingester with field mapping."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
from collections.abc import Iterator
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
|
| 9 |
+
import orjson
|
| 10 |
+
|
| 11 |
+
from cognate_pipeline.config.schema import SourceDef, SourceFormat
|
| 12 |
+
from cognate_pipeline.ingest.models import RawLexeme, TranscriptionType
|
| 13 |
+
from cognate_pipeline.provenance.tracker import ProvenanceRecord
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class JsonIngester:
    """Ingest JSON-array or NDJSON sources using a configurable field mapping."""

    def __init__(self, source_def: SourceDef) -> None:
        self.source_def = source_def

    def ingest(self) -> Iterator[RawLexeme]:
        """Dispatch to the NDJSON or JSON-array reader based on the source format."""
        src = Path(self.source_def.path)
        if self.source_def.format == SourceFormat.NDJSON:
            yield from self._ingest_ndjson(src)
        else:
            yield from self._ingest_json_array(src)

    def _ingest_ndjson(self, path: Path) -> Iterator[RawLexeme]:
        """Yield one mapped lexeme per valid, non-blank NDJSON line."""
        with path.open("rb") as fh:
            for lineno, raw in enumerate(fh, start=1):
                raw = raw.strip()
                if not raw:
                    continue
                try:
                    record = orjson.loads(raw)
                except orjson.JSONDecodeError:
                    logger.warning("Skipping invalid JSON at line %d", lineno)
                    continue
                mapped = self._map_object(record, lineno)
                if mapped is not None:
                    yield mapped

    def _ingest_json_array(self, path: Path) -> Iterator[RawLexeme]:
        """Yield mapped lexemes from a JSON file holding a list (or a wrapping object)."""
        data = orjson.loads(path.read_bytes())
        if isinstance(data, list):
            records = data
        elif isinstance(data, dict):
            # Look for a conventional list container inside the object;
            # if none is found, treat the object itself as a single record.
            records = next(
                (
                    data[key]
                    for key in ("forms", "data", "entries", "items", "results")
                    if isinstance(data.get(key), list)
                ),
                [data],
            )
        else:
            return

        for pos, record in enumerate(records, start=1):
            mapped = self._map_object(record, pos)
            if mapped is not None:
                yield mapped

    def _map_object(self, obj: dict, idx: int) -> RawLexeme | None:
        """Map one JSON object to a RawLexeme, or None when it has no usable form."""
        mapping = self.source_def.column_mapping

        form = self._get_nested(obj, mapping.form) or ""
        if form in ("", "_"):
            # Empty form or the "_" null marker: nothing to ingest.
            return None

        language_id = self._get_nested(obj, mapping.language) or ""
        concept_id = self._get_nested(obj, mapping.concept) or ""

        glottocode = ""
        if mapping.glottocode:
            glottocode = self._get_nested(obj, mapping.glottocode) or ""

        phonetic_raw = ""
        transcription_type = TranscriptionType.UNKNOWN
        if mapping.ipa:
            phonetic_raw = self._get_nested(obj, mapping.ipa) or ""
        if phonetic_raw:
            transcription_type = TranscriptionType.IPA

        return RawLexeme(
            id=f"{self.source_def.name}_{idx}",
            language_id=language_id,
            glottocode=glottocode,
            concept_id=concept_id,
            form=form,
            phonetic_raw=phonetic_raw,
            transcription_type=transcription_type,
            source_name=self.source_def.name,
            provenance=ProvenanceRecord(
                source_name=self.source_def.name,
                source_format=self.source_def.format.value,
                original_id=str(idx),
            ).add_step("ingest", {"index": idx}),
        )

    @staticmethod
    def _get_nested(obj: dict, path: str) -> str:
        """Resolve a dot-separated key path; return '' when any hop is missing."""
        if not path:
            return ""
        node = obj
        for key in path.split("."):
            node = node.get(key) if isinstance(node, dict) else None
            if node is None:
                return ""
        return str(node)
|
cognate_pipeline/src/cognate_pipeline/ingest/language_map.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:891e2f0de3e780027fb9f9fe77d4ecc4301978f80bcf3fef00cd71c2eb13b292
|
| 3 |
+
size 7920
|
cognate_pipeline/src/cognate_pipeline/ingest/language_resolver.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Resolve language identifiers to Glottocodes."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import json
|
| 6 |
+
import logging
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
from typing import Any
|
| 9 |
+
|
| 10 |
+
from cognate_pipeline.utils.glottolog import GlottologTree
|
| 11 |
+
|
| 12 |
+
logger = logging.getLogger(__name__)
|
| 13 |
+
|
| 14 |
+
# The full language map is generated by scripts/convert_cldf_to_tsv.py and
# shipped alongside this module; when it is missing we fall back to a small
# curated table of (mostly ancient / poorly-covered) languages.
_LANGUAGE_MAP_PATH = Path(__file__).parent / "language_map.json"

_HARDCODED_FALLBACK: dict[str, str] = {
    # Ancient Near East / classical
    "uga": "ugar1238", "heb": "hebr1245", "got": "goth1244", "xib": "iber1250",
    "akk": "akka1240", "sux": "sume1241", "lat": "lati1261", "grc": "anci1242",
    "arc": "offi1241", "egy": "egyp1253", "hit": "hitt1242", "phn": "phoe1239",
    "syc": "clas1252", "eus": "basq1248",
    # Old Germanic / Celtic
    "ang": "olde1238", "non": "oldn1244", "goh": "oldh1241",
    "sga": "oldi1245", "cym": "wels1247", "bre": "bret1244",
    # Balto-Slavic
    "lit": "lith1251", "chu": "chur1257", "rus": "russ1263",
    # Indo-Iranian
    "san": "sans1269", "ave": "aves1237", "fas": "west2369",
    # Italic / Mycenaean
    "osc": "osca1245", "xum": "umbr1253", "gmy": "myce1241",
    # Semitic (modern)
    "arb": "stan1318", "amh": "amha1245",
    # Turkic
    "otk": "oldt1247", "tur": "nucl1301", "aze": "nort2697",
    # Uralic
    "fin": "finn1318", "hun": "hung1274", "est": "esto1258",
}

if not _LANGUAGE_MAP_PATH.exists():
    _HARDCODED: dict[str, str] = _HARDCODED_FALLBACK
    logger.debug("Using fallback language map (%d entries)", len(_HARDCODED))
else:
    with _LANGUAGE_MAP_PATH.open(encoding="utf-8") as _f:
        _HARDCODED = json.load(_f)
    logger.debug("Loaded %d language mappings from %s", len(_HARDCODED), _LANGUAGE_MAP_PATH)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class LanguageResolver:
    """Map language identifiers onto Glottocodes.

    Lookup order:
      1. the identifier already looks like a Glottocode (xxxx1234);
      2. the hardcoded ancient-language table;
      3. the optional Glottolog tree (by code, ISO-639-3, or name).
    An empty string is returned when nothing matches.
    """

    def __init__(self, glottolog_tree: GlottologTree | None = None) -> None:
        self._tree = glottolog_tree

    def resolve(self, identifier: str) -> str:
        """Return the Glottocode for *identifier*, or '' if it cannot be resolved."""
        ident = identifier.strip()
        if not ident:
            return ""

        # Already a Glottocode? (four letters followed by four digits)
        if len(ident) == 8 and ident[:4].isalpha() and ident[4:].isdigit():
            return ident

        # Curated table of ancient / poorly-covered languages.
        mapped = _HARDCODED.get(ident.lower())
        if mapped is not None:
            return mapped

        # Fall back to the full Glottolog tree, when one was supplied.
        if self._tree is not None:
            hit = self._tree.lookup(ident)
            if hit is not None:
                return hit.glottocode

        logger.debug("Could not resolve language identifier: %s", ident)
        return ""
|
cognate_pipeline/src/cognate_pipeline/ingest/models.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Data models for the ingestion layer."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from dataclasses import dataclass, field
|
| 6 |
+
from typing import Any
|
| 7 |
+
|
| 8 |
+
from cognate_pipeline.provenance.tracker import ProvenanceRecord
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class TranscriptionType:
    """String constants describing what kind of phonetic string is stored."""

    # True IPA transcription.
    IPA = "ipa"
    # Script transliteration (e.g. Ugaritic consonantal skeleton).
    TRANSLITERATION = "transliteration"
    # Standard orthography (e.g. Gothic written in Latin letters).
    ORTHOGRAPHIC = "orthographic"
    # The source did not say.
    UNKNOWN = "unknown"
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@dataclass
class RawLexeme:
    """A single lexical form exactly as ingested from one source."""

    id: str          # unique id within the pipeline run
    language_id: str  # source-level language identifier
    glottocode: str   # resolved Glottocode, or '' when unknown
    concept_id: str   # concept / gloss identifier
    form: str         # the surface form
    phonetic_raw: str = ""  # raw phonetic string, if the source had one
    transcription_type: str = TranscriptionType.UNKNOWN
    alternatives: list[str] = field(default_factory=list)
    source_name: str = ""
    provenance: ProvenanceRecord | None = None
    extra: dict[str, Any] = field(default_factory=dict)

    # Backward-compatible alias for the pre-rename field name.
    @property
    def ipa_raw(self) -> str:
        return self.phonetic_raw

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a plain dict (provenance serialized recursively)."""
        payload: dict[str, Any] = {
            "id": self.id,
            "language_id": self.language_id,
            "glottocode": self.glottocode,
            "concept_id": self.concept_id,
            "form": self.form,
            "phonetic_raw": self.phonetic_raw,
            "transcription_type": self.transcription_type,
            "alternatives": self.alternatives,
            "source_name": self.source_name,
            "provenance": None,
            "extra": self.extra,
        }
        if self.provenance:
            payload["provenance"] = self.provenance.to_dict()
        return payload

    @classmethod
    def from_dict(cls, d: dict[str, Any]) -> RawLexeme:
        """Inverse of to_dict(); tolerates missing optional keys."""
        raw_prov = d.get("provenance")
        kwargs: dict[str, Any] = {
            "id": d["id"],
            "language_id": d["language_id"],
            "glottocode": d.get("glottocode", ""),
            "concept_id": d.get("concept_id", ""),
            "form": d["form"],
            # Older dumps used "ipa_raw" instead of "phonetic_raw".
            "phonetic_raw": d.get("phonetic_raw", d.get("ipa_raw", "")),
            "transcription_type": d.get("transcription_type", TranscriptionType.UNKNOWN),
            "alternatives": d.get("alternatives", []),
            "source_name": d.get("source_name", ""),
            "provenance": ProvenanceRecord.from_dict(raw_prov) if raw_prov else None,
            "extra": d.get("extra", {}),
        }
        return cls(**kwargs)
| 72 |
+
|
| 73 |
+
|
| 74 |
+
@dataclass
class RawNameForm:
    """A named-entity form (place name, personal name, etc.)."""

    id: str
    entity_type: str   # e.g. "place", "person"
    language_id: str
    glottocode: str
    name_string: str   # the attested name itself
    ipa_raw: str = ""
    source_name: str = ""
    external_ids: dict[str, str] = field(default_factory=dict)
    latitude: float | None = None   # coordinates, when the source supplies them
    longitude: float | None = None
    provenance: ProvenanceRecord | None = None
    extra: dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a plain dict (provenance serialized recursively)."""
        payload: dict[str, Any] = {
            "id": self.id,
            "entity_type": self.entity_type,
            "language_id": self.language_id,
            "glottocode": self.glottocode,
            "name_string": self.name_string,
            "ipa_raw": self.ipa_raw,
            "source_name": self.source_name,
            "external_ids": self.external_ids,
            "latitude": self.latitude,
            "longitude": self.longitude,
            "provenance": None,
            "extra": self.extra,
        }
        if self.provenance:
            payload["provenance"] = self.provenance.to_dict()
        return payload

    @classmethod
    def from_dict(cls, d: dict[str, Any]) -> RawNameForm:
        """Inverse of to_dict(); tolerates missing optional keys."""
        raw_prov = d.get("provenance")
        return cls(
            id=d["id"],
            entity_type=d["entity_type"],
            language_id=d["language_id"],
            glottocode=d.get("glottocode", ""),
            name_string=d["name_string"],
            ipa_raw=d.get("ipa_raw", ""),
            source_name=d.get("source_name", ""),
            external_ids=d.get("external_ids", {}),
            latitude=d.get("latitude"),
            longitude=d.get("longitude"),
            provenance=ProvenanceRecord.from_dict(raw_prov) if raw_prov else None,
            extra=d.get("extra", {}),
        )
|
cognate_pipeline/src/cognate_pipeline/ingest/wiktionary_ingester.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Wiktextract JSONL ingester."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
from collections.abc import Iterator
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
|
| 9 |
+
import orjson
|
| 10 |
+
|
| 11 |
+
from cognate_pipeline.config.schema import SourceDef
|
| 12 |
+
from cognate_pipeline.ingest.models import RawLexeme, TranscriptionType
|
| 13 |
+
from cognate_pipeline.provenance.tracker import ProvenanceRecord
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class WiktionaryIngester:
    """Ingest Wiktextract JSONL exports.

    Each line is one JSON object; the fields used here are:
      - word:                headword
      - lang / lang_code:    language name and Wiktionary code
      - pronunciations (or sounds): list of {"ipa": ...} dicts
      - senses:              list of {"glosses": [...]} dicts
      - etymology_text:      free-text etymology, kept in `extra`
    """

    def __init__(self, source_def: SourceDef) -> None:
        self.source_def = source_def

    def ingest(self) -> Iterator[RawLexeme]:
        """Yield one RawLexeme per valid entry line with a non-empty headword."""
        path = Path(self.source_def.path)
        with path.open("rb") as fh:
            for lineno, raw in enumerate(fh, start=1):
                raw = raw.strip()
                if not raw:
                    continue
                try:
                    entry = orjson.loads(raw)
                except orjson.JSONDecodeError:
                    logger.warning("Skipping invalid JSON at line %d", lineno)
                    continue

                word = entry.get("word", "").strip()
                if not word:
                    continue

                yield self._entry_to_lexeme(entry, word, path, lineno)

    def _entry_to_lexeme(self, entry: dict, word: str, path: Path, lineno: int) -> RawLexeme:
        """Map one parsed Wiktextract entry to a RawLexeme."""
        lang = entry.get("lang", "")
        lang_code = entry.get("lang_code", "")

        # First pronunciation carrying an "ipa" field wins; Wiktionary
        # supplies true IPA there. Otherwise the headword is treated as
        # plain orthography.
        phonetic_raw = ""
        transcription_type = TranscriptionType.ORTHOGRAPHIC
        prons = entry.get("pronunciations", entry.get("sounds", []))
        if isinstance(prons, list):
            for pron in prons:
                if isinstance(pron, dict) and pron.get("ipa"):
                    phonetic_raw = pron["ipa"]
                    transcription_type = TranscriptionType.IPA
                    break

        # Concept id = first gloss of the first sense, when present.
        concept_id = ""
        senses = entry.get("senses", [])
        if senses and isinstance(senses, list):
            head_sense = senses[0]
            if isinstance(head_sense, dict):
                glosses = head_sense.get("glosses", [])
                if glosses:
                    concept_id = glosses[0]

        etymology = entry.get("etymology_text", "")

        return RawLexeme(
            id=f"{self.source_def.name}_{lineno}",
            language_id=lang_code or lang,
            glottocode="",
            concept_id=concept_id,
            form=word,
            phonetic_raw=phonetic_raw,
            transcription_type=transcription_type,
            source_name=self.source_def.name,
            provenance=ProvenanceRecord(
                source_name=self.source_def.name,
                source_format="wiktionary",
                original_id=f"line_{lineno}",
            ).add_step(
                "ingest",
                {"file": path.name, "line": lineno},
            ),
            extra={"etymology": etymology} if etymology else {},
        )
|
cognate_pipeline/src/cognate_pipeline/normalise/__init__.py
ADDED
|
File without changes
|
cognate_pipeline/src/cognate_pipeline/normalise/epitran_backend.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Epitran wrapper for grapheme-to-phoneme conversion."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
from typing import Any
|
| 7 |
+
|
| 8 |
+
logger = logging.getLogger(__name__)
|
| 9 |
+
|
| 10 |
+
# Cache of Epitran instances by language code
|
| 11 |
+
_EPITRAN_CACHE: dict[str, Any] = {}
|
| 12 |
+
|
| 13 |
+
# Mapping from Glottocode/ISO to Epitran language codes
|
| 14 |
+
_LANG_TO_EPITRAN: dict[str, str] = {
|
| 15 |
+
"eng": "eng-Latn",
|
| 16 |
+
"deu": "deu-Latn",
|
| 17 |
+
"fra": "fra-Latn",
|
| 18 |
+
"spa": "spa-Latn",
|
| 19 |
+
"ita": "ita-Latn",
|
| 20 |
+
"por": "por-Latn",
|
| 21 |
+
"tur": "tur-Latn",
|
| 22 |
+
"ara": "ara-Arab",
|
| 23 |
+
"hin": "hin-Deva",
|
| 24 |
+
"rus": "rus-Cyrl",
|
| 25 |
+
}
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def _get_epitran(lang_code: str) -> Any | None:
    """Return a (cached) Epitran instance for `lang_code`, or None on failure.

    Returns None when the `epitran` package is not installed or when no
    model exists for the language. Failures are logged at debug level and
    are NOT cached, so a later call may retry.
    """
    cached = _EPITRAN_CACHE.get(lang_code)
    if cached is not None:
        return cached

    try:
        import epitran
    except ImportError:
        logger.debug("epitran not installed")
        return None

    # Translate to Epitran's "lang-Script" identifier, falling back to the
    # code as given.
    target = _LANG_TO_EPITRAN.get(lang_code, lang_code)
    try:
        instance = epitran.Epitran(target)
    except Exception as exc:
        logger.debug("Epitran failed for %s: %s", target, exc)
        return None
    _EPITRAN_CACHE[lang_code] = instance
    return instance
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def transliterate(text: str, lang_code: str) -> str | None:
    """Convert orthographic `text` to IPA via Epitran.

    Returns None when Epitran is unavailable, the language is unsupported,
    or the conversion itself raises.
    """
    engine = _get_epitran(lang_code)
    if engine is None:
        return None
    try:
        return engine.transliterate(text)
    except Exception as exc:
        logger.warning("Epitran transliteration failed for '%s' (%s): %s", text, lang_code, exc)
        return None
|