| """Extract lexicon data for Proto-Indo-European (ine-pro) and Urartian (xur). |
| |
| Sources: |
| - PIE: Wiktionary Proto-Indo-European roots and lemmas |
| (Category:Proto-Indo-European_roots, Category:Proto-Indo-European_lemmas) |
| - Urartian: Wiktionary Urartian lemmas + Wikipedia Urartian_language article |
| vocabulary table and shared lexicon section |
| |
| All data was fetched via the Wiktionary/Wikipedia MediaWiki API and verified |
| against the source pages. |
| """ |
from __future__ import annotations

import os
import re
import sys
import unicodedata
from pathlib import Path

ROOT = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(ROOT / "cognate_pipeline" / "src"))
sys.path.insert(0, str(ROOT / "scripts"))

from cognate_pipeline.normalise.sound_class import ipa_to_sound_class
from transliteration_maps import transliterate


def strip_pie_accents(s: str) -> str:
    """Strip acute/grave accent marks from PIE forms (stress marks, not phonemic).

    Keeps macrons (vowel length) and other diacritics.
    """
    s = unicodedata.normalize("NFD", s)
    s = re.sub(r"[\u0301\u0300]", "", s)
    return unicodedata.normalize("NFC", s)
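# Illustrative example (input chosen for demonstration, not taken from the data files):
# strip_pie_accents("wĺ̥kʷos") -> "wl̥kʷos"; the stress acute is removed while the
# syllabic ring below is kept.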


def clean_pie_form(form: str) -> str:
    """Clean a PIE form for transliteration: strip leading "*", trailing "-", and accents."""
    form = form.rstrip("-")
    form = strip_pie_accents(form)
    form = form.lstrip("*")
    return form
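# Illustrative example: clean_pie_form("*bʰréh₂tēr") -> "bʰreh₂tēr"; the asterisk
# and the acute are stripped, while the length macron on ē is preserved.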


def make_entry(word: str, ipa: str, sca: str, source: str, gloss: str) -> str:
    """Format a single TSV line."""
    concept = gloss.split(",")[0].split(";")[0].strip()[:60] if gloss else "-"
    return f"{word}\t{ipa}\t{sca}\t{source}\t{concept}\t-"


def load_pie_data() -> list[tuple[str, str]]:
    """Load PIE roots and lemmas with glosses from pre-fetched files."""
    tmpdir = os.environ.get("TEMP", os.environ.get("TMP", "/tmp"))
    entries = []
    seen_forms = set()

    for fname in ["pie_all_roots_glossed.txt", "pie_lemmas_glossed.txt"]:
        fpath = os.path.join(tmpdir, fname)
        if not os.path.exists(fpath):
            print(f"WARNING: {fpath} not found, skipping", file=sys.stderr)
            continue
        with open(fpath, encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line or "\t" not in line:
                    continue
                form, gloss = line.split("\t", 1)

                clean = clean_pie_form(form)
                if clean in seen_forms or not clean:
                    continue
                seen_forms.add(clean)
                entries.append((form, gloss))

    return entries


def build_pie_lexicon() -> list[str]:
    """Build PIE lexicon TSV lines."""
    entries = load_pie_data()
    lines = []

    for form, gloss in entries:
        clean = clean_pie_form(form)
        if not clean:
            continue

        # Skip affixes (forms written with a leading hyphen).
        if form.startswith("-"):
            continue

        # Skip entries without a usable gloss.
        if not gloss or len(gloss) < 2 or gloss.startswith(":"):
            continue

        ipa = transliterate(clean, "ine")
        if not ipa:
            continue

        # Skip empty or all-"0" sound-class strings.
        sca = ipa_to_sound_class(ipa)
        if not sca or sca == "0" * len(sca):
            continue

        lines.append(make_entry(form, ipa, sca, "wiktionary", gloss))

    return lines
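# (form, gloss) pairs for Urartian, collected from Wiktionary Urartian lemmas and the
# Wikipedia "Urartian language" article (vocabulary table and shared lexicon section);
# see the module docstring for the source description.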
URARTIAN_WIKTIONARY_ENTRIES = [
    ("ereli", "king"),

    ("esi", "place, land"),
    ("šuri", "weapon"),

    ("ag", "to lead"),
    ("ar", "to give"),
    ("man", "to be"),
    ("nun", "to come"),

    ("Arṣiba", "eagle"),
    ("arde", "town"),
    ("ašti", "woman, wife"),
    ("awari", "field"),
    ("kade", "barley"),
    ("pile", "canal"),
    ("sane", "kettle, pot"),
    ("šure", "sword"),
    ("šaluri", "plum"),
    ("ṣare", "garden, orchard"),
    ("ḫinzuri", "apple"),

    ("ebani", "country, land"),
    ("edi", "person, body"),
    ("qiura", "earth, ground"),

    ("parə", "towards"),
    ("pei", "under"),
    ("eue", "and"),
    ("ašə", "when"),
    ("alə", "that which"),

    ("ušt", "to march forth"),
    ("kar", "to conquer"),
    ("kuṭ", "to reach"),
    ("šidišt", "to build"),
    ("ḫa", "to take, capture"),
    ("naḫ", "to ascend"),
    ("qapqar", "to besiege"),
    ("urp", "to slaughter"),

    ("ul-ṭu", "camel"),
    ("tarayə", "great"),
    ("arə", "granary"),
    ("badusi", "perfection"),
    ("ušmašinə", "might, power"),
    ("alsuišə", "greatness"),
    ("ardišə", "order, command"),
    ("arniušə", "deed, act"),

    ("aqarqi", "unit of measurement"),
    ("ṭerusi", "unit of measurement"),
]


def preprocess_urartian(form: str) -> str:
    """Pre-process Urartian transliteration to handle chars not in URARTIAN_MAP.

    The URARTIAN_MAP in transliteration_maps.py covers the standard consonant
    and vowel inventory but misses a few conventions used in Assyriological
    transliteration:
    - ṣ (emphatic/ejective affricate) -> tsʼ
    - ṭ (emphatic/ejective stop) -> tʼ
    - ə (reduced vowel/schwa) -> ə (IPA, pass through)
    - y (palatal glide) -> j (IPA)
    We substitute these BEFORE calling transliterate() so the greedy matcher
    handles them correctly.
    """
    form = form.replace("ṣ", "tsʼ")
    form = form.replace("ṭ", "tʼ")
    form = form.replace("y", "j")
    return form
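# Illustrative examples: preprocess_urartian("arṣiba") -> "artsʼiba" and
# preprocess_urartian("tarayə") -> "tarajə"; transliterate() is then applied to the result.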


def build_urartian_lexicon() -> list[str]:
    """Build Urartian lexicon TSV lines."""
    lines = []
    seen = set()

    for form, gloss in URARTIAN_WIKTIONARY_ENTRIES:
        # Remove hyphens used in the transliteration (e.g. "ul-ṭu") before processing.
        clean = form.replace("-", "")

        if clean in seen:
            continue
        seen.add(clean)

        # Lower-case entries recorded with an initial capital (e.g. "Arṣiba").
        clean_lower = clean.lower()

        # Map emphatics and glides not covered by URARTIAN_MAP (see preprocess_urartian).
        preprocessed = preprocess_urartian(clean_lower)

        ipa = transliterate(preprocessed, "xur")
        if not ipa:
            continue

        sca = ipa_to_sound_class(ipa)
        if not sca:
            continue

        lines.append(make_entry(form, ipa, sca, "wiktionary", gloss))

    return lines


def main():
    outdir = ROOT / "data" / "training" / "lexicons"
    outdir.mkdir(parents=True, exist_ok=True)

    header = "Word\tIPA\tSCA\tSource\tConcept_ID\tCognate_Set_ID"
    print("Building PIE lexicon...", file=sys.stderr)
    pie_lines = build_pie_lexicon()
    pie_path = outdir / "ine-pro.tsv"
    with open(pie_path, "w", encoding="utf-8", newline="\n") as f:
        f.write(header + "\n")
        for line in pie_lines:
            f.write(line + "\n")
    print(f" Written {len(pie_lines)} entries to {pie_path}", file=sys.stderr)

    print("Building Urartian lexicon...", file=sys.stderr)
    xur_lines = build_urartian_lexicon()
    xur_path = outdir / "xur.tsv"
    with open(xur_path, "w", encoding="utf-8", newline="\n") as f:
        f.write(header + "\n")
        for line in xur_lines:
            f.write(line + "\n")
    print(f" Written {len(xur_lines)} entries to {xur_path}", file=sys.stderr)

    print("\n=== Summary ===")
    print(f"PIE (ine-pro): {len(pie_lines)} entries")
    print(f"Urartian (xur): {len(xur_lines)} entries")
    print("\nSources used:")
    print(" PIE: Wiktionary Proto-Indo-European roots (355 roots) and lemmas (509 lemmas)")
    print(" Urartian: Wiktionary Category:Urartian_lemmas (5 entries) + Wikipedia Urartian_language article")
    print("\nOutput files:")
    print(f" {pie_path}")
    print(f" {xur_path}")


if __name__ == "__main__":
    main()