|
|
| """Extract IE-CoR (Indo-European Cognate Relationships) cognate pairs.
|
|
|
| Reads sources/iecor/cldf/cognates.csv + forms.csv + languages.csv.
|
| Standard CLDF CognateTable format.
|
|
|
| Output: staging/cognate_pairs/iecor_cognate_pairs.tsv (14-column schema)
|
| """
|
|
|
| from __future__ import annotations
|
|
|
| import csv
|
| import io
|
| import sys
|
| from collections import defaultdict
|
| from itertools import combinations
|
| from pathlib import Path
|
|
|
# Force UTF-8 on stdout/stderr so IPA characters print correctly even when
# the console's native encoding is not UTF-8 (e.g. cp1252 on Windows).
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8")

# Repository root — assumes this script sits one directory below it
# (TODO confirm against the actual layout).
ROOT = Path(__file__).resolve().parent.parent
# Make project-local packages importable without installation.
sys.path.insert(0, str(ROOT / "cognate_pipeline" / "src"))
sys.path.insert(0, str(ROOT / "scripts"))

from cognate_pipeline.normalise.sound_class import ipa_to_sound_class

# Input CLDF dataset and output staging locations.
SOURCES_DIR = ROOT / "sources" / "iecor" / "cldf"
STAGING_DIR = ROOT / "staging" / "cognate_pairs"
STAGING_DIR.mkdir(parents=True, exist_ok=True)

# 14-column TSV header for the cognate-pair staging schema.
HEADER = (
    "Lang_A\tWord_A\tIPA_A\tLang_B\tWord_B\tIPA_B\tConcept_ID\t"
    "Relationship\tScore\tSource\tRelation_Detail\tDonor_Language\t"
    "Confidence\tSource_Record_ID\n"
)
|
|
|
|
|
def sca_similarity(ipa_a: str, ipa_b: str) -> float:
    """Return the normalised Levenshtein similarity of two IPA strings.

    Both inputs are first mapped to SCA sound-class strings via
    ``ipa_to_sound_class``; edit distance is computed on those strings and
    normalised by the longer length, so the result lies in [0.0, 1.0]
    (1.0 = identical sound-class sequences), rounded to 4 decimals.

    Returns 0.0 when either input is empty or cannot be converted.
    """
    # The converter may reject malformed IPA; treat any failure as
    # "no measurable similarity" rather than aborting the whole run.
    try:
        sca_a = ipa_to_sound_class(ipa_a)
        sca_b = ipa_to_sound_class(ipa_b)
    except Exception:
        return 0.0
    # Empty sound-class strings carry no signal (this guard also makes the
    # max(m, n) divisor below safe — no separate zero-length check needed).
    if not sca_a or not sca_b:
        return 0.0
    m, n = len(sca_a), len(sca_b)
    # Single-row Levenshtein DP: dp[j] is the distance between sca_a[:i]
    # and sca_b[:j]; `prev` carries the diagonal (i-1, j-1) value.
    dp = list(range(n + 1))
    for i in range(1, m + 1):
        prev = dp[0]
        dp[0] = i
        for j in range(1, n + 1):
            temp = dp[j]
            if sca_a[i - 1] == sca_b[j - 1]:
                dp[j] = prev
            else:
                # prev = substitution, dp[j] = deletion, dp[j-1] = insertion
                dp[j] = 1 + min(prev, dp[j], dp[j - 1])
            prev = temp
    dist = dp[n]
    return round(1.0 - dist / max(m, n), 4)
|
|
|
|
|
def main():
    """Extract IE-CoR expert cognate judgements into pairwise TSV rows.

    Reads the CLDF tables under SOURCES_DIR (languages, parameters, forms,
    loans, cognates), expands each cognate set into cross-language form
    pairs scored by SCA similarity, and writes them to STAGING_DIR in the
    shared 14-column schema.
    """
    print("=" * 60)
    print("IE-CoR Cognate Extraction")
    print("=" * 60)

    # --- languages.csv: map internal language IDs to ISO 639-3 codes ---
    lang_path = SOURCES_DIR / "languages.csv"
    lang_iso = {}
    with open(lang_path, "r", encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for row in reader:
            lid = row["ID"]
            iso = (row.get("ISO639P3code") or "").strip()
            if iso:
                lang_iso[lid] = iso
    print(f" Languages with ISO codes: {len(lang_iso)}")

    # --- parameters.csv (optional): map parameter IDs to concept glosses ---
    params_path = SOURCES_DIR / "parameters.csv"
    param_concept = {}
    if params_path.exists():
        with open(params_path, "r", encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for row in reader:
                pid = row["ID"]
                # BUGFIX: csv.DictReader yields "" (not the .get default)
                # for empty cells, so the old
                # row.get("Concepticon_Gloss", row.get("Name", pid))
                # never fell back when the gloss column was merely empty.
                # Chain with `or` so empty cells fall through properly.
                concept = (
                    (row.get("Concepticon_Gloss") or "").strip()
                    or (row.get("Name") or "").strip()
                    or pid
                )
                if concept:
                    param_concept[pid] = concept

    # --- forms.csv: usable lexical forms keyed by form ID ---
    forms_path = SOURCES_DIR / "forms.csv"
    forms = {}
    with open(forms_path, "r", encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for row in reader:
            fid = row["ID"]
            lid = str(row["Language_ID"])
            iso = lang_iso.get(lid, "")
            if not iso:
                continue  # no ISO code for this language; unusable downstream
            # BUGFIX: same empty-cell issue as above — fall back to Value
            # when Form is present but empty, consistent with the
            # pronunciation fallback chain just below.
            form = (row.get("Form") or "").strip() or (row.get("Value") or "").strip()
            if not form:
                continue

            # Best-available pronunciation: phonetic, then phonemic, then
            # the lower-cased orthographic form as a last resort.
            ipa = (row.get("phon_form") or "").strip()
            if not ipa:
                ipa = (row.get("Phonemic") or "").strip()
            if not ipa:
                ipa = form.lower()
            param_id = (row.get("Parameter_ID") or "").strip()
            concept = param_concept.get(param_id, param_id)
            forms[fid] = {
                "iso": iso,
                "word": form,
                "ipa": ipa,
                "concept": concept,
            }
    print(f" Forms loaded: {len(forms)}")

    # --- loans.csv (optional): cognate sets that involve borrowing ---
    loans_path = SOURCES_DIR / "loans.csv"
    loan_cogset_ids: set[str] = set()
    if loans_path.exists():
        with open(loans_path, "r", encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for row in reader:
                cid = (row.get("Cognateset_ID") or "").strip()
                if cid:
                    loan_cogset_ids.add(cid)
    print(f" Loan-involved cognate sets: {len(loan_cogset_ids)}")

    # --- cognates.csv: group forms into cognate sets ---
    cognates_path = SOURCES_DIR / "cognates.csv"
    cogsets: dict[str, list[dict]] = defaultdict(list)
    doubt_count = 0
    total_rows = 0
    with open(cognates_path, "r", encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for row in reader:
            total_rows += 1
            form_id = row["Form_ID"]
            cogset_id = row["Cognateset_ID"]
            doubt = (row.get("Doubt") or "false").strip().lower() == "true"
            if doubt:
                doubt_count += 1
            form_data = forms.get(form_id)
            if form_data is None:
                continue  # form was filtered out (no ISO code / empty form)
            cogsets[cogset_id].append({
                **form_data,
                "doubt": doubt,
                "cogset_id": cogset_id,
            })
    print(f" Cognate rows: {total_rows}")
    print(f" Doubtful: {doubt_count}")
    print(f" Cognate sets: {len(cogsets)}")

    # --- expand each cognate set into cross-language pairs and write TSV ---
    output_path = STAGING_DIR / "iecor_cognate_pairs.tsv"
    pair_count = 0
    loan_pair_count = 0
    inherited_pair_count = 0
    with open(output_path, "w", encoding="utf-8") as out:
        out.write(HEADER)
        for cogset_id, members in cogsets.items():
            is_loan = cogset_id in loan_cogset_ids
            relation_detail = "loan_involved" if is_loan else "inherited"
            for a, b in combinations(members, 2):
                if a["iso"] == b["iso"]:
                    continue  # skip same-language pairs (dialect doublets)
                score = sca_similarity(a["ipa"], b["ipa"])
                confidence = "doubtful" if (a["doubt"] or b["doubt"]) else "certain"
                # Concept is taken from member A; members of one cognate set
                # presumably share a parameter — TODO confirm in the data.
                out.write(
                    f"{a['iso']}\t{a['word']}\t{a['ipa']}\t"
                    f"{b['iso']}\t{b['word']}\t{b['ipa']}\t"
                    f"{a['concept']}\texpert_cognate\t{score}\tiecor\t"
                    f"{relation_detail}\t-\t{confidence}\t{cogset_id}\n"
                )
                pair_count += 1
                if is_loan:
                    loan_pair_count += 1
                else:
                    inherited_pair_count += 1
                if pair_count % 100000 == 0:
                    print(f" ... {pair_count:,} pairs written")

    print(f"\n Total pairs: {pair_count:,}")
    print(f" Loan-involved pairs: {loan_pair_count:,}")
    print(f" Purely inherited pairs: {inherited_pair_count:,}")
    print(f" Output: {output_path}")
    print("=" * 60)
|
|
|
|
|
# Run the extraction only when executed as a script, so the module can be
# imported (e.g. for reuse of sca_similarity) without side effects.
if __name__ == "__main__":
    main()
|
|
|