"""Cross-reference lexicon entries against their CLDF source data.

For each source (northeuralex, wold, sinotibetan), loads the source data,
then checks EVERY lexicon TSV entry attributed to that source.

Reports:
- Total entries checked, verified, and not found, per source
- Per-language match rates
- For each language with a >5% mismatch rate, examples of the mismatched words
"""

from __future__ import annotations

import csv
import io
import os
import sys
import unicodedata
from collections import defaultdict
from pathlib import Path

# Re-wrap stdio as UTF-8 so IPA glyphs survive non-UTF-8 (e.g. Windows
# console) encodings instead of raising UnicodeEncodeError.
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8", errors="replace")
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8", errors="replace")

BASE = Path(r"C:\Users\alvin\hf-ancient-scripts")
SOURCES = BASE / "sources"
LEXICONS = BASE / "data" / "training" / "lexicons"


def read_cldf_csv(path: Path) -> list[dict[str, str]]:
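    """Read a CLDF CSV into a list of row dicts; empty list if the file is missing."""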
    if not path.exists():
        return []
    with open(path, encoding="utf-8", newline="") as f:
        return list(csv.DictReader(f))


def segments_to_ipa(segments: str) -> str:
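    """Join space-separated CLDF segment tokens into one IPA string.

    Boundary/morpheme markers are dropped:

        >>> segments_to_ipa("k a t +")
        'kat'
    """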
    if not segments or not segments.strip():
        return ""
    parts = segments.split()
    # Drop CLDF boundary/morpheme marker tokens.
    cleaned = [p for p in parts if p not in ("^", "$", "+", "#", "_")]
    return "".join(cleaned)


def normalize_ipa(ipa: str) -> str:
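    """NFC-normalize IPA and drop stress marks and syllable dots.

        >>> normalize_ipa("ˈkat.ka")
        'katka'
    """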
    ipa = unicodedata.normalize("NFC", ipa)
    # Remove primary (U+02C8) and secondary (U+02CC) stress marks,
    # then syllable-boundary dots.
    ipa = ipa.replace("\u02c8", "").replace("\u02cc", "")
    ipa = ipa.replace(".", "")
    return ipa.strip()


def strip_tone_marks(ipa: str) -> str:
    """Aggressively strip combining tone/accent marks for fuzzy matching.

    The extraction pipeline may strip these marks during IPA normalization,
    so we use this for secondary matching when exact match fails.
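
        >>> strip_tone_marks("má")
        'ma'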
| """ |
| nfd = unicodedata.normalize("NFD", ipa) |
| |
| |
| tone_marks = set("\u0300\u0301\u0302\u030C\u030B\u030F\u0303\u0304") |
| cleaned = "".join(c for c in nfd if c not in tone_marks) |
| return unicodedata.normalize("NFC", cleaned).strip() |
|
|
|
|
| |
|
|


def build_northeuralex_lookup():
    """Build per-language lookups from the NorthEuraLex CLDF data.

    Returns three dicts keyed by ISO 639-3 code: a set of (word, ipa)
    pairs, a set of words, and a set of IPA strings.
    """
    cldf_dir = SOURCES / "northeuralex" / "cldf"
    if not cldf_dir.exists():
        print("ERROR: NorthEuraLex CLDF not found")
        return {}, {}, {}

    # Map NorthEuraLex language IDs to ISO 639-3 codes.
    lang_map = {}
    for row in read_cldf_csv(cldf_dir / "languages.csv"):
        nel_id = row["ID"]
        iso = row.get("ISO639P3code", "")
        if not iso:
            # Most NorthEuraLex IDs are themselves ISO 639-3 codes.
            iso = nel_id if len(nel_id) == 3 else ""
        if iso:
            lang_map[nel_id] = iso

    # Concept glosses (not used in matching below; handy for debugging).
    param_map = {}
    for row in read_cldf_csv(cldf_dir / "parameters.csv"):
        pid = row["ID"]
        # DictReader rows always contain present columns (possibly as ""),
        # so .get() defaults never fire; chain with `or` instead.
        gloss = row.get("Concepticon_Gloss") or row.get("Name") or pid
        param_map[pid] = gloss

    lookup: dict[str, set[tuple[str, str]]] = defaultdict(set)
    word_lookup: dict[str, set[str]] = defaultdict(set)
    ipa_lookup: dict[str, set[str]] = defaultdict(set)
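
    # forms.csv follows the CLDF Wordlist convention: each row is expected
    # to carry Language_ID, Parameter_ID, Value, Form, and Segments columns.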
    for row in read_cldf_csv(cldf_dir / "forms.csv"):
        lang_id = row.get("Language_ID", "")
        segments = row.get("Segments", "")
        iso = lang_map.get(lang_id)
        if not iso:
            continue

        ipa = segments_to_ipa(segments)
        if not ipa:
            continue
        ipa_norm = normalize_ipa(ipa)
        if not ipa_norm:
            continue

        value = row.get("Value", "")
        form = row.get("Form", "")

        # Index both the raw Value and the cleaned Form against the IPA.
        if value:
            lookup[iso].add((value, ipa_norm))
            word_lookup[iso].add(value)
        if form:
            lookup[iso].add((form, ipa_norm))
            word_lookup[iso].add(form)
        ipa_lookup[iso].add(ipa_norm)

    n_entries = sum(len(v) for v in lookup.values())
    print(f" NorthEuraLex source: {n_entries:,} (word,ipa) pairs across {len(lookup)} languages")
    return lookup, word_lookup, ipa_lookup


def build_wold_lookup():
    """Build per-language lookups from the WOLD CLDF data.

    Returns the same three per-ISO dicts as build_northeuralex_lookup().
    """
    cldf_dir = SOURCES / "wold" / "cldf"
    if not cldf_dir.exists():
        print("ERROR: WOLD CLDF not found")
        return {}, {}, {}

    wold_lang_map = {}
    for row in read_cldf_csv(cldf_dir / "languages.csv"):
        wold_id = row["ID"]
        iso = row.get("ISO639P3code", "")
        if iso:
            wold_lang_map[wold_id] = iso

    lookup: dict[str, set[tuple[str, str]]] = defaultdict(set)
    word_lookup: dict[str, set[str]] = defaultdict(set)
    ipa_lookup: dict[str, set[str]] = defaultdict(set)

    for row in read_cldf_csv(cldf_dir / "forms.csv"):
        lang_id = row.get("Language_ID", "")
        segments = row.get("Segments", "")
        iso = wold_lang_map.get(lang_id)
        if not iso:
            continue

        ipa = segments_to_ipa(segments)
        if not ipa:
            continue
        ipa_norm = normalize_ipa(ipa)
        if not ipa_norm:
            continue

        value = row.get("Value", "")
        form = row.get("Form", "")

        if value:
            lookup[iso].add((value, ipa_norm))
            word_lookup[iso].add(value)
        if form:
            lookup[iso].add((form, ipa_norm))
            word_lookup[iso].add(form)
        ipa_lookup[iso].add(ipa_norm)

    n_entries = sum(len(v) for v in lookup.values())
    print(f" WOLD source: {n_entries:,} (word,ipa) pairs across {len(lookup)} languages")
    return lookup, word_lookup, ipa_lookup


def build_sinotibetan_lookup():
    """Build per-language lookups from the Sino-Tibetan TSV dump.

    Returns the same three per-ISO dicts as build_northeuralex_lookup(),
    with the word slot holding the CONCEPT gloss.
    """
    dump_path = SOURCES / "sinotibetan" / "sinotibetan_dump.tsv"
    if not dump_path.exists():
        dump_path = SOURCES / "sinotibetan" / "dumps" / "sinotibetan.tsv"
    if not dump_path.exists():
        print("ERROR: Sino-Tibetan dump not found")
        return {}, {}, {}
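
    # The dump is read as a TSV with (at least) DOCULECT, CONCEPT, and IPA
    # columns; rows whose doculect is missing from the map below are skipped.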
    doculect_map = {
        "Old_Chinese": "och",
        "Japhug": "jya",
        "Tibetan_Written": "bod",
        "Old_Burmese": "obr",
        "Jingpho": "kac",
        "Lisu": "lis",
        "Naxi": "nxq",
        "Khaling": "klr",
        "Limbu": "lif",
        "Pumi_Lanping": "pmi",
        "Qiang_Mawo": "qxs",
        "Tujia": "tji",
        "Dulong": "duu",
        "Hakha": "cnh",
        "Bai_Jianchuan": "bca",
    }

    lookup: dict[str, set[tuple[str, str]]] = defaultdict(set)
    word_lookup: dict[str, set[str]] = defaultdict(set)
    ipa_lookup: dict[str, set[str]] = defaultdict(set)

    with open(dump_path, encoding="utf-8", newline="") as f:
        reader = csv.DictReader(f, delimiter="\t")
        for row in reader:
            doculect = row.get("DOCULECT", "")
            iso = doculect_map.get(doculect)
            if not iso:
                continue
            concept = row.get("CONCEPT", "").strip()
            ipa = row.get("IPA", "").strip()
            if not ipa or not concept:
                continue
            ipa_norm = normalize_ipa(ipa)
            if not ipa_norm:
                continue
            lookup[iso].add((concept, ipa_norm))
            word_lookup[iso].add(concept)
            ipa_lookup[iso].add(ipa_norm)

    n_entries = sum(len(v) for v in lookup.values())
    print(f" Sino-Tibetan source: {n_entries:,} (word,ipa) pairs across {len(lookup)} languages")
    return lookup, word_lookup, ipa_lookup


def load_lexicon_entries_by_source(source_name: str) -> dict[str, list[tuple[str, str, str]]]:
    """Load {iso: [(word, ipa, concept_id), ...]} for entries from a given source."""
    result: dict[str, list[tuple[str, str, str]]] = defaultdict(list)
    for fn in sorted(os.listdir(LEXICONS)):
        if not fn.endswith(".tsv"):
            continue
        iso = fn[:-4]
        fpath = LEXICONS / fn
        with open(fpath, encoding="utf-8") as f:
            header = f.readline().strip().split("\t")
            if len(header) < 4:
                continue
            for line in f:
                parts = line.rstrip("\n").split("\t")
                if len(parts) < 4:
                    continue
                # Columns: word, IPA, SCA classes (unused), source, [concept_id].
                word, ipa_col, src = parts[0], parts[1], parts[3]
                concept_id = parts[4] if len(parts) > 4 else "-"
                if src == source_name:
                    result[iso].append((word, ipa_col, concept_id))
    return result


def verify_source(
    source_name: str,
    source_lookup: dict[str, set[tuple[str, str]]],
    source_word_lookup: dict[str, set[str]],
    source_ipa_lookup: dict[str, set[str]],
    lexicon_entries: dict[str, list[tuple[str, str, str]]],
):
    """Verify all lexicon entries for a source and return a stats dict.

    Per entry the cascade is: exact (word, ipa) match; then fuzzy match on
    (word, tone-stripped ipa); otherwise the miss is classified by whether
    the word and/or the IPA occur anywhere in the source for that language.
    """
    total_checked = 0
    total_verified = 0
    total_fuzzy = 0
    total_not_found = 0

    lang_stats: dict[str, dict] = {}
    high_mismatch_details: dict[str, list] = {}

    # Pre-compute tone-stripped source pairs once for fuzzy matching.
    fuzzy_lookup: dict[str, set[tuple[str, str]]] = defaultdict(set)
    for iso, pairs in source_lookup.items():
        for word, ipa in pairs:
            fuzzy_lookup[iso].add((word, strip_tone_marks(ipa)))

    for iso, entries in sorted(lexicon_entries.items()):
        src_pairs = source_lookup.get(iso, set())
        src_words = source_word_lookup.get(iso, set())
        src_ipas = source_ipa_lookup.get(iso, set())
        fuzzy_pairs = fuzzy_lookup.get(iso, set())

        checked = 0
        verified = 0
        fuzzy_verified = 0
        word_only_match = 0
        ipa_only_match = 0
        both_match_not_paired = 0
        no_match = 0
        mismatched = []

        for word, ipa, concept in entries:
            checked += 1
            total_checked += 1

            if (word, ipa) in src_pairs:
                verified += 1
                total_verified += 1
            elif (word, strip_tone_marks(ipa)) in fuzzy_pairs:
                fuzzy_verified += 1
                total_fuzzy += 1
            else:
                # Diagnose the miss: does each half appear at all?
                w_match = word in src_words
                i_match = ipa in src_ipas
                if w_match and i_match:
                    both_match_not_paired += 1
                elif w_match:
                    word_only_match += 1
                elif i_match:
                    ipa_only_match += 1
                else:
                    no_match += 1

                total_not_found += 1
                if len(mismatched) < 30:
                    mismatched.append((word, ipa, concept, w_match, i_match))

        if checked == 0:
            continue

        total_matched = verified + fuzzy_verified
        match_rate = total_matched / checked * 100
        mismatch_rate = (checked - total_matched) / checked * 100

        lang_stats[iso] = {
            "checked": checked,
            "verified": verified,
            "fuzzy": fuzzy_verified,
            "total_matched": total_matched,
            "word_only": word_only_match,
            "ipa_only": ipa_only_match,
            "both_not_paired": both_match_not_paired,
            "no_match": no_match,
            "match_rate": match_rate,
            "mismatch_rate": mismatch_rate,
            "in_source": iso in source_lookup,
        }

        if mismatch_rate > 5.0:
            high_mismatch_details[iso] = mismatched

    return {
        "total_checked": total_checked,
        "total_verified": total_verified,
        "total_fuzzy": total_fuzzy,
        "total_not_found": total_not_found,
        "lang_stats": lang_stats,
        "high_mismatch_details": high_mismatch_details,
    }


def print_report(source_name: str, stats: dict):
    """Print a detailed report for a source."""
    total = stats["total_checked"]
    verified = stats["total_verified"]
    fuzzy = stats.get("total_fuzzy", 0)
    not_found = stats["total_not_found"]
    total_matched = verified + fuzzy
    match_pct = total_matched / total * 100 if total > 0 else 0

    print(f"\n{'=' * 100}")
    print(f" SOURCE: {source_name.upper()}")
    print(f"{'=' * 100}")
    print(f" Total entries checked: {total:>8,}")
    print(f" Verified (exact match): {verified:>8,} ({verified/total*100 if total else 0:.2f}%)")
    print(f" Verified (fuzzy IPA match): {fuzzy:>8,} (tone marks stripped)")
    print(f" Total verified: {total_matched:>8,} ({match_pct:.2f}%)")
    print(f" Unverified: {not_found:>8,} ({100 - match_pct:.2f}%)")
    print(f" Languages checked: {len(stats['lang_stats']):>8}")

    # Per-language table, worst mismatch rate first.
    print(f"\n {'Lang':<8} {'Checked':>8} {'Exact':>8} {'Fuzzy':>6} {'Total%':>7} {'WdOnly':>7} {'IPAOnly':>7} {'BothNP':>7} {'None':>5} {'Src':>3}")
    print(f" {'-'*8} {'-'*8} {'-'*8} {'-'*6} {'-'*7} {'-'*7} {'-'*7} {'-'*7} {'-'*5} {'-'*3}")

    for iso, ls in sorted(stats["lang_stats"].items(), key=lambda x: x[1]["mismatch_rate"], reverse=True):
        flag = " ***" if ls["mismatch_rate"] > 5.0 else ""
        in_src = "Y" if ls["in_source"] else "N"
        print(
            f" {iso:<8} {ls['checked']:>8,} {ls['verified']:>8,} {ls['fuzzy']:>6}"
            f" {ls['match_rate']:>6.1f}%"
            f" {ls['word_only']:>7} {ls['ipa_only']:>7} {ls['both_not_paired']:>7}"
            f" {ls['no_match']:>5} {in_src:>3}{flag}"
        )

    if stats["high_mismatch_details"]:
        print(f"\n {'=' * 90}")
        print(" LANGUAGES WITH >5% MISMATCH RATE (up to 20 examples per language):")
        print(f" {'=' * 90}")

        for iso, mismatches in sorted(stats["high_mismatch_details"].items()):
            ls = stats["lang_stats"][iso]
            # Count fuzzy matches as verified, consistent with mismatch_rate.
            not_verified = ls["checked"] - ls["total_matched"]
            print(
                f"\n --- {iso} --- mismatch: {ls['mismatch_rate']:.1f}% ({not_verified}/{ls['checked']})"
                f" [WdOnly={ls['word_only']}, IPAOnly={ls['ipa_only']}, BothNotPaired={ls['both_not_paired']}, None={ls['no_match']}]"
            )
            print(f" {'Word':<30} {'IPA(lexicon)':<30} {'Concept':<22} {'WdInSrc':>7} {'IPAInSrc':>8}")
            for word, ipa, concept, w_match, i_match in mismatches[:20]:
                w_str = "Y" if w_match else "N"
                i_str = "Y" if i_match else "N"
                word_d = (word[:27] + "...") if len(word) > 30 else word
                ipa_d = (ipa[:27] + "...") if len(ipa) > 30 else ipa
                concept_d = (concept[:19] + "...") if len(concept) > 22 else concept
                print(f" {word_d:<30} {ipa_d:<30} {concept_d:<22} {w_str:>7} {i_str:>8}")
    else:
        print("\n No languages with >5% mismatch rate.")


def main():
    print("=" * 100)
    print("LEXICON vs CLDF SOURCE CROSS-REFERENCE VERIFICATION")
    print("=" * 100)
    print(f"\nLexicon directory: {LEXICONS}")
    print(f"Source directory: {SOURCES}")

    print("\nLoading source data...")
    nel_lookup, nel_word, nel_ipa = build_northeuralex_lookup()
    wold_lookup, wold_word, wold_ipa = build_wold_lookup()
    st_lookup, st_word, st_ipa = build_sinotibetan_lookup()

    print("\nScanning ALL lexicon files for source-attributed entries...")
    nel_entries = load_lexicon_entries_by_source("northeuralex")
    wold_entries = load_lexicon_entries_by_source("wold")
    st_entries = load_lexicon_entries_by_source("sinotibetan")

    print(f" northeuralex: {sum(len(v) for v in nel_entries.values()):,} entries in {len(nel_entries)} languages")
    print(f" wold: {sum(len(v) for v in wold_entries.values()):,} entries in {len(wold_entries)} languages")
    print(f" sinotibetan: {sum(len(v) for v in st_entries.values()):,} entries in {len(st_entries)} languages")

    print(f"\n NorthEuraLex languages in lexicons: {sorted(nel_entries.keys())}")
    print(f" NorthEuraLex languages in source: {len(nel_lookup)} languages")
    print(f" WOLD languages in lexicons: {sorted(wold_entries.keys())}")
    print(f" WOLD languages in source: {len(wold_lookup)} languages")
    print(f" SinoTibetan languages in lexicons: {sorted(st_entries.keys())}")
    print(f" SinoTibetan languages in source: {len(st_lookup)} languages")

    print("\nVerifying northeuralex...")
    nel_stats = verify_source("northeuralex", nel_lookup, nel_word, nel_ipa, nel_entries)

    print("Verifying wold...")
    wold_stats = verify_source("wold", wold_lookup, wold_word, wold_ipa, wold_entries)

    print("Verifying sinotibetan...")
    st_stats = verify_source("sinotibetan", st_lookup, st_word, st_ipa, st_entries)

    print_report("northeuralex", nel_stats)
    print_report("wold", wold_stats)
    print_report("sinotibetan", st_stats)

    grand_checked = nel_stats["total_checked"] + wold_stats["total_checked"] + st_stats["total_checked"]
    grand_exact = nel_stats["total_verified"] + wold_stats["total_verified"] + st_stats["total_verified"]
    grand_fuzzy = nel_stats.get("total_fuzzy", 0) + wold_stats.get("total_fuzzy", 0) + st_stats.get("total_fuzzy", 0)
    grand_verified = grand_exact + grand_fuzzy
    grand_not_found = nel_stats["total_not_found"] + wold_stats["total_not_found"] + st_stats["total_not_found"]

    print(f"\n{'=' * 100}")
    print("GRAND SUMMARY")
    print(f"{'=' * 100}")
    print(f" Total entries checked across all 3 sources: {grand_checked:>10,}")
    print(f" Verified (exact word+IPA match): {grand_exact:>10,}")
    print(f" Verified (fuzzy: tone marks stripped): {grand_fuzzy:>10,}")
    print(f" Total verified: {grand_verified:>10,}")
    print(f" Unverified (no source match found): {grand_not_found:>10,}")
    if grand_checked > 0:
        print(f" Overall verification rate: {grand_verified/grand_checked*100:>9.2f}%")
    print()
    total_high = (
        len(nel_stats["high_mismatch_details"])
        + len(wold_stats["high_mismatch_details"])
        + len(st_stats["high_mismatch_details"])
    )
    total_langs = (
        len(nel_stats["lang_stats"])
        + len(wold_stats["lang_stats"])
        + len(st_stats["lang_stats"])
    )
    print(f" Languages with >5% unverified: {total_high} out of {total_langs} total")

    if total_high > 0:
        print("\n Flagged languages (>5% unverified):")
        for source_name, stats in [("northeuralex", nel_stats), ("wold", wold_stats), ("sinotibetan", st_stats)]:
            for iso in sorted(stats["high_mismatch_details"].keys()):
                ls = stats["lang_stats"][iso]
                unverified = ls["checked"] - ls["total_matched"]
                print(f" {source_name:15s} / {iso}: {ls['mismatch_rate']:.1f}% ({unverified}/{ls['checked']})")

    print(f"\n{'=' * 100}")


if __name__ == "__main__":
    main()