"""
Verification script for ancient language database sources.
Checks that source websites are real/accessible and spot-checks entries.
"""

import json
import os
import random
import time
import csv
import sys
import io
from pathlib import Path
from collections import defaultdict

# Rewrap stdout/stderr as UTF-8 so ancient-script characters print cleanly
# (Windows consoles may otherwise default to a legacy code page).
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8', errors='replace')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8', errors='replace')

import requests
import urllib3
# All requests below pass verify=False, so silence urllib3's InsecureRequestWarning.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
from urllib.parse import quote

LEXICONS_DIR = Path(r"C:\Users\alvin\hf-ancient-scripts\data\training\lexicons")
AUDIT_DIR = Path(r"C:\Users\alvin\hf-ancient-scripts\data\training\audit_trails")

# Delay (seconds) between HTTP requests, to avoid hammering the source sites.
REQUEST_DELAY = 1.5

# Fixed seed so the random spot-check samples are reproducible across runs.
random.seed(42)

SOURCE_URLS = {
    "palaeolexicon": "https://www.palaeolexicon.com",
    "ediana": "https://ediana.gwi.uni-muenchen.de",
    "oracc_ecut": "https://oracc.museum.upenn.edu",
    "tir_raetica": "https://www.univie.ac.at/raetica/",
    "avesta_org": "https://avesta.org",
}


ENTRY_URL_TEMPLATES = {
    "palaeolexicon": "https://www.palaeolexicon.com/Word/Show/{word_id}",
    "ediana": "https://ediana.gwi.uni-muenchen.de/dictionary/lemma/{entry_id}",

    "oracc_ecut": "https://oracc.museum.upenn.edu/ecut/",

    "tir_raetica": "https://www.univie.ac.at/raetica/wiki/Main_Page",
    "avesta_org": "https://avesta.org",
}
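# Note on the templates above: only Palaeolexicon and eDiAna have true
# per-entry URL patterns. ORACC/eCUT and avesta.org are only checked at the
# site/section level, and TIR Raetica words are looked up directly as wiki
# pages (see the per-source verifiers below).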


HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
}


def check_url(url, timeout=15, label=""):
    """Check if a URL is accessible. Returns (status_code, ok, error_msg)."""
    try:
        resp = requests.get(url, headers=HEADERS, timeout=timeout, allow_redirects=True, verify=False)
        return resp.status_code, resp.ok, None
    except requests.exceptions.SSLError as e:
        return None, False, f"SSL Error: {e}"
    except requests.exceptions.ConnectionError as e:
        return None, False, f"Connection Error: {e}"
    except requests.exceptions.Timeout:
        return None, False, "Timeout"
    except Exception as e:
        return None, False, f"Error: {e}"
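
# Illustrative return values (made up, not taken from a real run):
#   check_url("https://www.palaeolexicon.com")  -> (200, True, None)
#   check_url("https://no-such-host.invalid")   -> (None, False, "Connection Error: ...")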


def check_url_contains(url, search_text, timeout=15):
    """Check if a URL's content contains specific text (case-insensitive).

    Returns (found, status_code_or_error_string).
    """
    try:
        resp = requests.get(url, headers=HEADERS, timeout=timeout, allow_redirects=True, verify=False)
        if resp.ok:
            return search_text.lower() in resp.text.lower(), resp.status_code
        return False, resp.status_code
    except Exception as e:
        return False, str(e)


def verify_source_websites():
    """Check accessibility of all source websites."""
    print("=" * 70)
    print("PHASE 1: SOURCE WEBSITE ACCESSIBILITY")
    print("=" * 70)
    results = {}
    for source, url in SOURCE_URLS.items():
        status, ok, err = check_url(url)
        results[source] = {"url": url, "status": status, "ok": ok, "error": err}
        if ok:
            print(f" [OK] {source:20s} -> {url} (HTTP {status})")
        else:
            print(f" [FAIL] {source:20s} -> {url} (status={status}, error={err})")
        time.sleep(REQUEST_DELAY)
    return results


def load_audit_trail(filepath):
    """Load all entries from a JSONL audit trail file (malformed lines are skipped)."""
    entries = []
    with open(filepath, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                try:
                    entries.append(json.loads(line))
                except json.JSONDecodeError:
                    pass
    return entries
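
# The audit trails are JSONL: one JSON object per line, with fields that vary
# by source. A hypothetical Palaeolexicon-style record (values invented):
#   {"word": "lada", "gloss": "wife", "word_id": 12345}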


def sample_entries(entries, n=10):
    """Sample n random entries."""
    if len(entries) <= n:
        return entries
    return random.sample(entries, n)


def verify_palaeolexicon_entries():
    """Verify palaeolexicon entries by checking word_id URLs."""
    print("\n" + "=" * 70)
    print("PALAEOLEXICON VERIFICATION")
    print("=" * 70)

    audit_files = sorted(AUDIT_DIR.glob("palaeolexicon_*.jsonl"))
    print(f" Audit trail files: {[f.name for f in audit_files]}")

    all_results = {}
    for af in audit_files:
        lang = af.stem.replace("palaeolexicon_", "")
        entries = load_audit_trail(af)
        print(f"\n --- Language: {lang} ({len(entries)} entries) ---")

        samples = sample_entries(entries, 10)
        lang_results = []
        for entry in samples:
            word = entry.get("word", "?")
            gloss = entry.get("gloss", "?")
            word_id = entry.get("word_id")

            if word_id:
                url = ENTRY_URL_TEMPLATES["palaeolexicon"].format(word_id=word_id)
                found, status = check_url_contains(url, word, timeout=15)
                result = {
                    "word": word,
                    "gloss": gloss,
                    "word_id": word_id,
                    "url": url,
                    "http_status": status,
                    "word_found_in_page": found,
                }
                status_str = "FOUND" if found else f"NOT FOUND (HTTP {status})"
                print(f" word_id={word_id:6d} word={word:20s} gloss={gloss:30s} -> {status_str}")
                lang_results.append(result)
                time.sleep(REQUEST_DELAY)
            else:
                print(f" [NO ID] word={word:20s} gloss={gloss}")
                lang_results.append({
                    "word": word, "gloss": gloss, "word_id": None,
                    "url": None, "verified": "no_id"
                })

        all_results[lang] = lang_results
    return all_results


def verify_ediana_entries():
    """Verify eDiAna entries by checking entry_id URLs."""
    print("\n" + "=" * 70)
    print("eDiAna VERIFICATION")
    print("=" * 70)

    audit_files = sorted(AUDIT_DIR.glob("ediana_*.jsonl"))
    print(f" Audit trail files: {[f.name for f in audit_files]}")

    all_results = {}
    for af in audit_files:
        lang = af.stem.replace("ediana_", "")
        entries = load_audit_trail(af)
        if not entries:
            print(f"\n --- Language: {lang} (EMPTY audit trail) ---")
            all_results[lang] = []
            continue

        print(f"\n --- Language: {lang} ({len(entries)} entries) ---")
        samples = sample_entries(entries, 10)
        lang_results = []

        for entry in samples:
            word = entry.get("word", "?")
            gloss = entry.get("gloss", "?")
            entry_id = entry.get("entry_id")
            lang_detail = entry.get("language_detail", "?")

            if entry_id:
                url = ENTRY_URL_TEMPLATES["ediana"].format(entry_id=entry_id)
                status_code, ok, err = check_url(url, timeout=15)
                result = {
                    "word": word,
                    "gloss": gloss,
                    "entry_id": entry_id,
                    "url": url,
                    "http_status": status_code,
                    "accessible": ok,
                }
                status_str = f"HTTP {status_code}" if status_code else err
                print(f" entry_id={entry_id:5s} word={word:20s} gloss={gloss:35s} -> {status_str}")
                lang_results.append(result)
                time.sleep(REQUEST_DELAY)
            else:
                print(f" [NO ID] word={word:20s} gloss={gloss}")
                lang_results.append({
                    "word": word, "gloss": gloss, "entry_id": None, "verified": "no_id"
                })

        all_results[lang] = lang_results
    return all_results


def verify_oracc_entries():
    """Verify ORACC/eCUT entries (site-level checks only; no per-word URLs)."""
    print("\n" + "=" * 70)
    print("ORACC/eCUT VERIFICATION")
    print("=" * 70)

    af = AUDIT_DIR / "oracc_ecut_xur.jsonl"
    entries = load_audit_trail(af)
    print(f" Audit trail: {af.name} ({len(entries)} entries)")

    samples = sample_entries(entries, 10)
    results = []

    # eCUT has no stable per-word URLs, so check the glossary index page,
    # its JSON sort index, and the project landing page instead.
    glossary_url = "https://oracc.museum.upenn.edu/ecut/akk/index.html"
    print(f"\n Checking glossary index: {glossary_url}")
    status_code, ok, err = check_url(glossary_url, timeout=15)
    print(f" -> HTTP {status_code}" if status_code else f" -> {err}")
    time.sleep(REQUEST_DELAY)

    # Machine-readable glossary index.
    json_url = "https://oracc.museum.upenn.edu/ecut/akk/sortindex.json"
    print(f" Checking glossary JSON: {json_url}")
    status_code, ok, err = check_url(json_url, timeout=15)
    print(f" -> HTTP {status_code}" if status_code else f" -> {err}")
    time.sleep(REQUEST_DELAY)

    # Project landing page.
    main_url = "https://oracc.museum.upenn.edu/ecut/"
    print(f" Checking main eCUT page: {main_url}")
    status_code, ok, err = check_url(main_url, timeout=15)
    print(f" -> HTTP {status_code}" if status_code else f" -> {err}")
    time.sleep(REQUEST_DELAY)

    print(f"\n Sample entries from audit trail:")
    for entry in samples:
        word = entry.get("word", "?")
        gloss = entry.get("gloss", "?")
        print(f" word={word:20s} gloss={gloss}")
        results.append({"word": word, "gloss": gloss, "source": "oracc_ecut"})

    return results


def verify_tir_raetica_entries():
    """Verify TIR (Thesaurus Inscriptionum Raeticarum) entries."""
    print("\n" + "=" * 70)
    print("TIR RAETICA VERIFICATION")
    print("=" * 70)

    af = AUDIT_DIR / "tir_raetica_xrr.jsonl"
    entries = load_audit_trail(af)
    print(f" Audit trail: {af.name} ({len(entries)} entries)")

    samples = sample_entries(entries, 10)
    results = []

    # Site-level pages: the wiki main page and the lexicon/word category listings.
    urls_to_check = [
        "https://www.univie.ac.at/raetica/wiki/Main_Page",
        "https://www.univie.ac.at/raetica/wiki/Category:Lexicon",
        "https://www.univie.ac.at/raetica/wiki/Category:Words",
    ]
    for url in urls_to_check:
        print(f" Checking: {url}")
        status_code, ok, err = check_url(url, timeout=15)
        if status_code:
            print(f" -> HTTP {status_code}")
        else:
            print(f" -> {err}")
        time.sleep(REQUEST_DELAY)

    print(f"\n Sample entries from audit trail:")
    for entry in samples:
        word = entry.get("word", "?")
        gloss = entry.get("gloss", "?")
        word_type = entry.get("word_type", "")
        # Spot-check the word's own wiki page (assumes a page exists at /wiki/<word>).
        wiki_url = f"https://www.univie.ac.at/raetica/wiki/{quote(word)}"
        status_code, ok, err = check_url(wiki_url, timeout=15)
        status_str = f"HTTP {status_code}" if status_code else err
        print(f" word={word:20s} gloss={gloss:25s} wiki_url -> {status_str}")
        results.append({
            "word": word, "gloss": gloss, "wiki_url": wiki_url,
            "http_status": status_code, "accessible": ok
        })
        time.sleep(REQUEST_DELAY)

    return results


def verify_avesta_entries():
    """Verify avesta.org entries (site-level checks plus a sampled listing)."""
    print("\n" + "=" * 70)
    print("AVESTA.ORG VERIFICATION")
    print("=" * 70)

    af = AUDIT_DIR / "avesta_org_ave.jsonl"
    entries = load_audit_trail(af)
    print(f" Audit trail: {af.name} ({len(entries)} entries)")

    # Main site.
    main_url = "https://avesta.org"
    print(f"\n Checking main site: {main_url}")
    status_code, ok, err = check_url(main_url, timeout=15)
    if status_code:
        print(f" -> HTTP {status_code}")
    else:
        print(f" -> {err}")
    time.sleep(REQUEST_DELAY)

    # Candidate dictionary page locations.
    dict_urls = [
        "https://avesta.org/dictionary.html",
        "https://avesta.org/avdict/avdict.htm",
        "https://avesta.org/avdict.htm",
    ]
    for url in dict_urls:
        print(f" Checking dictionary page: {url}")
        status_code, ok, err = check_url(url, timeout=15)
        if status_code:
            print(f" -> HTTP {status_code}")
        else:
            print(f" -> {err}")
        time.sleep(REQUEST_DELAY)

    print(f"\n Sample entries from audit trail:")
    # Filter out rows already identified as scrape artifacts before sampling.
    real_entries = [e for e in entries if e.get("word") not in ("NOTE", "Example", "swift")]
    samples = sample_entries(real_entries, min(10, len(real_entries)))
    results = []
    for entry in samples:
        word = entry.get("word", "?")
        gloss = entry.get("gloss", "?")
        print(f" word={word:20s} gloss={gloss}")
        results.append({"word": word, "gloss": gloss, "source": "avesta.org"})

    return results


def report_audit_trail_structures():
    """Report the structure of each audit trail file."""
    print("\n" + "=" * 70)
    print("AUDIT TRAIL STRUCTURE REPORT")
    print("=" * 70)

    for af in sorted(AUDIT_DIR.glob("*.jsonl")):
        entries = load_audit_trail(af)
        if not entries:
            print(f"\n {af.name}: EMPTY (0 entries)")
            continue

        # Union of field names across all entries.
        all_keys = set()
        for e in entries:
            all_keys.update(e.keys())

        # Fields that look like provenance (URLs, identifiers, page references).
        url_fields = [k for k in all_keys if any(x in k.lower() for x in ["url", "link", "source", "id", "page"])]

        print(f"\n {af.name}: {len(entries)} entries")
        print(f" Fields: {sorted(all_keys)}")
        print(f" URL/provenance fields: {sorted(url_fields)}")
        # First entry, truncated, as a structural example.
        print(f" Example: {json.dumps(entries[0], ensure_ascii=False)[:200]}")

        # How many entries carry a usable identifier?
        if "word_id" in all_keys:
            ids_present = sum(1 for e in entries if e.get("word_id"))
            print(f" word_id coverage: {ids_present}/{len(entries)} entries have word_id")

        if "entry_id" in all_keys:
            ids_present = sum(1 for e in entries if e.get("entry_id"))
            print(f" entry_id coverage: {ids_present}/{len(entries)} entries have entry_id")


def cross_check_audit_vs_lexicon():
    """Check that audit trail entries appear in the lexicon TSVs."""
    print("\n" + "=" * 70)
    print("CROSS-CHECK: AUDIT TRAILS vs LEXICON TSVs")
    print("=" * 70)

    source_lang_map = {
        "palaeolexicon": ["ett", "xcr", "xhu", "xlc", "xld", "xlw"],
        "ediana": ["xcr", "xlc", "xld", "xlw"],
        "oracc_ecut": ["xur"],
        "tir_raetica": ["xrr"],
        "avesta_org": ["ave"],
    }
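    # Audit trails are expected at AUDIT_DIR / f"{source}_{lang}.jsonl" and
    # lexicons at LEXICONS_DIR / f"{lang}.tsv"; both paths are rebuilt from
    # this mapping in the loop below.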

    for source, langs in source_lang_map.items():
        for lang in langs:
            audit_file = AUDIT_DIR / f"{source}_{lang}.jsonl"
            lexicon_file = LEXICONS_DIR / f"{lang}.tsv"

            if not audit_file.exists():
                print(f" {source}/{lang}: audit trail MISSING")
                continue
            if not lexicon_file.exists():
                print(f" {source}/{lang}: lexicon TSV MISSING")
                continue

            audit_entries = load_audit_trail(audit_file)
            if not audit_entries:
                print(f" {source}/{lang}: audit trail EMPTY")
                continue

            # Collect the lexicon words attributed to this source.
            # Assumed TSV layout: column 0 = word, column 3 = source name.
            lexicon_words = set()
            with open(lexicon_file, "r", encoding="utf-8") as f:
                reader = csv.reader(f, delimiter="\t")
                header = next(reader)  # skip the header row
                word_idx = 0
                source_idx = 3
                for row in reader:
                    if len(row) > source_idx:
                        src_name = row[source_idx].strip()
                        if src_name == source:
                            lexicon_words.add(row[word_idx].strip())

            # Compare the audit-trail words against the lexicon words.
            audit_words = {e.get("word", "").strip() for e in audit_entries}
            in_both = audit_words & lexicon_words
            only_audit = audit_words - lexicon_words
            only_lexicon = lexicon_words - audit_words

            print(f" {source}/{lang}: audit={len(audit_words)}, lexicon={len(lexicon_words)}, "
                  f"overlap={len(in_both)}, audit_only={len(only_audit)}, lexicon_only={len(only_lexicon)}")
            if only_audit and len(only_audit) <= 5:
                print(f" Audit-only words: {only_audit}")
            if only_lexicon and len(only_lexicon) <= 5:
                print(f" Lexicon-only words: {only_lexicon}")


def verify_wiktionary_expansion():
    """Spot-check wiktionary expansion audit trails for URL structure."""
    print("\n" + "=" * 70)
    print("WIKTIONARY EXPANSION SPOT CHECK")
    print("=" * 70)

    wikt_files = sorted(AUDIT_DIR.glob("wiktionary_expansion_*.jsonl"))
    print(f" Found {len(wikt_files)} wiktionary expansion audit files")

    # Limit the spot check to the first few files to keep request volume down.
    for af in wikt_files[:4]:
        entries = load_audit_trail(af)
        if not entries:
            continue

        lang = af.stem.replace("wiktionary_expansion_", "")
        print(f"\n --- {lang} ({len(entries)} entries) ---")
        samples = sample_entries(entries, 3)

        for entry in samples:
            word = entry.get("word", "?")
            page_title = entry.get("page_title", "")
            gloss = entry.get("gloss", "?")

            if page_title:
                # Rebuild the Wiktionary page URL from the recorded page title.
                url = f"https://en.wiktionary.org/wiki/{quote(page_title)}"
                status_code, ok, err = check_url(url, timeout=15)
                status_str = f"HTTP {status_code}" if status_code else err
                print(f" word={word:25s} page_title={page_title:25s} -> {status_str}")
                time.sleep(REQUEST_DELAY)
            else:
                print(f" word={word:25s} [no page_title] gloss={gloss}")


def check_avesta_data_quality():
    """Check quality of avesta.org data (it looked suspicious)."""
    print("\n" + "=" * 70)
    print("AVESTA.ORG DATA QUALITY CHECK")
    print("=" * 70)

    af = AUDIT_DIR / "avesta_org_ave.jsonl"
    entries = load_audit_trail(af)

    print(f" Total entries: {len(entries)}")

    # Flag entries that look like scrape artifacts rather than lexicon data.
    suspicious = []
    for entry in entries:
        word = entry.get("word", "")
        gloss = entry.get("gloss", "")
        # Rows whose "word" is empty or a known artifact value from the scrape.
        if word in ("NOTE", "Example", "swift") or not word:
            suspicious.append(entry)
        # Heuristic for short single-word glosses; currently a no-op placeholder.
        if gloss and not any(c == ' ' for c in gloss) and gloss.isalpha() and len(gloss) < 15:
            pass

    print(f" Suspicious entries (metadata/artifacts): {len(suspicious)}")
    for s in suspicious:
        print(f" -> {json.dumps(s, ensure_ascii=False)}")

    print(f"\n Sample of normal entries:")
    normal = [e for e in entries if e.get("word") not in ("NOTE", "Example", "swift")]
    for entry in sample_entries(normal, 10):
        print(f" word={entry.get('word','?'):20s} gloss={entry.get('gloss','?')}")


def main():
    print("ANCIENT LANGUAGE DATABASE SOURCE VERIFICATION")
    print("=" * 70)
    print(f"Date: {time.strftime('%Y-%m-%d %H:%M:%S')}")
    print(f"Lexicons dir: {LEXICONS_DIR}")
    print(f"Audit trails dir: {AUDIT_DIR}")
    print()

    # Phase 1: source website accessibility.
    website_results = verify_source_websites()

    # Structure of every audit trail on disk.
    report_audit_trail_structures()

    # Per-source entry spot checks.
    paleo_results = verify_palaeolexicon_entries()
    ediana_results = verify_ediana_entries()
    oracc_results = verify_oracc_entries()
    tir_results = verify_tir_raetica_entries()
    avesta_results = verify_avesta_entries()

    # Data-quality pass over the avesta.org scrape.
    check_avesta_data_quality()

    # Wiktionary expansion spot check.
    verify_wiktionary_expansion()

    # Cross-check audit trails against the lexicon TSVs.
    cross_check_audit_vs_lexicon()

    # Final summary.
    print("\n" + "=" * 70)
    print("FINAL SUMMARY")
    print("=" * 70)

    print("\n Website Accessibility:")
    for source, res in website_results.items():
        status = "ACCESSIBLE" if res["ok"] else "INACCESSIBLE"
        print(f" {source:20s}: {status}")

    print("\n Palaeolexicon Entry Verification:")
    for lang, results in paleo_results.items():
        verified = sum(1 for r in results if r.get("word_found_in_page"))
        total = sum(1 for r in results if r.get("url"))
        no_id = sum(1 for r in results if r.get("verified") == "no_id")
        print(f" {lang}: {verified}/{total} verified via URL" + (f", {no_id} without word_id" if no_id else ""))

    print("\n eDiAna Entry Verification:")
    for lang, results in ediana_results.items():
        if not results:
            print(f" {lang}: EMPTY audit trail")
            continue
        accessible = sum(1 for r in results if r.get("accessible"))
        total = sum(1 for r in results if r.get("url"))
        print(f" {lang}: {accessible}/{total} URLs accessible")

    print("\n ORACC/eCUT: entries sampled but no per-word URL verification possible")
    print(f" TIR Raetica: {len(tir_results)} entries checked via wiki URLs")
    print(f" Avesta.org: {len(avesta_results)} entries sampled")

    print("\n KEY FINDINGS:")
    # Flag any source sites whose homepage could not be reached.
    inaccessible = [s for s, r in website_results.items() if not r["ok"]]
    if inaccessible:
        print(f" [WARNING] Inaccessible websites: {', '.join(inaccessible)}")
    else:
        print(f" [OK] All source websites are accessible")


if __name__ == "__main__":
    main()
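
# To run the full verification (file names here are illustrative):
#   python verify_sources.py > verification_report.txt
# Requires the third-party "requests" package (and its urllib3 dependency);
# everything else is from the standard library.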