#!/usr/bin/env python3 """ Verification script for ancient language database sources. Checks that source websites are real/accessible and spot-checks entries. """ import json import os import random import time import csv import sys import io from pathlib import Path from collections import defaultdict # Fix Windows console encoding sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8', errors='replace') sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8', errors='replace') import requests import urllib3 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) from urllib.parse import quote # Paths LEXICONS_DIR = Path(r"C:\Users\alvin\hf-ancient-scripts\data\training\lexicons") AUDIT_DIR = Path(r"C:\Users\alvin\hf-ancient-scripts\data\training\audit_trails") # Rate limiting REQUEST_DELAY = 1.5 # seconds between requests random.seed(42) # ============================================================ # 1. Source Website Accessibility Check # ============================================================ SOURCE_URLS = { "palaeolexicon": "https://www.palaeolexicon.com", "ediana": "https://ediana.gwi.uni-muenchen.de", "oracc_ecut": "https://oracc.museum.upenn.edu", "tir_raetica": "https://www.univie.ac.at/raetica/", "avesta_org": "https://avesta.org", } # Per-entry verification URL templates (where possible) ENTRY_URL_TEMPLATES = { "palaeolexicon": "https://www.palaeolexicon.com/Word/Show/{word_id}", "ediana": "https://ediana.gwi.uni-muenchen.de/dictionary/lemma/{entry_id}", # oracc doesn't have simple per-word URLs; we check the glossary pages "oracc_ecut": "https://oracc.museum.upenn.edu/ecut/", # TIR doesn't have per-word URLs in a simple pattern "tir_raetica": "https://www.univie.ac.at/raetica/wiki/Main_Page", "avesta_org": "https://avesta.org", } HEADERS = { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 " "(KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36" } def check_url(url, timeout=15, label=""): """Check if a URL is accessible. Returns (status_code, ok, error_msg).""" try: resp = requests.get(url, headers=HEADERS, timeout=timeout, allow_redirects=True, verify=False) return resp.status_code, resp.ok, None except requests.exceptions.SSLError as e: return None, False, f"SSL Error: {e}" except requests.exceptions.ConnectionError as e: return None, False, f"Connection Error: {e}" except requests.exceptions.Timeout: return None, False, "Timeout" except Exception as e: return None, False, f"Error: {e}" def check_url_contains(url, search_text, timeout=15): """Check if a URL's content contains specific text.""" try: resp = requests.get(url, headers=HEADERS, timeout=timeout, allow_redirects=True, verify=False) if resp.ok: return search_text.lower() in resp.text.lower(), resp.status_code return False, resp.status_code except Exception as e: return False, str(e) def verify_source_websites(): """Check accessibility of all source websites.""" print("=" * 70) print("PHASE 1: SOURCE WEBSITE ACCESSIBILITY") print("=" * 70) results = {} for source, url in SOURCE_URLS.items(): status, ok, err = check_url(url) results[source] = {"url": url, "status": status, "ok": ok, "error": err} if ok: print(f" [OK] {source:20s} -> {url} (HTTP {status})") else: print(f" [FAIL] {source:20s} -> {url} (status={status}, error={err})") time.sleep(REQUEST_DELAY) return results # ============================================================ # 2. Load and Sample Entries from Audit Trails # ============================================================ def load_audit_trail(filepath): """Load all entries from a JSONL audit trail file.""" entries = [] with open(filepath, "r", encoding="utf-8") as f: for line in f: line = line.strip() if line: try: entries.append(json.loads(line)) except json.JSONDecodeError: pass return entries def sample_entries(entries, n=10): """Sample n random entries.""" if len(entries) <= n: return entries return random.sample(entries, n) # ============================================================ # 3. Per-Source Verification # ============================================================ def verify_palaeolexicon_entries(): """Verify palaeolexicon entries by checking word_id URLs.""" print("\n" + "=" * 70) print("PALAEOLEXICON VERIFICATION") print("=" * 70) audit_files = sorted(AUDIT_DIR.glob("palaeolexicon_*.jsonl")) print(f" Audit trail files: {[f.name for f in audit_files]}") all_results = {} for af in audit_files: lang = af.stem.replace("palaeolexicon_", "") entries = load_audit_trail(af) print(f"\n --- Language: {lang} ({len(entries)} entries) ---") samples = sample_entries(entries, 10) lang_results = [] for entry in samples: word = entry.get("word", "?") gloss = entry.get("gloss", "?") word_id = entry.get("word_id") if word_id: url = ENTRY_URL_TEMPLATES["palaeolexicon"].format(word_id=word_id) found, status = check_url_contains(url, word, timeout=15) result = { "word": word, "gloss": gloss, "word_id": word_id, "url": url, "http_status": status, "word_found_in_page": found, } status_str = "FOUND" if found else f"NOT FOUND (HTTP {status})" print(f" word_id={word_id:6d} word={word:20s} gloss={gloss:30s} -> {status_str}") lang_results.append(result) time.sleep(REQUEST_DELAY) else: print(f" [NO ID] word={word:20s} gloss={gloss}") lang_results.append({ "word": word, "gloss": gloss, "word_id": None, "url": None, "verified": "no_id" }) all_results[lang] = lang_results return all_results def verify_ediana_entries(): """Verify eDiAna entries by checking entry_id URLs.""" print("\n" + "=" * 70) print("eDiAna VERIFICATION") print("=" * 70) audit_files = sorted(AUDIT_DIR.glob("ediana_*.jsonl")) print(f" Audit trail files: {[f.name for f in audit_files]}") all_results = {} for af in audit_files: lang = af.stem.replace("ediana_", "") entries = load_audit_trail(af) if not entries: print(f"\n --- Language: {lang} (EMPTY audit trail) ---") all_results[lang] = [] continue print(f"\n --- Language: {lang} ({len(entries)} entries) ---") samples = sample_entries(entries, 10) lang_results = [] for entry in samples: word = entry.get("word", "?") gloss = entry.get("gloss", "?") entry_id = entry.get("entry_id") lang_detail = entry.get("language_detail", "?") if entry_id: url = ENTRY_URL_TEMPLATES["ediana"].format(entry_id=entry_id) status_code, ok, err = check_url(url, timeout=15) result = { "word": word, "gloss": gloss, "entry_id": entry_id, "url": url, "http_status": status_code, "accessible": ok, } status_str = f"HTTP {status_code}" if status_code else err print(f" entry_id={entry_id:5s} word={word:20s} gloss={gloss:35s} -> {status_str}") lang_results.append(result) time.sleep(REQUEST_DELAY) else: print(f" [NO ID] word={word:20s} gloss={gloss}") lang_results.append({ "word": word, "gloss": gloss, "entry_id": None, "verified": "no_id" }) all_results[lang] = lang_results return all_results def verify_oracc_entries(): """Verify ORACC/eCUT entries.""" print("\n" + "=" * 70) print("ORACC/eCUT VERIFICATION") print("=" * 70) af = AUDIT_DIR / "oracc_ecut_xur.jsonl" entries = load_audit_trail(af) print(f" Audit trail: {af.name} ({len(entries)} entries)") samples = sample_entries(entries, 10) results = [] # First check the glossary index page glossary_url = "https://oracc.museum.upenn.edu/ecut/akk/index.html" print(f"\n Checking glossary index: {glossary_url}") status_code, ok, err = check_url(glossary_url, timeout=15) print(f" -> HTTP {status_code}" if status_code else f" -> {err}") time.sleep(REQUEST_DELAY) # Try the JSON API endpoint json_url = "https://oracc.museum.upenn.edu/ecut/akk/sortindex.json" print(f" Checking glossary JSON: {json_url}") status_code, ok, err = check_url(json_url, timeout=15) print(f" -> HTTP {status_code}" if status_code else f" -> {err}") time.sleep(REQUEST_DELAY) # Also try the main eCUT page main_url = "https://oracc.museum.upenn.edu/ecut/" print(f" Checking main eCUT page: {main_url}") status_code, ok, err = check_url(main_url, timeout=15) print(f" -> HTTP {status_code}" if status_code else f" -> {err}") time.sleep(REQUEST_DELAY) print(f"\n Sample entries from audit trail:") for entry in samples: word = entry.get("word", "?") gloss = entry.get("gloss", "?") print(f" word={word:20s} gloss={gloss}") results.append({"word": word, "gloss": gloss, "source": "oracc_ecut"}) return results def verify_tir_raetica_entries(): """Verify TIR (Thesaurus Inscriptionum Raeticarum) entries.""" print("\n" + "=" * 70) print("TIR RAETICA VERIFICATION") print("=" * 70) af = AUDIT_DIR / "tir_raetica_xrr.jsonl" entries = load_audit_trail(af) print(f" Audit trail: {af.name} ({len(entries)} entries)") samples = sample_entries(entries, 10) results = [] # Check main TIR wiki pages urls_to_check = [ "https://www.univie.ac.at/raetica/wiki/Main_Page", "https://www.univie.ac.at/raetica/wiki/Category:Lexicon", "https://www.univie.ac.at/raetica/wiki/Category:Words", ] for url in urls_to_check: print(f" Checking: {url}") status_code, ok, err = check_url(url, timeout=15) if status_code: print(f" -> HTTP {status_code}") else: print(f" -> {err}") time.sleep(REQUEST_DELAY) print(f"\n Sample entries from audit trail:") for entry in samples: word = entry.get("word", "?") gloss = entry.get("gloss", "?") word_type = entry.get("word_type", "") # Try to construct a wiki URL for the word wiki_url = f"https://www.univie.ac.at/raetica/wiki/{quote(word)}" status_code, ok, err = check_url(wiki_url, timeout=15) status_str = f"HTTP {status_code}" if status_code else err print(f" word={word:20s} gloss={gloss:25s} wiki_url -> {status_str}") results.append({ "word": word, "gloss": gloss, "wiki_url": wiki_url, "http_status": status_code, "accessible": ok }) time.sleep(REQUEST_DELAY) return results def verify_avesta_entries(): """Verify avesta.org entries.""" print("\n" + "=" * 70) print("AVESTA.ORG VERIFICATION") print("=" * 70) af = AUDIT_DIR / "avesta_org_ave.jsonl" entries = load_audit_trail(af) print(f" Audit trail: {af.name} ({len(entries)} entries)") # Check main site main_url = "https://avesta.org" print(f"\n Checking main site: {main_url}") status_code, ok, err = check_url(main_url, timeout=15) if status_code: print(f" -> HTTP {status_code}") else: print(f" -> {err}") time.sleep(REQUEST_DELAY) # Check dictionary/glossary page dict_urls = [ "https://avesta.org/dictionary.html", "https://avesta.org/avdict/avdict.htm", "https://avesta.org/avdict.htm", ] for url in dict_urls: print(f" Checking dictionary page: {url}") status_code, ok, err = check_url(url, timeout=15) if status_code: print(f" -> HTTP {status_code}") else: print(f" -> {err}") time.sleep(REQUEST_DELAY) print(f"\n Sample entries from audit trail:") # Filter out obvious bad entries (NOTE, Example) real_entries = [e for e in entries if e.get("word") not in ("NOTE", "Example", "swift")] samples = sample_entries(real_entries, min(10, len(real_entries))) results = [] for entry in samples: word = entry.get("word", "?") gloss = entry.get("gloss", "?") print(f" word={word:20s} gloss={gloss}") results.append({"word": word, "gloss": gloss, "source": "avesta.org"}) return results # ============================================================ # 4. Audit Trail Structure Report # ============================================================ def report_audit_trail_structures(): """Report the structure of each audit trail file.""" print("\n" + "=" * 70) print("AUDIT TRAIL STRUCTURE REPORT") print("=" * 70) for af in sorted(AUDIT_DIR.glob("*.jsonl")): entries = load_audit_trail(af) if not entries: print(f"\n {af.name}: EMPTY (0 entries)") continue # Get all keys across entries all_keys = set() for e in entries: all_keys.update(e.keys()) # Check for URL/provenance fields url_fields = [k for k in all_keys if any(x in k.lower() for x in ["url", "link", "source", "id", "page"])] print(f"\n {af.name}: {len(entries)} entries") print(f" Fields: {sorted(all_keys)}") print(f" URL/provenance fields: {sorted(url_fields)}") # Show first entry as example print(f" Example: {json.dumps(entries[0], ensure_ascii=False)[:200]}") # Check for word_id presence (palaeolexicon) if "word_id" in all_keys: ids_present = sum(1 for e in entries if e.get("word_id")) print(f" word_id coverage: {ids_present}/{len(entries)} entries have word_id") # Check for entry_id presence (ediana) if "entry_id" in all_keys: ids_present = sum(1 for e in entries if e.get("entry_id")) print(f" entry_id coverage: {ids_present}/{len(entries)} entries have entry_id") # ============================================================ # 5. Cross-check: audit trail entries vs lexicon TSV entries # ============================================================ def cross_check_audit_vs_lexicon(): """Check that audit trail entries appear in the lexicon TSVs.""" print("\n" + "=" * 70) print("CROSS-CHECK: AUDIT TRAILS vs LEXICON TSVs") print("=" * 70) source_lang_map = { "palaeolexicon": ["ett", "xcr", "xhu", "xlc", "xld", "xlw"], "ediana": ["xcr", "xlc", "xld", "xlw"], "oracc_ecut": ["xur"], "tir_raetica": ["xrr"], "avesta_org": ["ave"], } for source, langs in source_lang_map.items(): for lang in langs: audit_file = AUDIT_DIR / f"{source}_{lang}.jsonl" lexicon_file = LEXICONS_DIR / f"{lang}.tsv" if not audit_file.exists(): print(f" {source}/{lang}: audit trail MISSING") continue if not lexicon_file.exists(): print(f" {source}/{lang}: lexicon TSV MISSING") continue audit_entries = load_audit_trail(audit_file) if not audit_entries: print(f" {source}/{lang}: audit trail EMPTY") continue # Load lexicon words for this source lexicon_words = set() with open(lexicon_file, "r", encoding="utf-8") as f: reader = csv.reader(f, delimiter="\t") header = next(reader) word_idx = 0 # Word column source_idx = 3 # Source column for row in reader: if len(row) > source_idx: src_name = row[source_idx].strip() # Map source names if source == "palaeolexicon" and src_name == "palaeolexicon": lexicon_words.add(row[word_idx].strip()) elif source == "ediana" and src_name == "ediana": lexicon_words.add(row[word_idx].strip()) elif source == "oracc_ecut" and src_name == "oracc_ecut": lexicon_words.add(row[word_idx].strip()) elif source == "tir_raetica" and src_name == "tir_raetica": lexicon_words.add(row[word_idx].strip()) # Check overlap audit_words = {e.get("word", "").strip() for e in audit_entries} in_both = audit_words & lexicon_words only_audit = audit_words - lexicon_words only_lexicon = lexicon_words - audit_words print(f" {source}/{lang}: audit={len(audit_words)}, lexicon={len(lexicon_words)}, " f"overlap={len(in_both)}, audit_only={len(only_audit)}, lexicon_only={len(only_lexicon)}") if only_audit and len(only_audit) <= 5: print(f" Audit-only words: {only_audit}") if only_lexicon and len(only_lexicon) <= 5: print(f" Lexicon-only words: {only_lexicon}") # ============================================================ # 6. Wiktionary Expansion Audit Trail Spot Check # ============================================================ def verify_wiktionary_expansion(): """Spot-check wiktionary expansion audit trails for URL structure.""" print("\n" + "=" * 70) print("WIKTIONARY EXPANSION SPOT CHECK") print("=" * 70) wikt_files = sorted(AUDIT_DIR.glob("wiktionary_expansion_*.jsonl")) print(f" Found {len(wikt_files)} wiktionary expansion audit files") # Sample from a few files for af in wikt_files[:4]: entries = load_audit_trail(af) if not entries: continue lang = af.stem.replace("wiktionary_expansion_", "") print(f"\n --- {lang} ({len(entries)} entries) ---") samples = sample_entries(entries, 3) for entry in samples: word = entry.get("word", "?") page_title = entry.get("page_title", "") gloss = entry.get("gloss", "?") if page_title: # Wiktionary URL url = f"https://en.wiktionary.org/wiki/{quote(page_title)}" status_code, ok, err = check_url(url, timeout=15) status_str = f"HTTP {status_code}" if status_code else err print(f" word={word:25s} page_title={page_title:25s} -> {status_str}") time.sleep(REQUEST_DELAY) else: print(f" word={word:25s} [no page_title] gloss={gloss}") # ============================================================ # 7. Data Quality Checks for avesta.org # ============================================================ def check_avesta_data_quality(): """Check quality of avesta.org data (it looked suspicious).""" print("\n" + "=" * 70) print("AVESTA.ORG DATA QUALITY CHECK") print("=" * 70) af = AUDIT_DIR / "avesta_org_ave.jsonl" entries = load_audit_trail(af) print(f" Total entries: {len(entries)}") # Check for obvious problems suspicious = [] for entry in entries: word = entry.get("word", "") gloss = entry.get("gloss", "") # Check for entries that look like scraping artifacts if word in ("NOTE", "Example", "swift") or not word: suspicious.append(entry) # Check for entries where gloss looks like it's another word, not a translation if gloss and not any(c == ' ' for c in gloss) and gloss.isalpha() and len(gloss) < 15: # Could be fine (single-word gloss) or could be bad parse pass print(f" Suspicious entries (metadata/artifacts): {len(suspicious)}") for s in suspicious: print(f" -> {json.dumps(s, ensure_ascii=False)}") print(f"\n Sample of normal entries:") normal = [e for e in entries if e.get("word") not in ("NOTE", "Example", "swift")] for entry in sample_entries(normal, 10): print(f" word={entry.get('word','?'):20s} gloss={entry.get('gloss','?')}") # ============================================================ # MAIN # ============================================================ def main(): print("ANCIENT LANGUAGE DATABASE SOURCE VERIFICATION") print("=" * 70) print(f"Date: {time.strftime('%Y-%m-%d %H:%M:%S')}") print(f"Lexicons dir: {LEXICONS_DIR}") print(f"Audit trails dir: {AUDIT_DIR}") print() # Phase 1: Website accessibility website_results = verify_source_websites() # Phase 2: Audit trail structure report report_audit_trail_structures() # Phase 3: Per-source entry verification paleo_results = verify_palaeolexicon_entries() ediana_results = verify_ediana_entries() oracc_results = verify_oracc_entries() tir_results = verify_tir_raetica_entries() avesta_results = verify_avesta_entries() # Phase 4: Data quality checks check_avesta_data_quality() # Phase 5: Wiktionary expansion spot check verify_wiktionary_expansion() # Phase 6: Cross-check audit trails vs lexicons cross_check_audit_vs_lexicon() # ============================================================ # SUMMARY # ============================================================ print("\n" + "=" * 70) print("FINAL SUMMARY") print("=" * 70) print("\n Website Accessibility:") for source, res in website_results.items(): status = "ACCESSIBLE" if res["ok"] else "INACCESSIBLE" print(f" {source:20s}: {status}") print("\n Palaeolexicon Entry Verification:") for lang, results in paleo_results.items(): verified = sum(1 for r in results if r.get("word_found_in_page")) total = sum(1 for r in results if r.get("url")) no_id = sum(1 for r in results if r.get("verified") == "no_id") print(f" {lang}: {verified}/{total} verified via URL" + (f", {no_id} without word_id" if no_id else "")) print("\n eDiAna Entry Verification:") for lang, results in ediana_results.items(): if not results: print(f" {lang}: EMPTY audit trail") continue accessible = sum(1 for r in results if r.get("accessible")) total = sum(1 for r in results if r.get("url")) print(f" {lang}: {accessible}/{total} URLs accessible") print("\n ORACC/eCUT: entries sampled but no per-word URL verification possible") print(f" TIR Raetica: {len(tir_results)} entries checked via wiki URLs") print(f" Avesta.org: {len(avesta_results)} entries sampled") print("\n KEY FINDINGS:") # Summarize any issues inaccessible = [s for s, r in website_results.items() if not r["ok"]] if inaccessible: print(f" [WARNING] Inaccessible websites: {', '.join(inaccessible)}") else: print(f" [OK] All source websites are accessible") if __name__ == "__main__": main()