| """ |
| Adversarial audit of Team A's avesta.org extraction. |
| Checks 1-6: Source verification, code inspection, parse verification, |
| transformation verification, output format, audit trail integrity. |
| """ |
from __future__ import annotations

import csv
import hashlib
import html
import json
import random
import re
import sys
import urllib.request
from pathlib import Path

# Avestan transliterations contain non-ASCII characters; force UTF-8 console
# output on Windows so the report does not die with a UnicodeEncodeError.
if sys.platform == "win32":
    sys.stdout.reconfigure(encoding="utf-8", errors="replace")
    sys.stderr.reconfigure(encoding="utf-8", errors="replace")

PROJECT_ROOT = Path(__file__).resolve().parent.parent
RAW_HTML = PROJECT_ROOT / "data" / "training" / "raw" / "avesta_org_raw.html"
EXTRACTED_TSV = PROJECT_ROOT / "data" / "training" / "raw" / "avesta_org_extracted.tsv"
AUDIT_JSONL = PROJECT_ROOT / "data" / "training" / "audit_trails" / "avesta_org_ave_new.jsonl"
SCRIPT_PATH = PROJECT_ROOT / "scripts" / "scrape_avesta_org.py"

URL = "https://avesta.org/avdict/avdict.htm"

# Fixed seed so the sampled spot-checks are reproducible across audit runs.
random.seed(42)

results = {}
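
# Usage note (an assumption inferred from the path constants above, not a
# documented interface): the raw HTML, extracted TSV, audit JSONL, and Team A's
# scraper must already exist at the paths defined above, and this file is
# assumed to live one directory below PROJECT_ROOT so that
# Path(__file__).resolve().parent.parent points at the repo root. It is then
# run directly, for example:
#
#     python audit_avesta_org.py > audit_report.txt   # filename hypothetical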


print("=" * 70)
print("CHECK 1: Source Verification")
print("=" * 70)

try:
    # Hash the cached HTML that Team A claims to have scraped.
    with open(RAW_HTML, "rb") as f:
        cached_bytes = f.read()
    cached_hash = hashlib.sha256(cached_bytes).hexdigest()
    cached_size = len(cached_bytes)
    print(f" Cached HTML size: {cached_size:,} bytes")
    print(f" Cached SHA256: {cached_hash[:16]}...")

    # Re-fetch the live page for comparison.
    print(f" Re-fetching {URL} ...")
    req = urllib.request.Request(URL, headers={"User-Agent": "Mozilla/5.0 (audit scraper)"})
    resp = urllib.request.urlopen(req, timeout=60)
    live_bytes = resp.read()
    live_hash = hashlib.sha256(live_bytes).hexdigest()
    live_size = len(live_bytes)
    print(f" Live HTML size: {live_size:,} bytes")
    print(f" Live SHA256: {live_hash[:16]}...")

    if cached_hash == live_hash:
        print(" EXACT MATCH: Cached HTML is byte-identical to live source.")
        check1_pass = True
    else:
        # Not byte-identical: the page may have changed since the scrape, so
        # fall back to a structural comparison instead of failing outright.
        cached_text = cached_bytes.decode("utf-8", errors="replace")
        live_text = live_bytes.decode("utf-8", errors="replace")

        has_title_cached = "Avestan Dictionary" in cached_text
        has_title_live = "Avestan Dictionary" in live_text
        has_dcta_cached = 'NAME="dcta"' in cached_text or 'name="dcta"' in cached_text.lower()
        has_dcta_live = 'NAME="dcta"' in live_text or 'name="dcta"' in live_text.lower()
        has_dt_cached = "<DT>" in cached_text
        has_dt_live = "<DT>" in live_text

        dt_count_cached = cached_text.upper().count("<DT>")
        dt_count_live = live_text.upper().count("<DT>")

        print(" NOT byte-identical (page may have been updated)")
        print(f" Cached: title={'Y' if has_title_cached else 'N'}, dcta={'Y' if has_dcta_cached else 'N'}, DT count={dt_count_cached}")
        print(f" Live:   title={'Y' if has_title_live else 'N'}, dcta={'Y' if has_dcta_live else 'N'}, DT count={dt_count_live}")

        # Accept the cached copy if both versions share the expected structure
        # and the number of <DT> entries is close.
        if (has_title_cached and has_title_live and
                has_dcta_cached and has_dcta_live and
                has_dt_cached and has_dt_live and
                abs(dt_count_cached - dt_count_live) < 50):
            print(" STRUCTURAL MATCH: Both are legitimate Avestan dictionary pages.")
            check1_pass = True
        else:
            print(" FAIL: Significant structural differences between cached and live HTML")
            check1_pass = False

    # Regardless of the live comparison, sanity-check the cached content itself.
    cached_text = cached_bytes.decode("utf-8", errors="replace")
    has_author = "Joseph H. Peterson" in cached_text
    has_dict_entries = cached_text.upper().count("<DT>") > 100
    has_avestan_words = any(w in cached_text for w in ["baodh", "ahura", "mazda", "zarathu"])

    print(f" Content checks: author={'Y' if has_author else 'N'}, entries={'Y' if has_dict_entries else 'N'}, avestan_words={'Y' if has_avestan_words else 'N'}")

    if not (has_author and has_dict_entries and has_avestan_words):
        print(" FAIL: Cached HTML does not look like a legitimate Avestan dictionary")
        check1_pass = False

except Exception as e:
    print(f" ERROR: {e}")
    check1_pass = False

results["Check 1: Source Verification"] = "PASS" if check1_pass else "FAIL"
print(f"\n RESULT: {'PASS' if check1_pass else 'FAIL'}")


print("\n" + "=" * 70)
print("CHECK 2: Code Inspection")
print("=" * 70)

with open(SCRIPT_PATH, "r", encoding="utf-8") as f:
    script_code = f.read()

check2_issues = []

# The scraper must import an HTTP library...
has_urllib = "urllib.request" in script_code
has_requests = "import requests" in script_code
if not (has_urllib or has_requests):
    check2_issues.append("No HTTP library import found (urllib or requests)")
print(f" HTTP library: urllib={'Y' if has_urllib else 'N'}, requests={'Y' if has_requests else 'N'}")

# ...and actually call it.
has_urlopen = "urlopen" in script_code or "requests.get" in script_code
if not has_urlopen:
    check2_issues.append("No urlopen() or requests.get() call found")
print(f" URL fetch call: {'Y' if has_urlopen else 'N'}")

# Large hardcoded word lists would suggest fabricated "scraped" data.
hardcoded_list_pattern = re.findall(r'\[(?:"[^"]+",?\s*){10,}\]', script_code)
if hardcoded_list_pattern:
    check2_issues.append(f"Found {len(hardcoded_list_pattern)} suspicious hardcoded word lists")
print(f" Hardcoded word lists: {len(hardcoded_list_pattern)} found")

# Literal tab-separated rows written straight to the output file are another
# fabrication signature. The case-sensitive pattern matches lowercase data
# rows only; the case-insensitive pass additionally matches header writes
# such as "Word\tIPA\t...", so the difference between the two counts is the
# number of (legitimate) header writes.
literal_data_writes = re.findall(r'f\.write\(["\'][a-z]+\\t[a-z]+\\t', script_code)
literal_data_writes_ci = re.findall(r'f\.write\(["\'][a-z]+\\t[a-z]+\\t', script_code, re.IGNORECASE)
header_writes = len(literal_data_writes_ci) - len(literal_data_writes)
if literal_data_writes:
    check2_issues.append(f"Found {len(literal_data_writes)} hardcoded data write patterns (excluding headers)")
print(f" Literal data writes: {len(literal_data_writes)} actual data, {header_writes} header(s) (OK)")

# A very large embedded word -> gloss dict would also indicate hardcoded data.
large_dict_pattern = re.findall(r'\{(?:["\'][a-z]{3,}["\']:\s*["\'][^"\']+["\'],?\s*){20,}\}', script_code, re.IGNORECASE)
if large_dict_pattern:
    check2_issues.append(f"Found {len(large_dict_pattern)} suspiciously large embedded data dicts")
print(f" Large embedded dicts: {len(large_dict_pattern)} found")

# The scraper must contain genuine HTML-parsing logic.
has_regex_parsing = "re.compile" in script_code or "re.search" in script_code or "re.findall" in script_code
has_html_parsing = "html.unescape" in script_code or "BeautifulSoup" in script_code or "<DT>" in script_code
if not (has_regex_parsing and has_html_parsing):
    check2_issues.append("No HTML parsing logic found")
print(f" HTML parsing: regex={'Y' if has_regex_parsing else 'N'}, html_handling={'Y' if has_html_parsing else 'N'}")

# A real parser should not be trivially short.
script_lines = script_code.count('\n')
print(f" Script length: {script_lines} lines")
if script_lines < 50:
    check2_issues.append("Script suspiciously short for a real parser")

check2_pass = len(check2_issues) == 0
if check2_issues:
    for issue in check2_issues:
        print(f" ISSUE: {issue}")

results["Check 2: Code Inspection"] = "PASS" if check2_pass else "FAIL"
print(f"\n RESULT: {'PASS' if check2_pass else 'FAIL'}")


print("\n" + "=" * 70)
print("CHECK 3: Parse Verification (20 sampled entries)")
print("=" * 70)

# Load the extracted TSV produced by Team A.
tsv_entries = []
with open(EXTRACTED_TSV, "r", encoding="utf-8") as f:
    reader = csv.DictReader(f, delimiter="\t")
    for row in reader:
        tsv_entries.append(row)

print(f" Loaded {len(tsv_entries)} TSV entries")

# Load the per-entry audit trail (one JSON object per line).
audit_entries = []
with open(AUDIT_JSONL, "r", encoding="utf-8") as f:
    for line in f:
        audit_entries.append(json.loads(line))

print(f" Loaded {len(audit_entries)} audit entries")

# Index audit records by normalized word for fast lookup.
audit_by_word = {}
for ae in audit_entries:
    audit_by_word[ae["word"]] = ae
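
# For reference, each audit record is expected to look roughly like the
# following (the field set is verified in Check 6; the values here are
# hypothetical, not copied from the real JSONL):
#
#     {"word": "...", "word_raw_avesta_org": "...", "ipa": "...", "sca": "...",
#      "source": "avesta_org", "raw_dt": "<DT>...", "raw_dd": "<DD>..."}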

# Read the raw HTML once, both as stored and with HTML entities decoded,
# since the audit trail may have recorded its snippets either way.
with open(RAW_HTML, "r", encoding="utf-8", errors="replace") as f:
    raw_html_text = f.read()

decoded_html = html.unescape(raw_html_text)

# Spot-check 20 random TSV entries: each must be traceable to the raw HTML.
sample_indices = sorted(random.sample(range(len(tsv_entries)), min(20, len(tsv_entries))))
check3_failures = []
check3_details = []

for idx in sample_indices:
    entry = tsv_entries[idx]
    word = entry["Word"]

    audit_rec = audit_by_word.get(word)
    if not audit_rec:
        check3_failures.append(f" [{idx}] {word!r}: No audit record found")
        check3_details.append({"idx": idx, "word": word, "status": "NO_AUDIT"})
        continue

    raw_dt = audit_rec.get("raw_dt", "")
    word_raw = audit_rec.get("word_raw_avesta_org", "")

    # Try progressively looser strategies to locate the entry in the source HTML.
    found_in_html = False

    # 1. The full recorded <DT> text, in the entity-decoded HTML.
    if raw_dt and raw_dt in decoded_html:
        found_in_html = True

    # 2. The raw avesta.org spelling, in the decoded HTML.
    if not found_in_html and word_raw:
        if word_raw in decoded_html:
            found_in_html = True

    # 3. The raw spelling in the undecoded HTML (in case the audit trail
    #    stored text before entity decoding).
    if not found_in_html and word_raw:
        if word_raw in raw_html_text:
            found_in_html = True

    # 4. Just the headword part of raw_dt (before any "[...]" annotation,
    #    minus leading dots).
    if not found_in_html and raw_dt:
        dt_word_part = raw_dt.split("[")[0].strip().lstrip(".")
        dt_word_part = dt_word_part.strip()
        if dt_word_part and dt_word_part in decoded_html:
            found_in_html = True

    status = "FOUND" if found_in_html else "NOT_FOUND"
    detail = {"idx": idx, "word": word, "word_raw": word_raw, "raw_dt": raw_dt, "status": status}
    check3_details.append(detail)

    if found_in_html:
        print(f" [{idx:4d}] {word:<25s} raw={word_raw!r:<25s} -> FOUND in HTML")
    else:
        check3_failures.append(f" [{idx:4d}] {word:<25s} raw={word_raw!r:<25s} -> NOT FOUND")
        print(f" [{idx:4d}] {word:<25s} raw={word_raw!r:<25s} -> NOT FOUND ***")

check3_pass = len(check3_failures) == 0
if check3_failures:
    print(f"\n FAILURES ({len(check3_failures)}):")
    for fail in check3_failures:
        print(f" {fail}")

results["Check 3: Parse Verification"] = "PASS" if check3_pass else f"FAIL ({len(check3_failures)}/{len(sample_indices)} not found)"
print(f"\n RESULT: {'PASS' if check3_pass else 'FAIL'}")


print("\n" + "=" * 70)
print("CHECK 4: Transformation Verification (10 sampled entries)")
print("=" * 70)

# Reuse Team A's transliteration map so that only the pre-processing steps
# (character normalization and digraph conversion) are independently re-derived.
sys.path.insert(0, str(PROJECT_ROOT / "scripts"))
sys.path.insert(0, str(PROJECT_ROOT / "cognate_pipeline" / "src"))
from transliteration_maps import transliterate, AVESTAN_MAP


def audit_normalize_chars(text: str) -> str:
    """Independent re-implementation of character normalization."""
    replacements = {
        "\u00e2": "\u0101",  # a-circumflex -> a-macron
        "\u00ea": "\u0113",  # e-circumflex -> e-macron
        "\u00ee": "\u012b",  # i-circumflex -> i-macron
        "\u00f4": "\u014d",  # o-circumflex -> o-macron
        "\u00fb": "\u016b",  # u-circumflex -> u-macron
        "\u00e3": "\u0105",  # a-tilde -> a-ogonek
        "\u00e5": "\u0105",  # a-ring  -> a-ogonek
        "\u00f1": "\u0144",  # n-tilde -> n-acute
        "\u00fd": "y",       # y-acute -> y
    }
    return "".join(replacements.get(ch, ch) for ch in text)


def audit_convert_digraphs(text: str) -> str:
    """Independent re-implementation of digraph conversion."""
    digraphs = {
        "ngh": "\u014Bh",       # -> velar nasal + h
        "ngv": "\u014B\u1D5B",  # -> velar nasal + superscript v
        "ng": "\u014B",         # -> velar nasal
        "sh": "\u0161",         # -> s-caron
        "zh": "\u017E",         # -> z-caron
        "th": "\u03B8",         # -> theta
        "dh": "\u03B4",         # -> delta
        "hv": "x\u1D5B",        # -> x + superscript v
        "kh": "\u03B3",         # -> gamma
        "h'": "h",
    }
    # Longest digraphs are replaced first, so "ngh" maps as a unit instead of
    # being split into "ng" + "h".
    for digraph, replacement in sorted(digraphs.items(), key=lambda x: -len(x[0])):
        text = text.replace(digraph, replacement)
    return text
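
# A minimal self-check of the digraph conversion above. The inputs are
# illustrative test words chosen by the auditor, not entries from the data;
# the asserts run once at this point and abort the audit early if the
# re-implementation is broken:
assert audit_convert_digraphs("anghu") == "a\u014bhu"                    # "ngh" handled as one unit
assert audit_convert_digraphs("zarathushtra") == "zara\u03b8u\u0161tra"  # "th" and "sh" converted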


def audit_process_word(word_raw: str) -> str:
    """Independent IPA computation for verification."""
    word = audit_normalize_chars(word_raw)
    word = audit_convert_digraphs(word)
    word = word.strip()
    # The final step deliberately reuses Team A's shared transliterate(), so a
    # mismatch below points at their pre-processing rather than at the map.
    ipa = transliterate(word, "ave")
    return ipa

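# Worked example of the pre-processing pipeline (hypothetical input; the final
# IPA string depends on Team A's transliteration map and is therefore not
# spelled out here):
#
#     "baodh\u00f4" --normalize--> "baodh\u014d" --digraphs--> "bao\u03b4\u014d" --transliterate(..., "ave")--> IPA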

# Recompute the IPA for 10 random entries and compare against the reported value.
sample_indices_4 = sorted(random.sample(range(len(tsv_entries)), min(10, len(tsv_entries))))
check4_failures = []

for idx in sample_indices_4:
    entry = tsv_entries[idx]
    word = entry["Word"]
    reported_ipa = entry["IPA"]

    audit_rec = audit_by_word.get(word)
    if not audit_rec:
        check4_failures.append(f" [{idx}] {word}: No audit record for raw word lookup")
        continue

    word_raw = audit_rec.get("word_raw_avesta_org", word)

    # Independent recomputation from the raw avesta.org spelling.
    expected_ipa = audit_process_word(word_raw)

    match = (expected_ipa == reported_ipa)

    # If the raw spelling contains characters or digraphs that must be
    # transformed, the reported IPA cannot simply equal the raw word.
    has_transformable = any(c in word_raw for c in ["\u00e2", "\u00ea", "\u00ee", "\u00f4", "\u00fb"])
    has_digraphs = any(d in word_raw for d in ["sh", "zh", "th", "dh"])

    if has_transformable or has_digraphs:
        ipa_differs = (reported_ipa != word_raw)
        if not ipa_differs:
            check4_failures.append(
                f" [{idx}] {word}: IPA={reported_ipa!r} EQUALS raw word {word_raw!r} "
                f"(should differ due to transformable chars)")

    if match:
        print(f" [{idx:4d}] raw={word_raw!r:<25s} -> IPA={reported_ipa!r:<25s} MATCH")
    else:
        print(f" [{idx:4d}] raw={word_raw!r:<25s} -> reported={reported_ipa!r}, expected={expected_ipa!r} MISMATCH ***")
        check4_failures.append(
            f" [{idx}] {word}: reported IPA={reported_ipa!r} != expected={expected_ipa!r}")

check4_pass = len(check4_failures) == 0
if check4_failures:
    print(f"\n FAILURES ({len(check4_failures)}):")
    for fail in check4_failures:
        print(f" {fail}")

results["Check 4: Transformation Verification"] = "PASS" if check4_pass else f"FAIL ({len(check4_failures)} issues)"
print(f"\n RESULT: {'PASS' if check4_pass else 'FAIL'}")


print("\n" + "=" * 70)
print("CHECK 5: Output Format")
print("=" * 70)

check5_issues = []

# The TSV header must match the pipeline's expected schema exactly.
with open(EXTRACTED_TSV, "r", encoding="utf-8") as f:
    header_line = f.readline().strip()
expected_header = "Word\tIPA\tSCA\tSource\tConcept_ID\tCognate_Set_ID"
if header_line == expected_header:
    print(" Header: CORRECT")
else:
    check5_issues.append(f"Header mismatch: got {header_line!r}")
    print(f" Header: WRONG (got {header_line!r})")

# Every entry must come from this source.
non_avesta_sources = [e for e in tsv_entries if e["Source"] != "avesta_org"]
if non_avesta_sources:
    check5_issues.append(f"{len(non_avesta_sources)} entries have wrong Source")
    print(f" Source column: {len(non_avesta_sources)} entries NOT 'avesta_org'")
else:
    print(f" Source column: ALL {len(tsv_entries)} entries are 'avesta_org'")

# Duplicate headwords would inflate the entry count.
words = [e["Word"] for e in tsv_entries]
word_counts = {}
for w in words:
    word_counts[w] = word_counts.get(w, 0) + 1
duplicates = {w: c for w, c in word_counts.items() if c > 1}
if duplicates:
    check5_issues.append(f"{len(duplicates)} duplicate words found")
    print(f" Duplicates: {len(duplicates)} duplicate words")
    for w, c in list(duplicates.items())[:5]:
        print(f" '{w}' appears {c} times")
else:
    print(f" Duplicates: NONE (all {len(tsv_entries)} words unique)")

# A suspiciously round entry count (an exact multiple of 100, which also
# covers multiples of 500 and 1000) can indicate data generated to a target
# size rather than parsed from the page.
entry_count = len(tsv_entries)
is_round = entry_count % 100 == 0
print(f" Entry count: {entry_count} (suspiciously round: {'YES' if is_round else 'NO'})")
if is_round:
    check5_issues.append(f"Entry count {entry_count} is suspiciously round")

# Every data row must have exactly 6 tab-separated fields.
malformed_rows = 0
with open(EXTRACTED_TSV, "r", encoding="utf-8") as f:
    for i, line in enumerate(f):
        if i == 0:
            continue
        fields = line.strip().split("\t")
        if len(fields) != 6:
            malformed_rows += 1
            if malformed_rows <= 3:
                print(f" Row {i}: {len(fields)} fields (expected 6): {line.strip()[:80]}")
if malformed_rows:
    check5_issues.append(f"{malformed_rows} malformed rows")
    print(f" Malformed rows: {malformed_rows}")
else:
    print(" Row format: ALL rows have 6 fields")

# Concept_ID and Cognate_Set_ID should still be '-' placeholders at this stage.
non_dash_concept = sum(1 for e in tsv_entries if e["Concept_ID"] != "-")
non_dash_cognate = sum(1 for e in tsv_entries if e["Cognate_Set_ID"] != "-")
print(f" Concept_ID='-': {len(tsv_entries) - non_dash_concept}/{len(tsv_entries)}")
print(f" Cognate_Set_ID='-': {len(tsv_entries) - non_dash_cognate}/{len(tsv_entries)}")

check5_pass = len(check5_issues) == 0
if check5_issues:
    for issue in check5_issues:
        print(f" ISSUE: {issue}")

results["Check 5: Output Format"] = "PASS" if check5_pass else f"FAIL ({len(check5_issues)} issues)"
print(f"\n RESULT: {'PASS' if check5_pass else 'FAIL'}")


print("\n" + "=" * 70)
print("CHECK 6: Audit Trail Integrity (20 sampled records)")
print("=" * 70)

check6_issues = []

# Structural check: the first few records must carry all required fields.
required_fields = {"word", "word_raw_avesta_org", "ipa", "sca", "source", "raw_dt", "raw_dd"}
for i, rec in enumerate(audit_entries[:5]):
    missing = required_fields - set(rec.keys())
    if missing:
        check6_issues.append(f"Record {i} missing fields: {missing}")
        print(f" Record {i}: MISSING {missing}")
    else:
        if i == 0:
            print(" Structure check: all required fields present")

# Spot-check 20 random audit records: their raw_dt/raw_dd snippets must
# actually occur in the cached HTML.
sample_indices_6 = sorted(random.sample(range(len(audit_entries)), min(20, len(audit_entries))))
found_count = 0
not_found_details = []

for idx in sample_indices_6:
    rec = audit_entries[idx]
    raw_dt = rec.get("raw_dt", "")
    raw_dd = rec.get("raw_dd", "")
    word = rec.get("word", "")
    word_raw = rec.get("word_raw_avesta_org", "")

    dt_found = False
    dd_found = False

    # The recorded <DT> text (minus leading dots/spaces) must appear in the
    # decoded HTML; failing that, fall back to the raw spelling of the word.
    if raw_dt:
        dt_search = raw_dt.lstrip(". ").strip()
        if dt_search in decoded_html:
            dt_found = True
        elif word_raw and word_raw in decoded_html:
            dt_found = True

    if raw_dd:
        # Definitions may have been reflowed, so match on a leading substring:
        # the first 60 characters, then the first 30 as a looser fallback.
        dd_search = raw_dd[:60].strip()
        if dd_search in decoded_html:
            dd_found = True
        elif len(raw_dd) > 15:
            dd_short = raw_dd[:30].strip()
            if dd_short in decoded_html:
                dd_found = True

    if dt_found:
        found_count += 1
        print(f" [{idx:4d}] word={word!r:<20s} raw_dt found, raw_dd {'found' if dd_found else 'NOT found'}")
    else:
        dt_preview = repr(raw_dt[:50])
        not_found_details.append(f" [{idx:4d}] word={word!r}, raw_dt={dt_preview}")
        print(f" [{idx:4d}] word={word!r:<20s} raw_dt NOT FOUND ***")

if not_found_details:
    check6_issues.append(f"{len(not_found_details)}/{len(sample_indices_6)} raw_dt values not found in HTML")

# The audit trail must cover the TSV exactly: same number of records...
if len(audit_entries) != len(tsv_entries):
    check6_issues.append(
        f"Audit trail has {len(audit_entries)} records but TSV has {len(tsv_entries)} entries")
    print(f" Count mismatch: audit={len(audit_entries)}, TSV={len(tsv_entries)}")
else:
    print(f" Count match: {len(audit_entries)} audit records = {len(tsv_entries)} TSV entries")

# ...and the same set of words.
audit_words = set(ae["word"] for ae in audit_entries)
tsv_words = set(e["Word"] for e in tsv_entries)
only_in_audit = audit_words - tsv_words
only_in_tsv = tsv_words - audit_words
if only_in_audit or only_in_tsv:
    check6_issues.append(
        f"Word mismatch: {len(only_in_audit)} only in audit, {len(only_in_tsv)} only in TSV")
    print(f" Word mismatch: {len(only_in_audit)} audit-only, {len(only_in_tsv)} TSV-only")
else:
    print(" Word sets match perfectly between audit and TSV")

check6_pass = len(check6_issues) == 0
if check6_issues:
    for issue in check6_issues:
        print(f" ISSUE: {issue}")

results["Check 6: Audit Trail Integrity"] = "PASS" if check6_pass else f"FAIL ({len(check6_issues)} issues)"
print(f"\n RESULT: {'PASS' if check6_pass else 'FAIL'}")


print("\n" + "=" * 70)
print("AUDIT REPORT SUMMARY")
print("=" * 70)

all_pass = True
for check_name, result in results.items():
    if result != "PASS":
        all_pass = False
    print(f" {check_name:<50s} {result}")

print("\n" + "-" * 70)
overall = "PASS -- Data extraction is LEGITIMATE" if all_pass else "FAIL -- Issues found (see above)"
print(f" OVERALL VERDICT: {overall}")
print("=" * 70)