"""
Adversarial audit of Team A's avesta.org extraction.
Checks 1-6: Source verification, code inspection, parse verification,
transformation verification, output format, audit trail integrity.
"""
from __future__ import annotations
import csv
import hashlib
import html
import json
import os
import random
import re
import sys
import unicodedata
import urllib.request
from pathlib import Path
# Force UTF-8 on Windows
if sys.platform == "win32":
sys.stdout.reconfigure(encoding="utf-8", errors="replace")
sys.stderr.reconfigure(encoding="utf-8", errors="replace")
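# The output below includes non-ASCII characters (theta, s-hacek, macron
# vowels, etc.); without UTF-8 the default Windows console codepage would
# raise UnicodeEncodeError on these prints.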
PROJECT_ROOT = Path(__file__).resolve().parent.parent
RAW_HTML = PROJECT_ROOT / "data" / "training" / "raw" / "avesta_org_raw.html"
EXTRACTED_TSV = PROJECT_ROOT / "data" / "training" / "raw" / "avesta_org_extracted.tsv"
AUDIT_JSONL = PROJECT_ROOT / "data" / "training" / "audit_trails" / "avesta_org_ave_new.jsonl"
SCRIPT_PATH = PROJECT_ROOT / "scripts" / "scrape_avesta_org.py"
URL = "https://avesta.org/avdict/avdict.htm"
random.seed(42) # Reproducible sampling
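# Seeding once at module level means Checks 3, 4, and 6 draw the same sample
# indices on every run, so any reported failure is reproducible.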
results = {}
# ===========================================================================
# CHECK 1: Source Verification
# ===========================================================================
print("=" * 70)
print("CHECK 1: Source Verification")
print("=" * 70)
try:
# Read the cached HTML
with open(RAW_HTML, "rb") as f:
cached_bytes = f.read()
cached_hash = hashlib.sha256(cached_bytes).hexdigest()
cached_size = len(cached_bytes)
print(f" Cached HTML size: {cached_size:,} bytes")
print(f" Cached SHA256: {cached_hash[:16]}...")
# Re-fetch from the live URL
print(f" Re-fetching {URL} ...")
req = urllib.request.Request(URL, headers={"User-Agent": "Mozilla/5.0 (audit scraper)"})
    with urllib.request.urlopen(req, timeout=60) as resp:
        live_bytes = resp.read()
live_hash = hashlib.sha256(live_bytes).hexdigest()
live_size = len(live_bytes)
print(f" Live HTML size: {live_size:,} bytes")
print(f" Live SHA256: {live_hash[:16]}...")
# Compare
if cached_hash == live_hash:
print(" EXACT MATCH: Cached HTML is byte-identical to live source.")
check1_pass = True
else:
# Allow for minor differences (page may have been updated)
# But check for structural similarity
cached_text = cached_bytes.decode("utf-8", errors="replace")
live_text = live_bytes.decode("utf-8", errors="replace")
# Check that the core dictionary content is present in both
has_title_cached = "Avestan Dictionary" in cached_text
has_title_live = "Avestan Dictionary" in live_text
has_dcta_cached = 'NAME="dcta"' in cached_text or 'name="dcta"' in cached_text.lower()
has_dcta_live = 'NAME="dcta"' in live_text or 'name="dcta"' in live_text.lower()
has_dt_cached = "<DT>" in cached_text
has_dt_live = "<DT>" in live_text
# Count DT entries in both
dt_count_cached = cached_text.upper().count("<DT>")
dt_count_live = live_text.upper().count("<DT>")
print(f" NOT byte-identical (page may have been updated)")
print(f" Cached: title={'Y' if has_title_cached else 'N'}, dcta={'Y' if has_dcta_cached else 'N'}, DT count={dt_count_cached}")
print(f" Live: title={'Y' if has_title_live else 'N'}, dcta={'Y' if has_dcta_live else 'N'}, DT count={dt_count_live}")
# Structural match: same title, same anchor, similar DT count
if (has_title_cached and has_title_live and
has_dcta_cached and has_dcta_live and
has_dt_cached and has_dt_live and
abs(dt_count_cached - dt_count_live) < 50):
print(" STRUCTURAL MATCH: Both are legitimate Avestan dictionary pages.")
check1_pass = True
else:
print(" FAIL: Significant structural differences between cached and live HTML")
check1_pass = False
# Verify it's actually a dictionary (not a random page)
cached_text = cached_bytes.decode("utf-8", errors="replace")
has_author = "Joseph H. Peterson" in cached_text
has_dict_entries = cached_text.upper().count("<DT>") > 100
has_avestan_words = any(w in cached_text for w in ["baodh", "ahura", "mazda", "zarathu"])
print(f" Content checks: author={'Y' if has_author else 'N'}, entries={'Y' if has_dict_entries else 'N'}, avestan_words={'Y' if has_avestan_words else 'N'}")
if not (has_author and has_dict_entries and has_avestan_words):
print(" FAIL: Cached HTML does not look like a legitimate Avestan dictionary")
check1_pass = False
except Exception as e:
print(f" ERROR: {e}")
check1_pass = False
results["Check 1: Source Verification"] = "PASS" if check1_pass else "FAIL"
print(f"\n RESULT: {'PASS' if check1_pass else 'FAIL'}")
# ===========================================================================
# CHECK 2: Code Inspection
# ===========================================================================
print("\n" + "=" * 70)
print("CHECK 2: Code Inspection")
print("=" * 70)
with open(SCRIPT_PATH, "r", encoding="utf-8") as f:
script_code = f.read()
check2_issues = []
# 2a: Must use urllib or requests for HTTP
has_urllib = "urllib.request" in script_code
has_requests = "import requests" in script_code
if not (has_urllib or has_requests):
check2_issues.append("No HTTP library import found (urllib or requests)")
print(f" HTTP library: urllib={'Y' if has_urllib else 'N'}, requests={'Y' if has_requests else 'N'}")
# 2b: Check for actual URL fetch call
has_urlopen = "urlopen" in script_code or "requests.get" in script_code
if not has_urlopen:
check2_issues.append("No urlopen() or requests.get() call found")
print(f" URL fetch call: {'Y' if has_urlopen else 'N'}")
# 2c: Check for hardcoded word lists (suspiciously long string literals or list literals)
# Look for patterns like: ["word1", "word2", ...] with more than 10 items
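# Illustrative example (not from the scraper) of a literal the pattern below
# would flag:
#   words = ["word1", "word2", "word3", ...]   # 10 or more quoted items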
hardcoded_list_pattern = re.findall(r'\[(?:"[^"]+",?\s*){10,}\]', script_code)
if hardcoded_list_pattern:
check2_issues.append(f"Found {len(hardcoded_list_pattern)} suspicious hardcoded word lists")
print(f" Hardcoded word lists: {len(hardcoded_list_pattern)} found")
# 2d: Check for f.write() calls that embed literal tab-separated data rows.
# A header write like f.write("Word\tIPA\tSCA\t...") is legitimate, so the
# case-sensitive pattern matches only lowercase, word-like fields that look
# like actual dictionary entries.
literal_data_writes = re.findall(r'f\.write\(["\'][a-z]+\\t[a-z]+\\t', script_code)
# The case-insensitive variant also matches capitalized header writes, so the
# difference between the two counts is the number of header writes.
literal_data_writes_ci = re.findall(r'f\.write\(["\'][a-z]+\\t[a-z]+\\t', script_code, re.IGNORECASE)
header_writes = len(literal_data_writes_ci) - len(literal_data_writes)
if literal_data_writes:
check2_issues.append(f"Found {len(literal_data_writes)} hardcoded data write patterns (excluding headers)")
print(f" Literal data writes: {len(literal_data_writes)} actual data, {header_writes} header(s) (OK)")
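# Illustrative example (not from the scraper): the case-sensitive pattern
# flags f.write("apa\tapa\t...") (lowercase, data-like) but not the header
# write f.write("Word\tIPA\tSCA\t...") (capitalized field names).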
# 2e: Check for embedded data dictionaries with more than 20 word entries
# (DIGRAPH_MAP and AVESTA_ORG_CHAR_MAP are legitimate small maps)
# Look for dict literals with word-like keys
large_dict_pattern = re.findall(r'\{(?:["\'][a-z]{3,}["\']:\s*["\'][^"\']+["\'],?\s*){20,}\}', script_code, re.IGNORECASE)
if large_dict_pattern:
check2_issues.append(f"Found {len(large_dict_pattern)} suspiciously large embedded data dicts")
print(f" Large embedded dicts: {len(large_dict_pattern)} found")
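# Illustrative example: a literal like {"abc": "x", "def": "y", ...} with 20+
# word-like keys would be flagged; the scraper's legitimate character maps
# (DIGRAPH_MAP, AVESTA_ORG_CHAR_MAP) stay well under that threshold.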
# 2f: Verify the script parses HTML, not just generates data
has_regex_parsing = "re.compile" in script_code or "re.search" in script_code or "re.findall" in script_code
has_html_parsing = "html.unescape" in script_code or "BeautifulSoup" in script_code or "<DT>" in script_code
if not (has_regex_parsing and has_html_parsing):
check2_issues.append("No HTML parsing logic found")
print(f" HTML parsing: regex={'Y' if has_regex_parsing else 'N'}, html_handling={'Y' if has_html_parsing else 'N'}")
# 2g: Check line count - a data-fabrication script would be much shorter
script_lines = script_code.count('\n')
print(f" Script length: {script_lines} lines")
if script_lines < 50:
check2_issues.append("Script suspiciously short for a real parser")
check2_pass = len(check2_issues) == 0
if check2_issues:
for issue in check2_issues:
print(f" ISSUE: {issue}")
results["Check 2: Code Inspection"] = "PASS" if check2_pass else "FAIL"
print(f"\n RESULT: {'PASS' if check2_pass else 'FAIL'}")
# ===========================================================================
# CHECK 3: Parse Verification (sample 20 entries)
# ===========================================================================
print("\n" + "=" * 70)
print("CHECK 3: Parse Verification (20 sampled entries)")
print("=" * 70)
# Load TSV entries
tsv_entries = []
with open(EXTRACTED_TSV, "r", encoding="utf-8") as f:
reader = csv.DictReader(f, delimiter="\t")
for row in reader:
tsv_entries.append(row)
print(f" Loaded {len(tsv_entries)} TSV entries")
# Load audit trail (for raw_dt values)
audit_entries = []
with open(AUDIT_JSONL, "r", encoding="utf-8") as f:
for line in f:
audit_entries.append(json.loads(line))
print(f" Loaded {len(audit_entries)} audit entries")
# Build lookup from word -> audit record
audit_by_word = {}
for ae in audit_entries:
audit_by_word[ae["word"]] = ae
# Load raw HTML
with open(RAW_HTML, "r", encoding="utf-8", errors="replace") as f:
raw_html_text = f.read()
# HTML-unescape for comparison
decoded_html = html.unescape(raw_html_text)
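# html.unescape converts entity references to literal characters (e.g.
# "&ocirc;" becomes o-circumflex), so strings recorded in the audit trail
# can be matched against this text directly.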
# Sample 20 random entries from TSV
sample_indices = sorted(random.sample(range(len(tsv_entries)), min(20, len(tsv_entries))))
check3_failures = []
check3_details = []
for idx in sample_indices:
entry = tsv_entries[idx]
word = entry["Word"]
# Find the corresponding audit record
audit_rec = audit_by_word.get(word)
if not audit_rec:
check3_failures.append(f" [{idx}] {word!r}: No audit record found")
check3_details.append({"idx": idx, "word": word, "status": "NO_AUDIT"})
continue
# Get the raw_dt from audit (what was in the HTML)
raw_dt = audit_rec.get("raw_dt", "")
word_raw = audit_rec.get("word_raw_avesta_org", "")
# Search for word_raw in the decoded HTML content
# The word_raw should appear in a DT context
# We need to handle that the HTML uses entities like &ocirc; that get decoded
found_in_html = False
# Strategy 1: Search for raw_dt text in decoded HTML
if raw_dt and raw_dt in decoded_html:
found_in_html = True
# Strategy 2: Search for word_raw in decoded HTML (may have been transformed)
if not found_in_html and word_raw:
# Check if word_raw appears anywhere in the decoded HTML
if word_raw in decoded_html:
found_in_html = True
    # Strategy 3: For words that use digraphs (dh -> delta, sh -> s-hacek),
    # word_raw in the audit trail is the avesta.org form BEFORE our
    # normalization, so also search the original HTML (before entity unescaping).
if not found_in_html and word_raw:
# HTML entities version - convert back
if word_raw in raw_html_text:
found_in_html = True
# Strategy 4: Search for the raw_dt content which should appear verbatim
if not found_in_html and raw_dt:
# raw_dt might have [root] stripped, search for just the word part
        dt_word_part = raw_dt.split("[")[0].strip().lstrip(".").strip()
if dt_word_part and dt_word_part in decoded_html:
found_in_html = True
status = "FOUND" if found_in_html else "NOT_FOUND"
detail = {"idx": idx, "word": word, "word_raw": word_raw, "raw_dt": raw_dt, "status": status}
check3_details.append(detail)
if found_in_html:
print(f" [{idx:4d}] {word:<25s} raw={word_raw!r:<25s} -> FOUND in HTML")
else:
check3_failures.append(f" [{idx:4d}] {word:<25s} raw={word_raw!r:<25s} -> NOT FOUND")
print(f" [{idx:4d}] {word:<25s} raw={word_raw!r:<25s} -> NOT FOUND ***")
check3_pass = len(check3_failures) == 0
if check3_failures:
print(f"\n FAILURES ({len(check3_failures)}):")
for fail in check3_failures:
print(f" {fail}")
results["Check 3: Parse Verification"] = "PASS" if check3_pass else f"FAIL ({len(check3_failures)}/20 not found)"
print(f"\n RESULT: {'PASS' if check3_pass else 'FAIL'}")
# ===========================================================================
# CHECK 4: Transformation Verification (sample 10 entries)
# ===========================================================================
print("\n" + "=" * 70)
print("CHECK 4: Transformation Verification (10 sampled entries)")
print("=" * 70)
# Import the transliteration module
sys.path.insert(0, str(PROJECT_ROOT / "scripts"))
sys.path.insert(0, str(PROJECT_ROOT / "cognate_pipeline" / "src"))
from transliteration_maps import transliterate, AVESTAN_MAP
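# transliterate() is resolved from one of the two paths added above
# (scripts/ or cognate_pipeline/src/); it maps the normalized romanization
# to IPA for the "ave" (Avestan) language code.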
# Deliberately do NOT import the scraper's normalization functions;
# re-implement them below so the verification is independent of Team A's code.
def audit_normalize_chars(text: str) -> str:
"""Independent re-implementation of character normalization."""
replacements = {
"\u00e2": "\u0101", # a-circumflex -> a-macron (long a)
"\u00ea": "\u0113", # e-circumflex -> e-macron
"\u00ee": "\u012b", # i-circumflex -> i-macron
"\u00f4": "\u014d", # o-circumflex -> o-macron
"\u00fb": "\u016b", # u-circumflex -> u-macron
"\u00e3": "\u0105", # a-tilde -> a-ogonek (nasalized)
"\u00e5": "\u0105", # a-ring -> a-ogonek
"\u00f1": "\u0144", # n-tilde -> n-acute
"\u00fd": "y", # y-acute -> y
}
return "".join(replacements.get(ch, ch) for ch in text)
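# Example: audit_normalize_chars("dâta") -> "dāta"
# (avesta.org's circumflex long vowels become the macron forms used in the TSV).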
def audit_convert_digraphs(text: str) -> str:
"""Independent re-implementation of digraph conversion."""
digraphs = {
"ngh": "\u014Bh", # ng + h
"ngv": "\u014B\u1D5B", # ng + labialized
"ng": "\u014B", # velar nasal
"sh": "\u0161", # s-hacek
"zh": "\u017E", # z-hacek
"th": "\u03B8", # theta
"dh": "\u03B4", # delta
"hv": "x\u1D5B", # labialized x
"kh": "\u03B3", # gamma
"h'": "h",
}
for digraph, replacement in sorted(digraphs.items(), key=lambda x: -len(x[0])):
text = text.replace(digraph, replacement)
return text
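# Example: audit_convert_digraphs("zarathushtra") -> "zaraθuštra"
# ("sh" -> s-hacek, "th" -> theta). Sorting by descending key length ensures
# e.g. "ngh" is consumed whole rather than as "ng" + "h".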
def audit_process_word(word_raw: str) -> str:
"""Independent IPA computation for verification."""
word = audit_normalize_chars(word_raw)
word = audit_convert_digraphs(word)
word = word.strip()
ipa = transliterate(word, "ave")
return ipa
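# The stage order here is assumed to mirror scrape_avesta_org.py: normalize
# single characters, then collapse digraphs, then transliterate to IPA.
# A divergence at any stage surfaces as a MISMATCH in the loop below.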
# Sample 10 entries for transformation check
sample_indices_4 = sorted(random.sample(range(len(tsv_entries)), min(10, len(tsv_entries))))
check4_failures = []
for idx in sample_indices_4:
entry = tsv_entries[idx]
word = entry["Word"]
reported_ipa = entry["IPA"]
# Get the raw word from audit trail
audit_rec = audit_by_word.get(word)
if not audit_rec:
check4_failures.append(f" [{idx}] {word}: No audit record for raw word lookup")
continue
word_raw = audit_rec.get("word_raw_avesta_org", word)
# Compute IPA independently
expected_ipa = audit_process_word(word_raw)
match = (expected_ipa == reported_ipa)
# Check that IPA differs from word for entries where map should transform
# (e.g., words with 'sh' should get s-hacek, words with long vowels should get colon)
has_transformable = any(c in word_raw for c in ["\u00e2", "\u00ea", "\u00ee", "\u00f4", "\u00fb"])
has_digraphs = any(d in word_raw for d in ["sh", "zh", "th", "dh"])
if has_transformable or has_digraphs:
ipa_differs = (reported_ipa != word_raw)
if not ipa_differs:
check4_failures.append(
f" [{idx}] {word}: IPA={reported_ipa!r} EQUALS raw word {word_raw!r} "
f"(should differ due to transformable chars)")
if match:
print(f" [{idx:4d}] raw={word_raw!r:<25s} -> IPA={reported_ipa!r:<25s} MATCH")
else:
print(f" [{idx:4d}] raw={word_raw!r:<25s} -> reported={reported_ipa!r}, expected={expected_ipa!r} MISMATCH ***")
check4_failures.append(
f" [{idx}] {word}: reported IPA={reported_ipa!r} != expected={expected_ipa!r}")
check4_pass = len(check4_failures) == 0
if check4_failures:
print(f"\n FAILURES ({len(check4_failures)}):")
for fail in check4_failures:
print(f" {fail}")
results["Check 4: Transformation Verification"] = "PASS" if check4_pass else f"FAIL ({len(check4_failures)} issues)"
print(f"\n RESULT: {'PASS' if check4_pass else 'FAIL'}")
# ===========================================================================
# CHECK 5: Output Format
# ===========================================================================
print("\n" + "=" * 70)
print("CHECK 5: Output Format")
print("=" * 70)
check5_issues = []
# 5a: Verify header
with open(EXTRACTED_TSV, "r", encoding="utf-8") as f:
header_line = f.readline().strip()
expected_header = "Word\tIPA\tSCA\tSource\tConcept_ID\tCognate_Set_ID"
if header_line == expected_header:
print(f" Header: CORRECT")
else:
check5_issues.append(f"Header mismatch: got {header_line!r}")
print(f" Header: WRONG (got {header_line!r})")
# 5b: Verify Source column is "avesta_org" for ALL entries
non_avesta_sources = [e for e in tsv_entries if e["Source"] != "avesta_org"]
if non_avesta_sources:
check5_issues.append(f"{len(non_avesta_sources)} entries have wrong Source")
print(f" Source column: {len(non_avesta_sources)} entries NOT 'avesta_org'")
else:
print(f" Source column: ALL {len(tsv_entries)} entries are 'avesta_org'")
# 5c: Check for duplicates (by Word column)
words = [e["Word"] for e in tsv_entries]
word_counts = {}
for w in words:
word_counts[w] = word_counts.get(w, 0) + 1
duplicates = {w: c for w, c in word_counts.items() if c > 1}
if duplicates:
check5_issues.append(f"{len(duplicates)} duplicate words found")
print(f" Duplicates: {len(duplicates)} duplicate words")
for w, c in list(duplicates.items())[:5]:
print(f" '{w}' appears {c} times")
else:
print(f" Duplicates: NONE (all {len(tsv_entries)} words unique)")
# 5d: Verify entry count is not suspiciously round
entry_count = len(tsv_entries)
is_round = (entry_count % 100 == 0) or (entry_count % 1000 == 0) or (entry_count % 500 == 0)
print(f" Entry count: {entry_count} (suspiciously round: {'YES' if is_round else 'NO'})")
if is_round:
check5_issues.append(f"Entry count {entry_count} is suspiciously round")
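# Rationale: hand-fabricated datasets tend to land on round totals, while a
# genuine parse of an arbitrary dictionary page almost never does.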
# 5e: Check all rows have correct number of fields
malformed_rows = 0
with open(EXTRACTED_TSV, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
if i == 0:
continue # skip header
        # rstrip("\n") rather than strip(): strip() would also remove edge tabs
        # and silently hide empty first or last fields.
        fields = line.rstrip("\n").split("\t")
if len(fields) != 6:
malformed_rows += 1
if malformed_rows <= 3:
print(f" Row {i}: {len(fields)} fields (expected 6): {line.strip()[:80]}")
if malformed_rows:
check5_issues.append(f"{malformed_rows} malformed rows")
print(f" Malformed rows: {malformed_rows}")
else:
print(f" Row format: ALL rows have 6 fields")
# 5f: Check Concept_ID and Cognate_Set_ID are "-" (expected for this source)
non_dash_concept = sum(1 for e in tsv_entries if e["Concept_ID"] != "-")
non_dash_cognate = sum(1 for e in tsv_entries if e["Cognate_Set_ID"] != "-")
print(f" Concept_ID='-': {len(tsv_entries) - non_dash_concept}/{len(tsv_entries)}")
print(f" Cognate_Set_ID='-': {len(tsv_entries) - non_dash_cognate}/{len(tsv_entries)}")
check5_pass = len(check5_issues) == 0
if check5_issues:
for issue in check5_issues:
print(f" ISSUE: {issue}")
results["Check 5: Output Format"] = "PASS" if check5_pass else f"FAIL ({len(check5_issues)} issues)"
print(f"\n RESULT: {'PASS' if check5_pass else 'FAIL'}")
# ===========================================================================
# CHECK 6: Audit Trail Integrity
# ===========================================================================
print("\n" + "=" * 70)
print("CHECK 6: Audit Trail Integrity (20 sampled records)")
print("=" * 70)
check6_issues = []
# 6a: Verify JSONL structure
required_fields = {"word", "word_raw_avesta_org", "ipa", "sca", "source", "raw_dt", "raw_dd"}
structure_ok = True
for i, rec in enumerate(audit_entries[:5]):
    missing = required_fields - set(rec.keys())
    if missing:
        structure_ok = False
        check6_issues.append(f"Record {i} missing fields: {missing}")
        print(f" Record {i}: MISSING {missing}")
if structure_ok:
    print(" Structure check: all required fields present (first 5 records)")
# 6b: Sample 20 audit records and verify raw_dt/raw_dd appear in HTML
sample_indices_6 = sorted(random.sample(range(len(audit_entries)), min(20, len(audit_entries))))
found_count = 0
not_found_details = []
for idx in sample_indices_6:
rec = audit_entries[idx]
raw_dt = rec.get("raw_dt", "")
raw_dd = rec.get("raw_dd", "")
word = rec.get("word", "")
word_raw = rec.get("word_raw_avesta_org", "")
# The raw_dt should appear in the decoded HTML
dt_found = False
dd_found = False
# raw_dt may be like "adha [-]" which appears in the decoded HTML
if raw_dt:
# For continuation entries, raw_dt starts with "... "
dt_search = raw_dt.lstrip(". ").strip()
if dt_search in decoded_html:
dt_found = True
# Also try the word_raw directly
elif word_raw and word_raw in decoded_html:
dt_found = True
if raw_dd:
# DD content should appear in the decoded HTML
        # Search on the first 60 characters of the DD (it may have trailing junk)
        dd_search = raw_dd[:60].strip()
if dd_search in decoded_html:
dd_found = True
# Try shorter prefix
elif len(raw_dd) > 15:
dd_short = raw_dd[:30].strip()
if dd_short in decoded_html:
dd_found = True
if dt_found:
found_count += 1
print(f" [{idx:4d}] word={word!r:<20s} raw_dt found, raw_dd {'found' if dd_found else 'NOT found'}")
else:
dt_preview = repr(raw_dt[:50])
not_found_details.append(f" [{idx:4d}] word={word!r}, raw_dt={dt_preview}")
print(f" [{idx:4d}] word={word!r:<20s} raw_dt NOT FOUND ***")
if not_found_details:
check6_issues.append(f"{len(not_found_details)}/20 raw_dt values not found in HTML")
# 6c: Verify audit count matches TSV count
if len(audit_entries) != len(tsv_entries):
check6_issues.append(
f"Audit trail has {len(audit_entries)} records but TSV has {len(tsv_entries)} entries")
print(f" Count mismatch: audit={len(audit_entries)}, TSV={len(tsv_entries)}")
else:
print(f" Count match: {len(audit_entries)} audit records = {len(tsv_entries)} TSV entries")
# 6d: Verify words match between audit and TSV
audit_words = set(ae["word"] for ae in audit_entries)
tsv_words = set(e["Word"] for e in tsv_entries)
only_in_audit = audit_words - tsv_words
only_in_tsv = tsv_words - audit_words
if only_in_audit or only_in_tsv:
check6_issues.append(
f"Word mismatch: {len(only_in_audit)} only in audit, {len(only_in_tsv)} only in TSV")
print(f" Word mismatch: {len(only_in_audit)} audit-only, {len(only_in_tsv)} TSV-only")
else:
print(f" Word sets match perfectly between audit and TSV")
check6_pass = len(check6_issues) == 0
if check6_issues:
for issue in check6_issues:
print(f" ISSUE: {issue}")
results["Check 6: Audit Trail Integrity"] = "PASS" if check6_pass else f"FAIL ({len(check6_issues)} issues)"
print(f"\n RESULT: {'PASS' if check6_pass else 'FAIL'}")
# ===========================================================================
# FINAL REPORT
# ===========================================================================
print("\n" + "=" * 70)
print("AUDIT REPORT SUMMARY")
print("=" * 70)
all_pass = True
for check_name, result in results.items():
    if result != "PASS":
        all_pass = False
    print(f" {check_name:<50s} {result}")
print("\n" + "-" * 70)
overall = "PASS -- Data extraction is LEGITIMATE" if all_pass else "FAIL -- Issues found (see above)"
print(f" OVERALL VERDICT: {overall}")
print("=" * 70)
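# Exit nonzero if any check failed so a caller (e.g. CI) can act on the
# verdict programmatically rather than parsing stdout.
sys.exit(0 if all_pass else 1)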