#!/usr/bin/env python3
"""
Phase 2 verification: corrected URL patterns for eDiAna and deeper checks.
"""

import json
import os
import random
import time
import sys
import io
from pathlib import Path

# Force UTF-8 output so non-Latin lexical forms (Avestan, PIE, etc.) print
# cleanly even when the console defaults to a legacy code page.
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8', errors='replace')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8', errors='replace')

import requests
import urllib3
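# check_url() below uses verify=False, so silence the per-request
# InsecureRequestWarning that urllib3 would otherwise emit.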
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
from urllib.parse import quote

AUDIT_DIR = Path(r"C:\Users\alvin\hf-ancient-scripts\data\training\audit_trails")

HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
}
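# Seconds to sleep between remote requests, to stay polite to the source sites.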
REQUEST_DELAY = 1.5
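# Fixed seed so the randomly sampled entries are the same on every run.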
random.seed(42)


def load_audit_trail(filepath):
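    """Read a JSONL audit trail, skipping blank lines and unparseable records."""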
    entries = []
    with open(filepath, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                try:
                    entries.append(json.loads(line))
                except json.JSONDecodeError:
                    pass
    return entries


def check_url(url, timeout=15):
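    """GET a URL; return (status_code, ok, body). On errors, status is None
    and the body slot carries the exception text. TLS verification is
    disabled (verify=False), matching the warning suppression above."""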
    try:
        resp = requests.get(url, headers=HEADERS, timeout=timeout, allow_redirects=True, verify=False)
        # Return the full body: callers do substring checks against it, and
        # truncating to a short preview would cause false negatives on long
        # dictionary pages.
        return resp.status_code, resp.ok, resp.text if resp.ok else ""
    except Exception as e:
        return None, False, str(e)


# ============================================================
# 1. eDiAna with CORRECT URL pattern
# ============================================================
def verify_ediana_corrected():
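    """Fetch sampled eDiAna lemma pages and report whether each recorded
    word actually appears in the returned page."""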
    print("=" * 70)
    print("eDiAna VERIFICATION (Corrected URL: /dictionary.php?lemma=ID)")
    print("=" * 70)

    audit_files = sorted(AUDIT_DIR.glob("ediana_*.jsonl"))

    for af in audit_files:
        lang = af.stem.replace("ediana_", "")
        entries = load_audit_trail(af)
        if not entries:
            print(f"\n  --- {lang}: EMPTY ---")
            continue

        print(f"\n  --- Language: {lang} ({len(entries)} entries) ---")
        samples = random.sample(entries, min(5, len(entries)))

        for entry in samples:
            word = entry.get("word", "?")
            gloss = entry.get("gloss", "?")
            entry_id = entry.get("entry_id", "")

            if entry_id:
                url = f"https://ediana.gwi.uni-muenchen.de/dictionary.php?lemma={entry_id}"
                status, ok, text = check_url(url, timeout=15)

                # Check if the word appears on the page
                word_found = False
                if ok and text:
                    # Clean word for matching (remove special chars)
                    clean_word = word.replace("*", "").replace("-", "").replace("(", "").replace(")", "").strip()
                    if clean_word and len(clean_word) >= 2:
                        word_found = clean_word.lower() in text.lower()

                status_str = f"HTTP {status}"
                if ok:
                    status_str += f" | word_on_page={'YES' if word_found else 'NO'}"
                print(f"    id={entry_id:5s}  word={word[:25]:25s}  gloss={gloss[:30]:30s}  -> {status_str}")
                time.sleep(REQUEST_DELAY)


# ============================================================
# 2. Palaeolexicon deeper verification (content match)
# ============================================================
def verify_palaeolexicon_deep():
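    """Fetch Palaeolexicon word pages for known (or sampled) Etruscan entries
    and check that both the word and part of its gloss appear on the page."""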
    print("\n" + "=" * 70)
    print("PALAEOLEXICON DEEP VERIFICATION (gloss + word match)")
    print("=" * 70)

    # Pick Etruscan (ett) for deep check
    entries = load_audit_trail(AUDIT_DIR / "palaeolexicon_ett.jsonl")
    print(f"\n  Etruscan: {len(entries)} entries")

    # Pick well-known Etruscan words
    known_words = ["ais", "clan", "sec", "huth", "avil", "tur", "lautni"]
    matches = [e for e in entries if e.get("word") in known_words]

    if not matches:
        # Just sample some
        matches = random.sample(entries, min(5, len(entries)))

    for entry in matches[:7]:
        word = entry.get("word", "?")
        gloss = entry.get("gloss", "?")
        word_id = entry.get("word_id")

        if word_id:
            url = f"https://www.palaeolexicon.com/Word/Show/{word_id}"
            status, ok, text = check_url(url, timeout=15)

            word_found = ok and word.lower() in text.lower()
            gloss_parts = [g.strip() for g in gloss.split(",")]
            gloss_found = ok and any(g.lower() in text.lower() for g in gloss_parts if len(g) > 2)

            print(f"    word={word:15s}  gloss={gloss:30s}  word_id={word_id}")
            print(f"      URL: {url}")
            print(f"      word_match={word_found}  gloss_match={gloss_found}  HTTP={status}")
            time.sleep(REQUEST_DELAY)


# ============================================================
# 3. TIR Raetica with corrected URL (tir.univie.ac.at)
# ============================================================
def verify_tir_corrected():
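    """Probe the TIR Raetica wiki: check the Word category page, then look up
    sampled words and distinguish real content pages from search redirects."""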
    print("\n" + "=" * 70)
    print("TIR RAETICA (Corrected URL: tir.univie.ac.at)")
    print("=" * 70)

    entries = load_audit_trail(AUDIT_DIR / "tir_raetica_xrr.jsonl")
    print(f"  {len(entries)} entries")

    # Check if wiki has a Word category
    cat_url = "https://tir.univie.ac.at/wiki/Category:Word"
    status, ok, text = check_url(cat_url)
    print(f"\n  Category:Word -> HTTP {status}")

    # Try some word pages
    samples = random.sample(entries, min(5, len(entries)))
    for entry in samples:
        word = entry.get("word", "?")
        gloss = entry.get("gloss", "?")

        # TIR wiki words might be under specific naming conventions
        # Try the raw word
        url = f"https://tir.univie.ac.at/wiki/{quote(word)}"
        status, ok, text = check_url(url, timeout=15)

        word_found = ok and word.lower() in text.lower()
        status_str = f"HTTP {status}"
        if ok:
            # Check if the page is a real content page or just a search redirect
            is_search = "There were no results" in text or "Search results" in text
            status_str += f" | content={'SEARCH_REDIRECT' if is_search else 'CONTENT_PAGE'}"
            status_str += f" | word_on_page={'YES' if word_found else 'NO'}"

        print(f"    word={word:20s}  gloss={gloss[:25]:25s}  -> {status_str}")
        time.sleep(REQUEST_DELAY)


# ============================================================
# 4. ORACC/eCUT glossary verification
# ============================================================
def verify_oracc_deep():
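    """Probe candidate ORACC/eCUT URL patterns, then print sampled Urartian
    audit-trail entries for manual inspection."""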
    print("\n" + "=" * 70)
    print("ORACC/eCUT DEEP VERIFICATION")
    print("=" * 70)

    entries = load_audit_trail(AUDIT_DIR / "oracc_ecut_xur.jsonl")
    print(f"  {len(entries)} entries")

    # Try various ORACC URL patterns for eCUT glossaries
    urls_to_try = [
        "https://oracc.museum.upenn.edu/ecut/",
        "https://oracc.museum.upenn.edu/ecut/corpus",
        "https://oracc.museum.upenn.edu/ecut/pager",
        # Try the Urartian-specific glossary
        "https://oracc.museum.upenn.edu/ecut/xur/index.html",
        "https://oracc.museum.upenn.edu/ecut/glossary/xur",
    ]

    for url in urls_to_try:
        status, ok, text = check_url(url, timeout=15)
        has_content = ok and len(text) > 500
        print(f"  {url}")
        print(f"    -> HTTP {status} | content={'YES' if has_content else 'NO/SMALL'}")
        time.sleep(REQUEST_DELAY)

    # Also try the main ORACC search
    # ORACC has a JSON API: /ecut/cbd/xxx/xxx.json
    json_urls = [
        "https://oracc.museum.upenn.edu/ecut/signlist",
    ]
    for url in json_urls:
        status, ok, text = check_url(url, timeout=15)
        print(f"  {url}")
        print(f"    -> HTTP {status}")
        time.sleep(REQUEST_DELAY)

    print("\n  Sample Urartian words in audit trail:")
    samples = random.sample(entries, min(10, len(entries)))
    for entry in samples:
        word = entry.get("word", "?")
        gloss = entry.get("gloss", "?")
        print(f"    word={word:20s}  gloss={gloss}")


# ============================================================
# 5. Avesta.org data quality deep check
# ============================================================
def verify_avesta_deep():
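    """Check avesta.org audit-trail words against the dictionary page and
    summarize the data-quality concern with the word/gloss pairing."""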
    print("\n" + "=" * 70)
    print("AVESTA.ORG DEEP VERIFICATION")
    print("=" * 70)

    entries = load_audit_trail(AUDIT_DIR / "avesta_org_ave.jsonl")

    # Check the dictionary page for known Avestan words
    dict_url = "https://avesta.org/avdict/avdict.htm"
    status, ok, text = check_url(dict_url, timeout=15)
    print(f"  Dictionary page: HTTP {status} | length={len(text) if ok else 0} chars")

    # Check if audit trail words appear in the dictionary
    real_entries = [e for e in entries if e.get("word") not in ("NOTE", "Example", "swift", "AVESTA")]
    print(f"\n  Checking {len(real_entries)} entries against dictionary page:")

    found_count = 0
    for entry in real_entries:
        word = entry.get("word", "")
        gloss = entry.get("gloss", "")
        if ok and word and len(word) >= 2:
            in_dict = word.lower() in text.lower()
            if in_dict:
                found_count += 1
            print(f"    word={word:20s}  gloss={gloss:20s}  in_dictionary={'YES' if in_dict else 'NO'}")

    print(f"\n  Found {found_count}/{len(real_entries)} words in dictionary page")

    # Critical check: the entries look like they were scraped as word-pairs
    # e.g., word="aiwi" gloss="druxt" -- this looks like the scraper grabbed
    # consecutive words from a text, not word-definition pairs
    print("\n  DATA QUALITY ASSESSMENT:")
    print("  The avesta.org audit trail entries have a suspicious pattern:")
    print("  - 'word' and 'gloss' fields often look like adjacent Avestan words,")
    print("    not word-definition pairs.")
    print("  - Examples: word='aiwi' gloss='druxt' (both are Avestan words)")
    print("  - Examples: word='aurvat' gloss='aspa' (both are Avestan words)")
    print("  - The 'gloss' field appears to contain the NEXT word in the text,")
    print("    not an English translation.")
    print("  - NOTE: avesta.org data is NOT used in the ave.tsv lexicon (which uses wiktionary)")
    print("  - The audit trail exists but was apparently not imported into the lexicon")


# ============================================================
# 6. Wiktionary expansion deep verification
# ============================================================
def verify_wiktionary_deep():
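    """Fetch the Wiktionary source pages for sampled PIE and Ugaritic entries
    and confirm they resolve (and, for PIE, that the word appears there)."""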
    print("\n" + "=" * 70)
    print("WIKTIONARY EXPANSION DEEP VERIFICATION")
    print("=" * 70)

    # Check PIE reconstructions
    entries = load_audit_trail(AUDIT_DIR / "wiktionary_expansion_ine-pro.jsonl")
    print(f"\n  PIE (ine-pro): {len(entries)} entries")

    samples = random.sample(entries, min(5, len(entries)))
    for entry in samples:
        word = entry.get("word", "?")
        page_title = entry.get("page_title", "")
        if page_title:
            url = f"https://en.wiktionary.org/wiki/{quote(page_title)}"
            status, ok, text = check_url(url, timeout=15)
            word_found = ok and word in text
            print(f"    word={word:20s}  page={page_title[:40]:40s}  HTTP={status}  found={word_found}")
            time.sleep(REQUEST_DELAY)

    # Check Ugaritic
    entries = load_audit_trail(AUDIT_DIR / "wiktionary_expansion_uga.jsonl")
    print(f"\n  Ugaritic (uga): {len(entries)} entries")

    samples = random.sample(entries, min(5, len(entries)))
    for entry in samples:
        word = entry.get("word", "?")
        page_title = entry.get("page_title", "")
        if page_title:
            url = f"https://en.wiktionary.org/wiki/{quote(page_title)}"
            status, ok, text = check_url(url, timeout=15)
            print(f"    word={word:20s}  page={page_title[:40]:40s}  HTTP={status}")
            time.sleep(REQUEST_DELAY)


def main():
    print("PHASE 2: DEEP SOURCE VERIFICATION")
    print("=" * 70)
    print(f"Date: {time.strftime('%Y-%m-%d %H:%M:%S')}\n")

    verify_ediana_corrected()
    verify_palaeolexicon_deep()
    verify_tir_corrected()
    verify_oracc_deep()
    verify_avesta_deep()
    verify_wiktionary_deep()

    print("\n" + "=" * 70)
    print("PHASE 2 COMPLETE")
    print("=" * 70)


if __name__ == "__main__":
    main()