# ancient-scripts-datasets / audit_sources.py
# Author: Alvin
# Commit 26786e3: "Add complete dataset: all sources, metadata, scripts, docs, and phylo enrichment"
"""
Exhaustive audit of Source values across ALL 1,135 lexicon TSV files.
Checks EVERY ROW in EVERY FILE.
"""
import csv
import os
import sys
from collections import defaultdict, Counter
# Directory containing the lexicon TSVs. Overridable via the LEXICON_DIR
# environment variable so the audit is not tied to one machine's layout;
# the default preserves the original hard-coded path.
LEXICON_DIR = os.environ.get(
    "LEXICON_DIR", r"C:\Users\alvin\hf-ancient-scripts\data\training\lexicons"
)
# Closed set of Source values considered valid; any other non-empty,
# non-dash value is reported as "unknown".
VALID_SOURCES = {
    "abvd", "wikipron", "northeuralex", "wold", "wiktionary", "wiktionary_cat",
    "sinotibetan", "palaeolexicon", "ediana", "oracc_ecut", "tir_raetica",
    "kaikki", "kassian2010", "kassian2010_basic", "seed", "wikipedia", "avesta_org",
}
# --- Aggregate tallies over every row of every file ---
total_entries = 0
valid_source_count = empty_source_count = 0
dash_source_count = unknown_source_count = whitespace_only_count = 0

# Frequency of each distinct raw (unstripped) Source string seen anywhere.
all_sources = Counter()

# Unknown source value -> list of (file, row_num, word) occurrences.
unknown_sources = defaultdict(list)

# Per-file problem rows.
files_with_empty = defaultdict(list)    # file -> [(row_num, word, ipa)]
files_with_dash = defaultdict(list)     # file -> [(row_num, word, ipa)]
files_with_unknown = defaultdict(list)  # file -> [(row_num, word, source_val)]

# file -> {"total", "valid", "empty", "dash", "unknown", "whitespace"} counts.
file_stats = {}

# Files whose header row has no Source column at all.
files_missing_source_col = []
# Collect the TSV files to audit (sorted for a deterministic report order).
tsv_files = sorted(f for f in os.listdir(LEXICON_DIR) if f.endswith(".tsv"))
print(f"Found {len(tsv_files)} TSV files to audit.\n")

for fname in tsv_files:
    fpath = os.path.join(LEXICON_DIR, fname)
    f_total = 0
    f_valid = 0
    f_empty = 0
    f_dash = 0
    f_unknown = 0
    f_whitespace = 0
    # newline="" per the csv module docs: without it, newlines embedded in
    # quoted fields are not interpreted correctly by the reader.
    with open(fpath, "r", encoding="utf-8", newline="") as f:
        reader = csv.DictReader(f, delimiter="\t")
        # File lacks a Source column entirely: record it, and count every
        # data row as an empty-source entry so totals stay consistent.
        if reader.fieldnames is None or "Source" not in reader.fieldnames:
            files_missing_source_col.append(fname)
            for _ in reader:  # still count rows
                total_entries += 1
                f_total += 1
                empty_source_count += 1
                f_empty += 1
            file_stats[fname] = {
                "total": f_total, "valid": 0, "empty": f_total,
                "dash": 0, "unknown": 0, "whitespace": 0,
            }
            continue
        for row_num, row in enumerate(reader, start=2):  # row 1 is the header
            total_entries += 1
            f_total += 1
            source = row.get("Source", "")
            word = row.get("Word", "???")
            ipa = row.get("IPA", "???")
            if source is None:
                # Short rows: DictReader fills missing trailing cells with None.
                source = ""
            raw_source = source
            source_stripped = source.strip()
            # Track every distinct raw value (padding variants stay distinct).
            all_sources[raw_source] += 1
            if source_stripped == "":
                empty_source_count += 1
                f_empty += 1
                files_with_empty[fname].append((row_num, word, ipa))
            elif source_stripped == "-":
                dash_source_count += 1
                f_dash += 1
                files_with_dash[fname].append((row_num, word, ipa))
            elif source_stripped != raw_source:
                # Non-empty value with leading/trailing whitespace: counted as
                # whitespace-padded AND as valid/unknown by its stripped form.
                whitespace_only_count += 1
                f_whitespace += 1
                if source_stripped in VALID_SOURCES:
                    valid_source_count += 1
                    f_valid += 1
                else:
                    unknown_source_count += 1
                    f_unknown += 1
                    unknown_sources[raw_source].append((fname, row_num, word))
                    files_with_unknown[fname].append((row_num, word, raw_source))
            elif source_stripped in VALID_SOURCES:
                valid_source_count += 1
                f_valid += 1
            else:
                unknown_source_count += 1
                f_unknown += 1
                unknown_sources[source_stripped].append((fname, row_num, word))
                files_with_unknown[fname].append((row_num, word, source_stripped))
    file_stats[fname] = {
        "total": f_total, "valid": f_valid, "empty": f_empty,
        "dash": f_dash, "unknown": f_unknown, "whitespace": f_whitespace,
    }
# ============================================================
# REPORT
# ============================================================
print("=" * 80)
print("EXHAUSTIVE SOURCE AUDIT REPORT")
print("=" * 80)
print(f"\nFiles scanned: {len(tsv_files)}")
print(f"Total entries (rows): {total_entries:,}")
# Guard against ZeroDivisionError when no rows were found at all;
# all percentages then print as 0.
denom = total_entries or 1
print(f" Valid source: {valid_source_count:,} ({100*valid_source_count/denom:.2f}%)")
print(f" Empty/missing source: {empty_source_count:,} ({100*empty_source_count/denom:.4f}%)")
print(f" Dash '-' as source: {dash_source_count:,} ({100*dash_source_count/denom:.4f}%)")
print(f" Unknown source value: {unknown_source_count:,} ({100*unknown_source_count/denom:.4f}%)")
print(f" (Whitespace-padded): {whitespace_only_count:,}")
# Section: files that have no Source column at all.
if files_missing_source_col:
    print(f"\n{'='*80}")
    print(f"FILES MISSING 'Source' COLUMN ENTIRELY: {len(files_missing_source_col)}")
    print("="*80)
    for f in files_missing_source_col:
        print(f" - {f}")

# Section: every distinct raw Source value, most frequent first.
print(f"\n{'='*80}")
print(f"ALL UNIQUE SOURCE VALUES (across entire database):")
print("="*80)
# most_common() sorts descending by count, stable on insertion order —
# same ordering as the explicit sorted(..., key=-count) form.
for src, count in all_sources.most_common():
    stripped = src.strip()
    if stripped == "":
        marker = " <-- EMPTY"
    elif stripped == "-":
        marker = " <-- DASH"
    else:
        marker = "" if stripped in VALID_SOURCES else " <-- UNKNOWN"
    print(f" '{src}' : {count:,} entries{marker}")
# Section: each unknown Source value with sample occurrences.
if unknown_sources:
    print(f"\n{'='*80}")
    print(f"UNKNOWN SOURCE VALUES (not in valid list):")
    print("="*80)
    by_frequency = sorted(unknown_sources.items(), key=lambda kv: len(kv[1]), reverse=True)
    for src_val, occurrences in by_frequency:
        print(f"\n Source value: '{src_val}' ({len(occurrences)} occurrences)")
        for fname, row_num, word in occurrences[:10]:
            print(f" - {fname} row {row_num}: word='{word}'")
        if len(occurrences) > 10:
            print(f" ... and {len(occurrences)-10} more")

# Section: files containing rows with an empty Source cell.
if files_with_empty:
    print(f"\n{'='*80}")
    print(f"FILES WITH EMPTY SOURCE ENTRIES:")
    print("="*80)
    for fname in sorted(files_with_empty):
        entries = files_with_empty[fname]
        total = file_stats[fname]["total"]
        pct = 100 * len(entries) / total if total > 0 else 0
        print(f"\n {fname}: {len(entries)} empty / {total} total ({pct:.1f}%)")
        # Above 1% empty we list every offending row; otherwise only a sample.
        if pct > 1.0:
            print(f" ** >1% empty — listing all entries:")
            shown = entries
        else:
            shown = entries[:5]
        for row_num, word, ipa in shown:
            print(f" Row {row_num}: Word='{word}', IPA='{ipa}'")
        if pct <= 1.0 and len(entries) > 5:
            print(f" ... and {len(entries)-5} more")
# Section: files containing rows whose Source is a bare dash.
if files_with_dash:
    print(f"\n{'='*80}")
    print(f"FILES WITH DASH '-' AS SOURCE ({len(files_with_dash)} files):")
    print("="*80)
    for fname, entries in sorted(files_with_dash.items()):
        total = file_stats[fname]["total"]
        share = 100 * len(entries) / total if total > 0 else 0
        print(f" {fname}: {len(entries)} dash / {total} total ({share:.1f}%)")
        # Only small files get their rows listed inline.
        if len(entries) <= 5:
            for row_num, word, ipa in entries:
                print(f" Row {row_num}: Word='{word}', IPA='{ipa}'")

# Section: files containing rows with an unrecognized Source value.
if files_with_unknown:
    print(f"\n{'='*80}")
    print(f"FILES WITH UNKNOWN SOURCE VALUES ({len(files_with_unknown)} files):")
    print("="*80)
    for fname, entries in sorted(files_with_unknown.items()):
        print(f"\n {fname}: {len(entries)} unknown / {file_stats[fname]['total']} total")
        for row_num, word, src_val in entries[:10]:
            print(f" Row {row_num}: Word='{word}', Source='{src_val}'")
        overflow = len(entries) - 10
        if overflow > 0:
            print(f" ... and {overflow} more")
# Final summary: a file is "problematic" if it appears in any issue bucket.
problem_files = (
    set(files_with_empty)
    | set(files_with_dash)
    | set(files_with_unknown)
    | set(files_missing_source_col)
)
print(f"\n{'='*80}")
print(f"SUMMARY")
print("="*80)
print(f"Total files: {len(tsv_files)}")
print(f"Files with problems: {len(problem_files)}")
print(f" - Missing Source column: {len(files_missing_source_col)}")
print(f" - With empty sources: {len(files_with_empty)}")
print(f" - With dash sources: {len(files_with_dash)}")
print(f" - With unknown sources: {len(files_with_unknown)}")
print(f"Clean files: {len(tsv_files) - len(problem_files)}")
if problem_files:
    print(f"\n*** {len(problem_files)} FILES HAVE ISSUES — see details above. ***")
else:
    print("\n*** ALL FILES PASS: Every row in every file has a valid source. ***")