| """ |
| Exhaustive audit of Source values across ALL 1,135 lexicon TSV files. |
| Checks EVERY ROW in EVERY FILE. |
| """ |
| import csv |
| import os |
| import sys |
| from collections import defaultdict, Counter |
|
|
| LEXICON_DIR = r"C:\Users\alvin\hf-ancient-scripts\data\training\lexicons" |
|
|
| VALID_SOURCES = { |
| "abvd", "wikipron", "northeuralex", "wold", "wiktionary", "wiktionary_cat", |
| "sinotibetan", "palaeolexicon", "ediana", "oracc_ecut", "tir_raetica", |
| "kaikki", "kassian2010", "kassian2010_basic", "seed", "wikipedia", "avesta_org" |
| } |
|
|
| |
# --- Global tallies across the whole corpus -------------------------------
total_entries = 0          # every data row seen
valid_source_count = 0     # rows whose (stripped) source is in VALID_SOURCES
empty_source_count = 0     # rows with a blank/missing source
dash_source_count = 0      # rows whose source is exactly "-"
unknown_source_count = 0   # rows with a non-empty, non-dash, non-whitelisted source
whitespace_only_count = 0  # rows whose source carries leading/trailing whitespace

# Frequency of every raw Source value (padding included) across all files.
all_sources = Counter()

# unknown value -> list of (filename, row_number, word) occurrences.
unknown_sources = defaultdict(list)

# filename -> list of offending (row_number, word, detail) tuples, per category.
files_with_empty = defaultdict(list)
files_with_dash = defaultdict(list)
files_with_unknown = defaultdict(list)

# filename -> per-file tally dict (total/valid/empty/dash/unknown/whitespace).
file_stats = {}

# Files whose header has no "Source" column at all.
files_missing_source_col = []
|
|
# ---------------------------------------------------------------------------
# Scan phase: audit the Source column of every row in every TSV file.
# ---------------------------------------------------------------------------
tsv_files = sorted([f for f in os.listdir(LEXICON_DIR) if f.endswith(".tsv")])
print(f"Found {len(tsv_files)} TSV files to audit.\n")

for fname in tsv_files:
    fpath = os.path.join(LEXICON_DIR, fname)

    # Per-file tallies; folded into file_stats once the file is finished.
    f_total = f_valid = f_empty = f_dash = f_unknown = f_whitespace = 0

    with open(fpath, "r", encoding="utf-8") as fh:
        reader = csv.DictReader(fh, delimiter="\t")

        if reader.fieldnames is None or "Source" not in reader.fieldnames:
            # File has no Source column at all: every data row counts as an
            # empty source.  Count once with sum() instead of rebuilding the
            # stats dict on each row; this also records an entry for files
            # that have a header but zero data rows.
            files_missing_source_col.append(fname)
            f_total = f_empty = sum(1 for _ in reader)
            total_entries += f_total
            empty_source_count += f_total
            file_stats[fname] = {
                "total": f_total, "valid": 0, "empty": f_total,
                "dash": 0, "unknown": 0, "whitespace": 0,
            }
            continue

        # Data rows are numbered from 2 because row 1 is the header line.
        for row_num, row in enumerate(reader, start=2):
            total_entries += 1
            f_total += 1

            # DictReader yields None for cells missing from a short row;
            # normalize both None and absent to "".
            source = row.get("Source") or ""
            word = row.get("Word", "???")
            ipa = row.get("IPA", "???")

            raw_source = source
            source_stripped = source.strip()

            # Global tally keeps the raw (possibly padded) value so the
            # report shows exactly what is in the files.
            all_sources[raw_source] += 1

            if source_stripped == "":
                empty_source_count += 1
                f_empty += 1
                files_with_empty[fname].append((row_num, word, ipa))
            elif source_stripped == "-":
                dash_source_count += 1
                f_dash += 1
                files_with_dash[fname].append((row_num, word, ipa))
            elif source_stripped != raw_source:
                # Whitespace-padded value: tracked separately, then classified
                # as valid/unknown on the stripped form.  Unknown padded values
                # are reported with their raw form so the padding is visible.
                whitespace_only_count += 1
                f_whitespace += 1
                if source_stripped in VALID_SOURCES:
                    valid_source_count += 1
                    f_valid += 1
                else:
                    unknown_source_count += 1
                    f_unknown += 1
                    unknown_sources[raw_source].append((fname, row_num, word))
                    files_with_unknown[fname].append((row_num, word, raw_source))
            elif source_stripped in VALID_SOURCES:
                valid_source_count += 1
                f_valid += 1
            else:
                unknown_source_count += 1
                f_unknown += 1
                unknown_sources[source_stripped].append((fname, row_num, word))
                files_with_unknown[fname].append((row_num, word, source_stripped))

    file_stats[fname] = {
        "total": f_total, "valid": f_valid, "empty": f_empty,
        "dash": f_dash, "unknown": f_unknown, "whitespace": f_whitespace,
    }
|
|
| |
| |
| |
| print("=" * 80) |
| print("EXHAUSTIVE SOURCE AUDIT REPORT") |
| print("=" * 80) |
|
|
| print(f"\nFiles scanned: {len(tsv_files)}") |
| print(f"Total entries (rows): {total_entries:,}") |
| print(f" Valid source: {valid_source_count:,} ({100*valid_source_count/total_entries:.2f}%)") |
| print(f" Empty/missing source: {empty_source_count:,} ({100*empty_source_count/total_entries:.4f}%)") |
| print(f" Dash '-' as source: {dash_source_count:,} ({100*dash_source_count/total_entries:.4f}%)") |
| print(f" Unknown source value: {unknown_source_count:,} ({100*unknown_source_count/total_entries:.4f}%)") |
| print(f" (Whitespace-padded): {whitespace_only_count:,}") |
|
|
| |
# List every file whose header lacked a Source column entirely.
if files_missing_source_col:
    banner = "=" * 80
    print(f"\n{banner}")
    print(f"FILES MISSING 'Source' COLUMN ENTIRELY: {len(files_missing_source_col)}")
    print(banner)
    for missing_name in files_missing_source_col:
        print(f" - {missing_name}")
|
|
| |
| print(f"\n{'='*80}") |
| print(f"ALL UNIQUE SOURCE VALUES (across entire database):") |
| print("="*80) |
| for src, count in sorted(all_sources.items(), key=lambda x: -x[1]): |
| marker = "" |
| s = src.strip() |
| if s == "": |
| marker = " <-- EMPTY" |
| elif s == "-": |
| marker = " <-- DASH" |
| elif s not in VALID_SOURCES: |
| marker = " <-- UNKNOWN" |
| print(f" '{src}' : {count:,} entries{marker}") |
|
|
| |
# Detail each unrecognized Source value, with up to 10 sample occurrences.
if unknown_sources:
    print(f"\n{'='*80}")
    print(f"UNKNOWN SOURCE VALUES (not in valid list):")
    print("="*80)
    by_frequency = sorted(unknown_sources.items(), key=lambda kv: len(kv[1]), reverse=True)
    for src_val, hits in by_frequency:
        print(f"\n Source value: '{src_val}' ({len(hits)} occurrences)")
        for fname, row_num, word in hits[:10]:
            print(f" - {fname} row {row_num}: word='{word}'")
        overflow = len(hits) - 10
        if overflow > 0:
            print(f" ... and {overflow} more")
|
|
| |
# Per-file breakdown of empty-source rows.  Files where more than 1% of rows
# are empty get a full listing; otherwise only the first five rows are shown.
if files_with_empty:
    print(f"\n{'='*80}")
    print(f"FILES WITH EMPTY SOURCE ENTRIES:")
    print("="*80)
    for fname in sorted(files_with_empty):
        entries = files_with_empty[fname]
        total_rows = file_stats[fname]["total"]
        pct = (100 * len(entries) / total_rows) if total_rows > 0 else 0
        print(f"\n {fname}: {len(entries)} empty / {total_rows} total ({pct:.1f}%)")
        if pct > 1.0:
            print(f" ** >1% empty — listing all entries:")
            shown = entries
        else:
            shown = entries[:5]
        for row_num, word, ipa in shown:
            print(f" Row {row_num}: Word='{word}', IPA='{ipa}'")
        if pct <= 1.0 and len(entries) > 5:
            print(f" ... and {len(entries)-5} more")
|
|
| |
# Per-file breakdown of dash ('-') sources; full row detail only when a file
# has five or fewer such rows.
if files_with_dash:
    print(f"\n{'='*80}")
    print(f"FILES WITH DASH '-' AS SOURCE ({len(files_with_dash)} files):")
    print("="*80)
    for fname in sorted(files_with_dash):
        entries = files_with_dash[fname]
        total_rows = file_stats[fname]["total"]
        pct = (100 * len(entries) / total_rows) if total_rows > 0 else 0
        print(f" {fname}: {len(entries)} dash / {total_rows} total ({pct:.1f}%)")
        if len(entries) <= 5:
            for row_num, word, ipa in entries:
                print(f" Row {row_num}: Word='{word}', IPA='{ipa}'")
|
|
| |
# Per-file breakdown of unknown Source values, capped at 10 rows per file.
if files_with_unknown:
    print(f"\n{'='*80}")
    print(f"FILES WITH UNKNOWN SOURCE VALUES ({len(files_with_unknown)} files):")
    print("="*80)
    for fname in sorted(files_with_unknown):
        entries = files_with_unknown[fname]
        print(f"\n {fname}: {len(entries)} unknown / {file_stats[fname]['total']} total")
        for row_num, word, src_val in entries[:10]:
            print(f" Row {row_num}: Word='{word}', Source='{src_val}'")
        if len(entries) > 10:
            print(f" ... and {len(entries)-10} more")
|
|
| |
# Final rollup: the union of every file that appeared in any problem bucket.
problem_files = (
    set(files_with_empty)
    | set(files_with_dash)
    | set(files_with_unknown)
    | set(files_missing_source_col)
)

print(f"\n{'='*80}")
print(f"SUMMARY")
print("="*80)
print(f"Total files: {len(tsv_files)}")
print(f"Files with problems: {len(problem_files)}")
print(f" - Missing Source column: {len(files_missing_source_col)}")
print(f" - With empty sources: {len(files_with_empty)}")
print(f" - With dash sources: {len(files_with_dash)}")
print(f" - With unknown sources: {len(files_with_unknown)}")
print(f"Clean files: {len(tsv_files) - len(problem_files)}")

if problem_files:
    print(f"\n*** {len(problem_files)} FILES HAVE ISSUES — see details above. ***")
else:
    print("\n*** ALL FILES PASS: Every row in every file has a valid source. ***")
|
|