"""Verify the local ancient-scripts dataset mirror against the Hugging Face repo."""
import os
import sys
from pathlib import Path

from huggingface_hub import HfApi, hf_hub_download

# Force UTF-8 output so non-ASCII script data prints cleanly on Windows consoles.
sys.stdout.reconfigure(encoding='utf-8')
sys.stderr.reconfigure(encoding='utf-8')
|
|
api = HfApi()
repo = "Nacryos/ancient-scripts-datasets"
local_base = Path("C:/Users/alvin/hf-ancient-scripts")
|
|
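# Directories to verify as (path in repo, expected file count) pairs; the
# expected counts are printed alongside the live totals for reference.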
checks = [
    ("docs", 20),
    ("scripts", 37),
    ("cognate_pipeline", 91),
    ("data/validation", 58),
    ("data/training/validation", 53),
    ("data/training/audit_trails", 27),
    ("data/training/language_profiles", 1110),
    ("data/training/raw", 14),
    ("data/cited_sources", 5),
]
|
|
all_pass = True
for path, expected in checks:
    local_dir = local_base / path

    # Walk the local copy, skipping VCS/cache directories and compiled bytecode.
    local_count = 0
    local_names = set()
    for root, dirs, files in os.walk(local_dir):
        dirs[:] = [d for d in dirs if d not in ('__pycache__', '.git')]
        for f in files:
            if not f.endswith('.pyc'):
                local_count += 1
                # Normalise to POSIX-style paths relative to local_dir.
                rel = os.path.relpath(os.path.join(root, f), local_dir).replace("\\", "/")
                local_names.add(rel)
|
|
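    # List the same subtree on the Hub so the two name sets can be diffed.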
    try:
        hf_items = list(api.list_repo_tree(repo, path_in_repo=path, repo_type="dataset", recursive=True))
        hf_names = set()
        for item in hf_items:
            # Only RepoFile entries carry a `size` attribute; folders are skipped.
            if hasattr(item, 'size'):
                hf_names.add(item.path.removeprefix(path + "/"))
        hf_count = len(hf_names)
    except Exception as e:
        hf_count = 0
        hf_names = set()
        print(f"ERROR listing {path}: {e}")
|
|
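    # Diff in both directions: files absent from HF vs. unexpected extras.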
    missing = local_names - hf_names
    extra = hf_names - local_names
    # `expected` is informational; PASS requires the local and HF file sets to match.
    status = "PASS" if local_count == hf_count and not missing else "FAIL"
    if status == "FAIL":
        all_pass = False
|
|
    print(f"{path:40s} local={local_count:5d} hf={hf_count:5d} expected={expected:5d} {status}")
    if missing and len(missing) <= 10:
        for f in sorted(missing):
            print(f" MISSING on HF: {f}")
    elif missing:
        print(f" MISSING on HF: {len(missing)} files (too many to list)")
    if extra and len(extra) <= 10:
        for f in sorted(extra):
            print(f" EXTRA on HF: {f}")
    elif extra:
        print(f" EXTRA on HF: {len(extra)} files")
|
|
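# Spot-check provenance: the uploaded languages.tsv should retain its Sources column.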
print(f"\n{'='*60}")
print("METADATA PROVENANCE CHECK:")
lang_path = hf_hub_download(repo_id=repo, filename="data/training/metadata/languages.tsv", repo_type="dataset")
with open(lang_path, "r", encoding="utf-8") as f:
    lines = f.readlines()
header = lines[0].strip().split("\t")
print(f" languages.tsv columns: {header}")
print(f" languages.tsv entries: {len(lines)-1}")

if "Sources" in header:
    src_idx = header.index("Sources")
    # Count data rows with a non-empty Sources cell. Strip only the newline
    # (not tabs) so trailing empty fields survive the split, and guard
    # against short rows instead of risking an IndexError.
    sources_present = 0
    for line in lines[1:]:
        cells = line.rstrip("\n").split("\t")
        if len(cells) > src_idx and cells[src_idx].strip():
            sources_present += 1
    print(f" Entries with Sources: {sources_present}/{len(lines)-1}")
    if sources_present > 0:
        print(" STATUS: PASS")
    else:
        print(" STATUS: FAIL - no source data")
else:
    print(" STATUS: FAIL - no Sources column")
|
|
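# Repeat the provenance check on a sample of per-language lexicons
# (hit = Hittite, ave = Avestan, ine-pro = Proto-Indo-European,
#  xlc = Lycian, txb = Tocharian B).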
print(f"\n{'='*60}")
print("LEXICON PROVENANCE CHECK (Source column in TSVs):")
for iso in ["hit", "ave", "ine-pro", "xlc", "txb"]:
    lex_path = hf_hub_download(repo_id=repo, filename=f"data/training/lexicons/{iso}.tsv", repo_type="dataset")
    with open(lex_path, "r", encoding="utf-8") as f:
        lex_lines = f.readlines()
    lex_header = lex_lines[0].strip().split("\t")
    if "Source" in lex_header:
        src_idx = lex_header.index("Source")
        # Collect the distinct source labels cited across all entries.
        sources = set()
        for line in lex_lines[1:]:
            parts = line.strip().split("\t")
            if len(parts) > src_idx:
                sources.add(parts[src_idx])
        print(f" {iso:10s} entries={len(lex_lines)-1:5d} sources={sources}")
    else:
        print(f" {iso:10s} FAIL - no Source column")
|
|
print(f"\n{'='*60}")
if all_pass:
    print("OVERALL: PASS")
else:
    print("OVERALL: FAIL")
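# Optional hardening (a sketch, not part of the original script): exit nonzero
# on failure so a CI job or shell pipeline can gate on this verification.
sys.exit(0 if all_pass else 1)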
|
|