# isv-corpus/scripts/validate_corpus.py
# Uploaded by interslavic-oss — "Initial upload: Interslavic MT corpus with
# monolingual data and documentation" (commit 8a49bd9, verified)
#!/usr/bin/env python3
"""
Validate Interslavic MT corpus format.
- Monolingual: id, sentence, source; no duplicate ids.
- Parallel: id, isv, target, translator, method, review_status; ids exist in monolingual; isv matches.
Exits 0 on success, 1 on validation failure.
"""
from pathlib import Path
import os
import sys
# Fail fast with an actionable message when the parquet stack is missing.
# Exit code 2 distinguishes an environment problem from a validation
# failure (exit 1) so CI can tell the two apart.
try:
    import pandas as pd
except ImportError:
    print("Requires pandas and pyarrow. Install with: pip install -r requirements.txt", file=sys.stderr)
    sys.exit(2)
# Repository layout: this script lives in <repo>/scripts/, so the corpus
# root is two levels up from this file.
REPO_ROOT = Path(__file__).resolve().parent.parent
MONOLINGUAL_PATH = REPO_ROOT / "monolingual" / "isv_sentences.parquet"
PARALLEL_DIR = REPO_ROOT / "parallel"
# Exact expected schemas — files must have these columns and no others.
MONO_COLUMNS = {"id", "sentence", "source"}
PARALLEL_COLUMNS = {"id", "isv", "target", "translator", "method", "review_status"}
# Closed vocabulary for the parallel `method` column.
METHOD_VALUES = {"human", "machine_raw", "machine_postedited"}
def main() -> None:
    """Validate the corpus; print all errors to stderr and exit 1 on failure.

    Checks:
    - Monolingual parquet exists, has exactly MONO_COLUMNS, unique
      non-null ids, and no null sentences/sources.
    - Each parallel parquet has exactly PARALLEL_COLUMNS, ids present in
      the monolingual table, `isv` text matching the monolingual sentence
      (whitespace-stripped), and `method` values drawn from METHOD_VALUES.

    Exits 0 when everything passes, 1 on any validation error.
    """
    errors: list[str] = []
    # In CI, collapse repeated isv-mismatch errors to one line per file so
    # the log stays readable; locally, report every offending id.
    ci_mode = os.environ.get("GITHUB_ACTIONS", "").lower() in ("true", "1")

    mono_ids, mono_id_to_sentence = _validate_monolingual(errors)

    # NOTE: Path.glob yields nothing for a nonexistent directory, so a
    # missing parallel/ dir is silently treated as "no parallel files".
    for path in sorted(PARALLEL_DIR.glob("*.parquet")):
        _validate_parallel_file(path, mono_ids, mono_id_to_sentence, errors, ci_mode)

    if errors:
        for e in errors:
            print(e, file=sys.stderr)
        sys.exit(1)
    print("Validation passed.")


def _validate_monolingual(errors: list[str]) -> tuple[set[str], dict[str, str]]:
    """Validate the monolingual file; return (ids, id->sentence) for cross-checks.

    Appends error messages to *errors*. Returns empty containers when the
    file is missing or its required columns are absent, which disables the
    dependent parallel-file cross-checks.
    """
    mono_ids: set[str] = set()
    mono_id_to_sentence: dict[str, str] = {}
    if not MONOLINGUAL_PATH.exists():
        errors.append(f"Missing monolingual file: {MONOLINGUAL_PATH}")
        return mono_ids, mono_id_to_sentence

    df_mono = pd.read_parquet(MONOLINGUAL_PATH)
    cols = set(df_mono.columns)
    if cols != MONO_COLUMNS:
        missing = MONO_COLUMNS - cols
        extra = cols - MONO_COLUMNS
        if missing:
            errors.append(f"monolingual: missing columns: {missing}")
        if extra:
            errors.append(f"monolingual: unexpected columns: {extra}")
    # Guard the per-column checks: without this, a file missing `id`,
    # `sentence`, or `source` crashed with KeyError instead of exiting 1
    # with the column error reported above.
    if not MONO_COLUMNS <= cols:
        return mono_ids, mono_id_to_sentence

    if df_mono["id"].duplicated().any():
        dupes = df_mono[df_mono["id"].duplicated(keep=False)]["id"].unique().tolist()
        errors.append(f"monolingual: duplicate ids: {dupes}")
    if df_mono["id"].isna().any() or df_mono["sentence"].isna().any() or df_mono["source"].isna().any():
        errors.append("monolingual: null values in id, sentence, or source")

    mono_ids = set(df_mono["id"].astype(str))
    mono_id_to_sentence = dict(zip(df_mono["id"].astype(str), df_mono["sentence"].astype(str)))
    return mono_ids, mono_id_to_sentence


def _validate_parallel_file(
    path: Path,
    mono_ids: set[str],
    mono_id_to_sentence: dict[str, str],
    errors: list[str],
    ci_mode: bool,
) -> None:
    """Validate one parallel parquet against the monolingual reference.

    Appends error messages to *errors*. Cross-checks against the
    monolingual data are skipped when it failed to load (empty mapping).
    """
    name = path.name
    df = pd.read_parquet(path)
    cols = set(df.columns)
    if cols != PARALLEL_COLUMNS:
        missing = PARALLEL_COLUMNS - cols
        extra = cols - PARALLEL_COLUMNS
        if missing:
            errors.append(f"{name}: missing columns: {missing}")
        if extra:
            errors.append(f"{name}: unexpected columns: {extra}")

    if "id" in cols and "isv" in cols and mono_id_to_sentence:
        missing_ids = set(df["id"].astype(str)) - mono_ids
        if missing_ids:
            # Cap the listing at 10 ids to keep the error line bounded.
            errors.append(
                f"{name}: ids not in monolingual: {list(missing_ids)[:10]}"
                f"{'...' if len(missing_ids) > 10 else ''}"
            )
        # Vectorized isv match check (only for ids that exist in monolingual).
        df_ids = df["id"].astype(str)
        df_isv = df["isv"].astype(str).str.strip()
        expected = df_ids.map(mono_id_to_sentence.get)
        in_mono = df_ids.isin(mono_ids)
        mismatch = in_mono & (df_isv != expected.str.strip())
        if mismatch.any():
            bad_ids = df.loc[mismatch, "id"].astype(str).tolist()
            if ci_mode:
                errors.append(f"{name}: id {bad_ids[0]} has isv text that does not match monolingual sentence")
            else:
                for rid in bad_ids:
                    errors.append(f"{name}: id {rid} has isv text that does not match monolingual sentence")

    if "method" in cols:
        # Compute the invalid set once (the original evaluated it twice).
        bad = set(df["method"].dropna().unique()) - METHOD_VALUES
        if bad:
            errors.append(f"{name}: invalid method values: {bad}")
# Run validation only when executed as a script; importing the module has
# no side effects beyond the pandas availability check above.
if __name__ == "__main__":
    main()