# DefExtra / scripts / compare_defextra_csvs.py
# (Uploaded to the Hugging Face Hub via huggingface_hub; dataset page
# header text removed so the file is a valid Python module.)
from __future__ import annotations

# ruff: noqa: E402
import argparse
import csv
import re
import sys
from pathlib import Path
from typing import Dict, Tuple

# Import the hydration helpers from the ``scripts`` package.  When this file
# is executed directly (so ``scripts`` is not importable), fall back to
# inserting the project root onto sys.path and retry the same import.
try:
    from scripts.hydrate_defextra import (
        _ensure_trailing_punct,
        _postprocess_text,
    )
except ModuleNotFoundError as exc:
    if exc.name != "scripts":
        # Some other module is genuinely missing; do not mask it.
        raise
    # scripts/ lives one level below the project root.
    PROJECT_ROOT = Path(__file__).resolve().parent.parent
    if str(PROJECT_ROOT) not in sys.path:
        sys.path.insert(0, str(PROJECT_ROOT))
    from scripts.hydrate_defextra import (
        _ensure_trailing_punct,
        _postprocess_text,
    )
def _normalize_text(text: str) -> str:
if text is None:
return ""
value = text.replace("\u00ad", "")
value = re.sub(r"([A-Za-z])-\s+([A-Za-z])", r"\1\2", value)
value = re.sub(r"\s+", " ", value).strip()
return value
def _normalize_punct(text: str) -> str:
if text is None:
return ""
value = text.replace("\u00ad", "")
value = re.sub(r"[^\w\s]", "", value)
value = re.sub(r"\\s+", " ", value).strip()
return value
def _load(path: Path) -> Dict[Tuple[str, str], Dict[str, str]]:
with path.open(encoding="utf-8", newline="") as handle:
reader = csv.DictReader(handle)
return {(row["paper_id"], row["concept"]): row for row in reader}
def _row_flag(
row: Dict[str, str],
key: str,
default: bool = False,
) -> bool:
value = (row.get(key) or "").strip().lower()
if not value:
return default
return value == "true"
def main() -> None:
    """CLI entry point: compare a hydrated DefExtra CSV against a reference.

    Loads both CSVs keyed by ``(paper_id, concept)``, optionally re-applies
    the hydration postprocessing (driven by per-row flags and token counts
    from the "legal" CSV), then prints counts of exact, whitespace-normalized,
    and punctuation-only mismatches for the ``definition`` and ``context``
    columns.  Optionally writes a detailed mismatch report to ``--report``.
    """
    parser = argparse.ArgumentParser(
        description="Compare DefExtra hydrated CSV against reference.",
    )
    parser.add_argument(
        "--ref",
        type=Path,
        default=Path("results/paper_results/defextra_hf_tablefix.csv"),
        help="Reference CSV path.",
    )
    parser.add_argument(
        "--hyd",
        type=Path,
        default=Path(
            "results/paper_results/defextra_hydrated_tablefix_test.csv",
        ),
        help="Hydrated CSV path.",
    )
    parser.add_argument(
        "--limit",
        type=int,
        default=5,
        help="Number of mismatches to print.",
    )
    parser.add_argument(
        "--report",
        type=Path,
        default=None,
        help="Optional path to write a detailed mismatch report.",
    )
    parser.add_argument(
        "--report-limit",
        type=int,
        default=0,
        help="Limit mismatches written to report (0 = all).",
    )
    parser.add_argument(
        "--legal",
        type=Path,
        default=Path("results/paper_results/defextra_legal_tablefix.csv"),
        help="Legal CSV with token counts and linebreak flags.",
    )
    parser.add_argument(
        "--postprocess",
        action="store_true",
        help="Apply hydration postprocessing to hydrated text before compare.",
    )
    args = parser.parse_args()

    ref = _load(args.ref)
    hyd = _load(args.hyd)
    # The legal CSV is only consulted when postprocessing is re-applied.
    legal = _load(args.legal) if args.postprocess else {}

    missing = [k for k in ref if k not in hyd]
    extra = [k for k in hyd if k not in ref]

    # Keys collected per mismatch category (exact / normalized / punct-only).
    def_mismatch = []
    ctx_mismatch = []
    def_mismatch_norm = []
    ctx_mismatch_norm = []
    def_mismatch_punct = []
    ctx_mismatch_punct = []

    for key, ref_row in ref.items():
        hyd_row = hyd.get(key)
        if not hyd_row:
            # Missing keys are reported separately; nothing to compare.
            continue
        if args.postprocess:
            # Re-run hydration postprocessing on the hydrated text using the
            # per-row flags and expected token counts from the legal CSV.
            legal_row = legal.get(key, {})
            def_expected = int(legal_row.get("definition_token_count") or 0)
            ctx_expected = int(legal_row.get("context_token_count") or 0)
            def_preserve = _row_flag(
                legal_row,
                "definition_preserve_linebreaks",
            )
            ctx_preserve = _row_flag(
                legal_row,
                "context_preserve_linebreaks",
            )
            def_preserve_hyphen = _row_flag(
                legal_row,
                "definition_preserve_hyphenation",
            )
            ctx_preserve_hyphen = _row_flag(
                legal_row,
                "context_preserve_hyphenation",
            )
            # Citation flags default to True (keep citations when unknown).
            def_keep_bracket = _row_flag(
                legal_row,
                "definition_has_bracket_citation",
                True,
            )
            def_keep_paren = _row_flag(
                legal_row,
                "definition_has_paren_citation",
                True,
            )
            # Split letter/digit runs only when the row does NOT flag them.
            def_split_letter_digit = not _row_flag(
                legal_row,
                "definition_has_letter_digit",
            )
            ctx_keep_bracket = _row_flag(
                legal_row,
                "context_has_bracket_citation",
                True,
            )
            ctx_keep_paren = _row_flag(
                legal_row,
                "context_has_paren_citation",
                True,
            )
            ctx_split_letter_digit = not _row_flag(
                legal_row,
                "context_has_letter_digit",
            )
            # Copy before mutating so the shared dict in ``hyd`` is untouched.
            hyd_row = dict(hyd_row)
            hyd_row["definition"] = _ensure_trailing_punct(
                _postprocess_text(
                    hyd_row.get("definition", ""),
                    def_expected,
                    def_preserve,
                    def_preserve_hyphen,
                    def_keep_bracket,
                    def_keep_paren,
                    def_split_letter_digit,
                ),
                legal_row.get("definition_end_punct", ""),
            )
            hyd_row["context"] = _ensure_trailing_punct(
                _postprocess_text(
                    hyd_row.get("context", ""),
                    ctx_expected,
                    ctx_preserve,
                    ctx_preserve_hyphen,
                    ctx_keep_bracket,
                    ctx_keep_paren,
                    ctx_split_letter_digit,
                ),
                legal_row.get("context_end_punct", ""),
            )
        if ref_row.get("definition", "") != hyd_row.get("definition", ""):
            def_mismatch.append(key)
            if _normalize_text(
                ref_row.get("definition", ""),
            ) != _normalize_text(
                hyd_row.get("definition", ""),
            ):
                def_mismatch_norm.append(key)
            # Equality after stripping punctuation means this exact mismatch
            # differs only in punctuation/whitespace (hence ``==``, not ``!=``).
            if _normalize_punct(ref_row.get("definition", "")) == _normalize_punct(
                hyd_row.get("definition", ""),
            ):
                def_mismatch_punct.append(key)
        if ref_row.get("context", "") != hyd_row.get("context", ""):
            ctx_mismatch.append(key)
            if _normalize_text(ref_row.get("context", "")) != _normalize_text(
                hyd_row.get("context", ""),
            ):
                ctx_mismatch_norm.append(key)
            if _normalize_punct(ref_row.get("context", "")) == _normalize_punct(
                hyd_row.get("context", ""),
            ):
                ctx_mismatch_punct.append(key)

    # Summary counters printed to stdout.
    total_ref = len(ref)
    total_hyd = len(hyd)
    print(f"Reference rows: {total_ref}")
    print(f"Hydrated rows: {total_hyd}")
    print(f"Missing keys: {len(missing)}")
    print(f"Extra keys: {len(extra)}")
    print(f"Definition mismatches (exact): {len(def_mismatch)}")
    print(f"Definition mismatches (normalized): {len(def_mismatch_norm)}")
    print(f"Context mismatches (exact): {len(ctx_mismatch)}")
    print(f"Context mismatches (normalized): {len(ctx_mismatch_norm)}")
    if def_mismatch_punct:
        print(
            "Definition mismatches (punctuation-only): "
            f"{len(def_mismatch_punct)}",
        )
    if ctx_mismatch_punct:
        print(
            "Context mismatches (punctuation-only): "
            f"{len(ctx_mismatch_punct)}",
        )
    # NOTE(review): this early return also skips the --report block below, so
    # no report is written when --limit <= 0 — confirm that is intended.
    if args.limit <= 0:
        return

    # Print up to --limit sample mismatches for each column.
    shown = 0
    for key in def_mismatch:
        if shown >= args.limit:
            break
        ref_row = ref[key]
        hyd_row = hyd[key]
        print("\nDefinition mismatch:", key)
        print("ref:", ref_row.get("definition", ""))
        print("hyd:", hyd_row.get("definition", ""))
        shown += 1
    shown = 0
    for key in ctx_mismatch:
        if shown >= args.limit:
            break
        ref_row = ref[key]
        hyd_row = hyd[key]
        print("\nContext mismatch:", key)
        print("ref:", ref_row.get("context", ""))
        print("hyd:", hyd_row.get("context", ""))
        shown += 1

    if args.report is not None:
        report_lines = []
        report_lines.append(f"Missing keys: {len(missing)}")
        report_lines.extend([f"- {k}" for k in missing])
        report_lines.append("")
        report_lines.append(
            f"Definition mismatches (exact): {len(def_mismatch)}"
        )
        report_lines.append(
            f"Definition mismatches (normalized): {len(def_mismatch_norm)}"
        )
        report_lines.append(
            f"Definition mismatches (punctuation-only): {len(def_mismatch_punct)}"
        )
        report_lines.append(
            f"Context mismatches (exact): {len(ctx_mismatch)}"
        )
        report_lines.append(
            f"Context mismatches (normalized): {len(ctx_mismatch_norm)}"
        )
        report_lines.append(
            f"Context mismatches (punctuation-only): {len(ctx_mismatch_punct)}"
        )
        report_lines.append("")
        # --report-limit of 0 means "no limit": use the full mismatch count.
        def_limit = args.report_limit or len(def_mismatch)
        ctx_limit = args.report_limit or len(ctx_mismatch)
        report_lines.append("Definition mismatches:")
        for key in def_mismatch[:def_limit]:
            ref_row = ref[key]
            hyd_row = hyd[key]
            report_lines.append(f"- {key[0]} | {key[1]}")
            report_lines.append(f" ref: {ref_row.get('definition','')}")
            report_lines.append(f" hyd: {hyd_row.get('definition','')}")
        report_lines.append("")
        report_lines.append("Context mismatches:")
        for key in ctx_mismatch[:ctx_limit]:
            ref_row = ref[key]
            hyd_row = hyd[key]
            report_lines.append(f"- {key[0]} | {key[1]}")
            report_lines.append(f" ref: {ref_row.get('context','')}")
            report_lines.append(f" hyd: {hyd_row.get('context','')}")
        args.report.parent.mkdir(parents=True, exist_ok=True)
        args.report.write_text(
            "\n".join(report_lines) + "\n",
            encoding="utf-8",
        )
        print(f"Wrote report to {args.report}")
# Standard script entry point guard.
if __name__ == "__main__":
    main()