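# DefExtra/scripts/prepare_defextra_legal.py
"""Build the legal DefExtra CSV: copy the source CSV's metadata while dropping
excerpt text, replacing it with GROBID TEI character spans and hash/anchor
markers from which each definition and context can be re-located."""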
from __future__ import annotations
# ruff: noqa: E402
import argparse
import csv
import re
import sys
from pathlib import Path
from typing import Dict, Optional
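# Import the shared marker helpers; on failure, fall back to adding the
# project root to ``sys.path`` so the script also works when run directly
# rather than as a module.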
try:
from scripts.defextra_markers import (
ANCHOR_WINDOW,
ANCHOR_WINDOW_ALT,
DocIndex,
HASH_VERSION,
TokenIndex,
build_tei_index,
hash_token_sequence,
tokenize_text,
normalize_paper_id,
)
except ModuleNotFoundError as exc:
if exc.name != "scripts":
raise
PROJECT_ROOT = Path(__file__).resolve().parent.parent
if str(PROJECT_ROOT) not in sys.path:
sys.path.insert(0, str(PROJECT_ROOT))
from scripts.defextra_markers import (
ANCHOR_WINDOW,
ANCHOR_WINDOW_ALT,
DocIndex,
HASH_VERSION,
TokenIndex,
build_tei_index,
hash_token_sequence,
tokenize_text,
normalize_paper_id,
)
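# Heuristic surface features recorded per excerpt (trailing punctuation,
# citation styles such as "[12]" or "(Smith 2020)") to help downstream
# hydration normalise the re-located text.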
TRAILING_PUNCT = {".", ",", ";", ":", "?", "!"}
TRAILING_QUOTES = {"'", '"', "”", "’", ")", "]"}
CITATION_BRACKET_RE = re.compile(r"\[[0-9][0-9,;\s\-–]*\]")
CITATION_PAREN_RE = re.compile(r"\([^)]*\d{4}[^)]*\)")
def _extract_end_punct(text: str) -> str:
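    """Return the final punctuation mark of ``text``, looking past closing quotes and brackets."""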
trimmed = text.rstrip()
if not trimmed:
return ""
i = len(trimmed) - 1
while i >= 0 and trimmed[i] in TRAILING_QUOTES:
i -= 1
if i >= 0 and trimmed[i] in TRAILING_PUNCT:
return trimmed[i]
return ""
def _spec(
count: str,
hash64: str,
sha: str,
) -> Optional[tuple[int, int, str]]:
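    """Parse CSV string fields into a ``(token_count, hash64, sha256)`` spec, or None if any field is empty or malformed."""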
if not count or not hash64 or not sha:
return None
try:
return int(count), int(hash64), sha
except ValueError:
return None
def _build_mid_candidates(
token_index: TokenIndex,
mid_specs: list[tuple[int, int, str]],
) -> list[tuple[int, int]]:
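    """List every ``(start_position, token_count)`` at which a mid-anchor hash occurs in the document."""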
if not mid_specs:
return []
candidates: list[tuple[int, int]] = []
for spec in mid_specs:
for position in token_index.find_token_positions_by_hash(*spec):
candidates.append((position, spec[0]))
return candidates
def _span_has_mid(
mid_candidates: list[tuple[int, int]],
start_idx: int,
end_idx: int,
) -> bool:
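    """Return True if any mid-anchor candidate lies fully inside ``[start_idx, end_idx]``."""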
for mid_start, mid_len in mid_candidates:
mid_end = mid_start + mid_len - 1
if mid_start >= start_idx and mid_end <= end_idx:
return True
return False
def _find_span_by_anchors(
token_index: TokenIndex,
head_spec: Optional[tuple[int, int, str]],
tail_spec: Optional[tuple[int, int, str]],
expected_len: int,
mid_specs: Optional[list[tuple[int, int, str]]] = None,
) -> Optional[tuple[int, int]]:
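    """Locate a span whose head and tail anchors both match, optionally
    requiring a mid anchor inside it. Among candidates, prefer the one whose
    token length is closest to ``expected_len``, within a tolerance of
    ``max(5, 30%)`` and hard bounds of half to three times the expected
    length. Returns character offsets, or None.
    """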
if head_spec is None or tail_spec is None or expected_len <= 0:
return None
head_positions = token_index.find_token_positions_by_hash(*head_spec)
tail_positions = token_index.find_token_positions_by_hash(*tail_spec)
if not head_positions or not tail_positions:
return None
mid_candidates = _build_mid_candidates(token_index, mid_specs or [])
best = None
best_diff = None
tol = max(5, int(expected_len * 0.3))
min_len = max(1, expected_len // 2)
max_len = expected_len * 3
for head_start in head_positions:
head_end = head_start + head_spec[0] - 1
for tail_start in tail_positions:
tail_end = tail_start + tail_spec[0] - 1
if tail_end < head_end:
continue
if mid_candidates and not _span_has_mid(
mid_candidates,
head_start,
tail_end,
):
continue
length = tail_end - head_start + 1
if length < min_len or length > max_len:
continue
if length < expected_len - tol or length > expected_len + tol:
continue
diff = abs(length - expected_len)
if best_diff is None or diff < best_diff:
best_diff = diff
best = (head_start, tail_end)
if best is None:
return None
start_char = token_index.spans[best[0]][0]
end_char = token_index.spans[best[1]][1]
return start_char, end_char
def _find_span_from_anchor(
token_index: TokenIndex,
anchor_spec: Optional[tuple[int, int, str]],
expected_len: int,
position: str,
mid_specs: Optional[list[tuple[int, int, str]]] = None,
) -> Optional[tuple[int, int]]:
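    """Project a span of ``expected_len`` tokens from a single head or tail
    anchor match; tail anchors are tried right-to-left. Returns character
    offsets, or None.
    """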
if anchor_spec is None or expected_len <= 0:
return None
positions = token_index.find_token_positions_by_hash(*anchor_spec)
if not positions:
return None
mid_candidates = _build_mid_candidates(token_index, mid_specs or [])
if position == "tail":
positions = list(reversed(positions))
for anchor_start in positions:
if position == "head":
start_idx = anchor_start
end_idx = anchor_start + expected_len - 1
else:
anchor_end = anchor_start + anchor_spec[0] - 1
end_idx = anchor_end
start_idx = end_idx - expected_len + 1
if start_idx < 0 or end_idx >= len(token_index.tokens):
continue
if mid_candidates and not _span_has_mid(
mid_candidates,
start_idx,
end_idx,
):
continue
start_char = token_index.spans[start_idx][0]
end_char = token_index.spans[end_idx][1]
return start_char, end_char
return None
def _candidate_ids(paper_id: str, doi: str, arxiv: str) -> list[str]:
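    """Build a deduplicated, ordered list of identifiers (paper_id, its normalized form, DOI, arXiv id) to try against the TEI index."""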
candidates = [paper_id, normalize_paper_id(paper_id)]
if doi:
candidates.append(doi)
if arxiv:
candidates.append(arxiv)
seen = set()
ordered = []
for item in candidates:
value = item.strip()
if value and value not in seen:
seen.add(value)
ordered.append(value)
return ordered
def _resolve_tei_path(
paper_id: str,
doi: str,
arxiv: str,
tei_index: Dict[str, Path],
) -> Optional[Path]:
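    """Find the TEI file for a row, also retrying each candidate id with any ``paper_`` prefix stripped."""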
for candidate in _candidate_ids(paper_id, doi, arxiv):
if candidate in tei_index:
return tei_index[candidate]
if candidate.startswith("paper_"):
stripped = candidate[len("paper_") :]
if stripped in tei_index:
return tei_index[stripped]
return None
def main() -> None:
parser = argparse.ArgumentParser(
description="Build legal DefExtra CSV with GROBID markers.",
)
parser.add_argument(
"--input-csv",
type=Path,
default=Path("results/paper_results/defextra_hf.csv"),
help="Source DefExtra CSV (contains excerpts).",
)
parser.add_argument(
"--output-csv",
type=Path,
default=Path("results/paper_results/defextra_legal.csv"),
help="Output legal DefExtra CSV (no excerpts).",
)
parser.add_argument(
"--tei-dir",
type=Path,
nargs="+",
default=[
Path("ManualPDFsGROBID/manual_pdfs_grobid"),
Path("ManualPDFsGROBID/new_grobid"),
],
help="Directories with GROBID TEI files.",
)
parser.add_argument(
"--report",
type=Path,
default=None,
help="Optional report path for missing TEI spans.",
)
args = parser.parse_args()
if not args.input_csv.exists():
raise SystemExit(f"Input CSV not found: {args.input_csv}")
tei_index = build_tei_index(args.tei_dir)
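    # Parse each TEI at most once: cache one DocIndex/TokenIndex per paper_id.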
doc_cache: Dict[str, Optional[DocIndex]] = {}
token_cache: Dict[str, Optional[TokenIndex]] = {}
rows = []
with args.input_csv.open("r", encoding="utf-8", newline="") as handle:
reader = csv.DictReader(handle)
for row in reader:
rows.append(row)
output_rows = []
missing_tei = 0
missing_def = 0
missing_ctx = 0
missing_def_rows: list[dict] = []
missing_ctx_rows: list[dict] = []
def_hash_available = 0
ctx_hash_available = 0
def_anchor_available = 0
ctx_anchor_available = 0
for row in rows:
paper_id = (row.get("paper_id") or "").strip()
doi = (row.get("paper_doi") or "").strip()
arxiv = (row.get("paper_arxiv") or "").strip()
definition = row.get("definition") or ""
context = row.get("context") or ""
if paper_id not in doc_cache:
tei_path = _resolve_tei_path(paper_id, doi, arxiv, tei_index)
if tei_path is None:
doc_cache[paper_id] = None
token_cache[paper_id] = None
else:
doc_index = DocIndex.from_tei(tei_path)
doc_cache[paper_id] = doc_index
token_cache[paper_id] = TokenIndex.from_text(
doc_index.doc_text,
)
doc_index = doc_cache.get(paper_id)
def_start = def_end = ctx_start = ctx_end = ""
def_match = ctx_match = "missing"
def_hash64 = def_sha = def_tok_len = ""
ctx_hash64 = ctx_sha = ctx_tok_len = ""
def_head_hash64 = def_head_sha = def_head_len = ""
def_tail_hash64 = def_tail_sha = def_tail_len = ""
def_mid_hash64 = def_mid_sha = def_mid_len = ""
def_head_alt_hash64 = def_head_alt_sha = def_head_alt_len = ""
def_tail_alt_hash64 = def_tail_alt_sha = def_tail_alt_len = ""
def_mid_alt_hash64 = def_mid_alt_sha = def_mid_alt_len = ""
ctx_head_hash64 = ctx_head_sha = ctx_head_len = ""
ctx_tail_hash64 = ctx_tail_sha = ctx_tail_len = ""
ctx_mid_hash64 = ctx_mid_sha = ctx_mid_len = ""
ctx_head_alt_hash64 = ctx_head_alt_sha = ctx_head_alt_len = ""
ctx_tail_alt_hash64 = ctx_tail_alt_sha = ctx_tail_alt_len = ""
ctx_mid_alt_hash64 = ctx_mid_alt_sha = ctx_mid_alt_len = ""
def_anchor_has = False
ctx_anchor_has = False
def_preserve_linebreaks = "true" if "\n" in definition else "false"
ctx_preserve_linebreaks = "true" if "\n" in context else "false"
def_preserve_hyphenation = (
"true"
if re.search(r"[A-Za-z]-\s+[A-Za-z]", definition)
else "false"
)
ctx_preserve_hyphenation = (
"true" if re.search(r"[A-Za-z]-\s+[A-Za-z]", context) else "false"
)
def_has_bracket_citation = (
"true" if CITATION_BRACKET_RE.search(definition) else "false"
)
def_has_paren_citation = (
"true" if CITATION_PAREN_RE.search(definition) else "false"
)
def_has_letter_digit = (
"true" if re.search(r"[A-Za-z][0-9]", definition) else "false"
)
ctx_has_bracket_citation = (
"true" if CITATION_BRACKET_RE.search(context) else "false"
)
ctx_has_paren_citation = (
"true" if CITATION_PAREN_RE.search(context) else "false"
)
ctx_has_letter_digit = (
"true" if re.search(r"[A-Za-z][0-9]", context) else "false"
)
def_end_punct = _extract_end_punct(definition)
ctx_end_punct = _extract_end_punct(context)
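        # Hash the full token sequence plus head/mid/tail windows (and shorter
        # "alt" windows) so each excerpt can be re-located later without its
        # text being stored in the output.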
def_tokens, _ = tokenize_text(definition, return_spans=False)
if def_tokens:
h64, sha, tok_len = hash_token_sequence(def_tokens)
def_hash64 = str(h64)
def_sha = sha
def_tok_len = str(tok_len)
def_hash_available += 1
if tok_len >= ANCHOR_WINDOW:
h64, sha, tok_len = hash_token_sequence(
def_tokens[:ANCHOR_WINDOW],
)
def_head_hash64 = str(h64)
def_head_sha = sha
def_head_len = str(tok_len)
h64, sha, tok_len = hash_token_sequence(
def_tokens[-ANCHOR_WINDOW:],
)
def_tail_hash64 = str(h64)
def_tail_sha = sha
def_tail_len = str(tok_len)
mid_start = max(0, (len(def_tokens) - ANCHOR_WINDOW) // 2)
h64, sha, tok_len = hash_token_sequence(
def_tokens[mid_start : mid_start + ANCHOR_WINDOW],
)
def_mid_hash64 = str(h64)
def_mid_sha = sha
def_mid_len = str(tok_len)
def_anchor_has = True
            # Compare against the excerpt's full token count: ``tok_len`` was
            # overwritten by the mid-anchor length in the branch above.
            if len(def_tokens) >= 2:
                alt_window = (
                    ANCHOR_WINDOW_ALT
                    if len(def_tokens) >= ANCHOR_WINDOW_ALT
                    else max(2, len(def_tokens) - 1)
                )
h64, sha, tok_len = hash_token_sequence(
def_tokens[:alt_window],
)
def_head_alt_hash64 = str(h64)
def_head_alt_sha = sha
def_head_alt_len = str(tok_len)
h64, sha, tok_len = hash_token_sequence(
def_tokens[-alt_window:],
)
def_tail_alt_hash64 = str(h64)
def_tail_alt_sha = sha
def_tail_alt_len = str(tok_len)
mid_start = max(0, (len(def_tokens) - alt_window) // 2)
h64, sha, tok_len = hash_token_sequence(
def_tokens[mid_start : mid_start + alt_window],
)
def_mid_alt_hash64 = str(h64)
def_mid_alt_sha = sha
def_mid_alt_len = str(tok_len)
def_anchor_has = True
ctx_tokens, _ = tokenize_text(context, return_spans=False)
if ctx_tokens:
h64, sha, tok_len = hash_token_sequence(ctx_tokens)
ctx_hash64 = str(h64)
ctx_sha = sha
ctx_tok_len = str(tok_len)
ctx_hash_available += 1
if tok_len >= ANCHOR_WINDOW:
h64, sha, tok_len = hash_token_sequence(
ctx_tokens[:ANCHOR_WINDOW],
)
ctx_head_hash64 = str(h64)
ctx_head_sha = sha
ctx_head_len = str(tok_len)
h64, sha, tok_len = hash_token_sequence(
ctx_tokens[-ANCHOR_WINDOW:],
)
ctx_tail_hash64 = str(h64)
ctx_tail_sha = sha
ctx_tail_len = str(tok_len)
mid_start = max(0, (len(ctx_tokens) - ANCHOR_WINDOW) // 2)
h64, sha, tok_len = hash_token_sequence(
ctx_tokens[mid_start : mid_start + ANCHOR_WINDOW],
)
ctx_mid_hash64 = str(h64)
ctx_mid_sha = sha
ctx_mid_len = str(tok_len)
ctx_anchor_has = True
            # As above, compare against the context's full token count since
            # ``tok_len`` now holds the mid-anchor length.
            if len(ctx_tokens) >= 2:
                alt_window = (
                    ANCHOR_WINDOW_ALT
                    if len(ctx_tokens) >= ANCHOR_WINDOW_ALT
                    else max(2, len(ctx_tokens) - 1)
                )
h64, sha, tok_len = hash_token_sequence(
ctx_tokens[:alt_window],
)
ctx_head_alt_hash64 = str(h64)
ctx_head_alt_sha = sha
ctx_head_alt_len = str(tok_len)
h64, sha, tok_len = hash_token_sequence(
ctx_tokens[-alt_window:],
)
ctx_tail_alt_hash64 = str(h64)
ctx_tail_alt_sha = sha
ctx_tail_alt_len = str(tok_len)
mid_start = max(0, (len(ctx_tokens) - alt_window) // 2)
h64, sha, tok_len = hash_token_sequence(
ctx_tokens[mid_start : mid_start + alt_window],
)
ctx_mid_alt_hash64 = str(h64)
ctx_mid_alt_sha = sha
ctx_mid_alt_len = str(tok_len)
ctx_anchor_has = True
if def_anchor_has:
def_anchor_available += 1
if ctx_anchor_has:
ctx_anchor_available += 1
def_head_spec = _spec(def_head_len, def_head_hash64, def_head_sha)
def_head_alt_spec = _spec(
def_head_alt_len,
def_head_alt_hash64,
def_head_alt_sha,
)
def_mid_spec = _spec(def_mid_len, def_mid_hash64, def_mid_sha)
def_mid_alt_spec = _spec(
def_mid_alt_len,
def_mid_alt_hash64,
def_mid_alt_sha,
)
def_tail_spec = _spec(def_tail_len, def_tail_hash64, def_tail_sha)
def_tail_alt_spec = _spec(
def_tail_alt_len,
def_tail_alt_hash64,
def_tail_alt_sha,
)
ctx_head_spec = _spec(ctx_head_len, ctx_head_hash64, ctx_head_sha)
ctx_head_alt_spec = _spec(
ctx_head_alt_len,
ctx_head_alt_hash64,
ctx_head_alt_sha,
)
ctx_mid_spec = _spec(ctx_mid_len, ctx_mid_hash64, ctx_mid_sha)
ctx_mid_alt_spec = _spec(
ctx_mid_alt_len,
ctx_mid_alt_hash64,
ctx_mid_alt_sha,
)
ctx_tail_spec = _spec(ctx_tail_len, ctx_tail_hash64, ctx_tail_sha)
ctx_tail_alt_spec = _spec(
ctx_tail_alt_len,
ctx_tail_alt_hash64,
ctx_tail_alt_sha,
)
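        # Span resolution order: exact TEI text match, then full-sequence
        # hash, then head+tail anchor pairs, then a single anchor window.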
token_index = token_cache.get(paper_id)
if doc_index is None:
missing_tei += 1
else:
def_span = doc_index.find_span(definition)
if def_span is not None:
def_start, def_end, def_match = def_span
else:
if def_hash64 and def_sha and def_tok_len and token_index:
span = token_index.find_span_by_hash(
int(def_tok_len),
int(def_hash64),
def_sha,
)
if span:
def_start, def_end = span
def_match = "hash"
else:
expected_len = int(def_tok_len or 0)
head_specs = [
spec
for spec in (def_head_spec, def_head_alt_spec)
if spec
]
mid_specs = [
spec
for spec in (def_mid_spec, def_mid_alt_spec)
if spec
]
tail_specs = [
spec
for spec in (def_tail_spec, def_tail_alt_spec)
if spec
]
if (
token_index
and expected_len
and head_specs
and tail_specs
):
for head in head_specs:
for tail in tail_specs:
anchor_span = _find_span_by_anchors(
token_index,
head,
tail,
expected_len,
mid_specs=mid_specs,
)
if anchor_span:
def_start, def_end = anchor_span
def_match = "anchor"
break
if def_match == "anchor":
break
if (
def_match != "anchor"
and token_index
and expected_len
):
for spec, position in (
(def_head_spec, "head"),
(def_head_alt_spec, "head"),
(def_mid_spec, "head"),
(def_mid_alt_spec, "head"),
(def_tail_spec, "tail"),
(def_tail_alt_spec, "tail"),
):
anchor_span = _find_span_from_anchor(
token_index,
spec,
expected_len,
position,
mid_specs=mid_specs,
)
if anchor_span:
def_start, def_end = anchor_span
def_match = "anchor"
break
if def_match != "anchor":
missing_def += 1
missing_def_rows.append(
{
"paper_id": paper_id,
"concept": row.get("concept", ""),
"reason": "missing_definition_span",
},
)
else:
missing_def += 1
missing_def_rows.append(
{
"paper_id": paper_id,
"concept": row.get("concept", ""),
"reason": "missing_definition_span",
},
)
ctx_span = doc_index.find_span(context)
if ctx_span is not None:
ctx_start, ctx_end, ctx_match = ctx_span
else:
if ctx_hash64 and ctx_sha and ctx_tok_len and token_index:
span = token_index.find_span_by_hash(
int(ctx_tok_len),
int(ctx_hash64),
ctx_sha,
)
if span:
ctx_start, ctx_end = span
ctx_match = "hash"
else:
expected_len = int(ctx_tok_len or 0)
head_specs = [
spec
for spec in (ctx_head_spec, ctx_head_alt_spec)
if spec
]
mid_specs = [
spec
for spec in (ctx_mid_spec, ctx_mid_alt_spec)
if spec
]
tail_specs = [
spec
for spec in (ctx_tail_spec, ctx_tail_alt_spec)
if spec
]
if (
token_index
and expected_len
and head_specs
and tail_specs
):
for head in head_specs:
for tail in tail_specs:
anchor_span = _find_span_by_anchors(
token_index,
head,
tail,
expected_len,
mid_specs=mid_specs,
)
if anchor_span:
ctx_start, ctx_end = anchor_span
ctx_match = "anchor"
break
if ctx_match == "anchor":
break
if (
ctx_match != "anchor"
and token_index
and expected_len
):
for spec, position in (
(ctx_head_spec, "head"),
(ctx_head_alt_spec, "head"),
(ctx_mid_spec, "head"),
(ctx_mid_alt_spec, "head"),
(ctx_tail_spec, "tail"),
(ctx_tail_alt_spec, "tail"),
):
anchor_span = _find_span_from_anchor(
token_index,
spec,
expected_len,
position,
mid_specs=mid_specs,
)
if anchor_span:
ctx_start, ctx_end = anchor_span
ctx_match = "anchor"
break
if ctx_match != "anchor":
missing_ctx += 1
missing_ctx_rows.append(
{
"paper_id": paper_id,
"concept": row.get("concept", ""),
"reason": "missing_context_span",
},
)
else:
missing_ctx += 1
missing_ctx_rows.append(
{
"paper_id": paper_id,
"concept": row.get("concept", ""),
"reason": "missing_context_span",
},
)
output_rows.append(
{
"paper_id": paper_id,
"paper_title": row.get("paper_title", ""),
"paper_doi": doi,
"paper_arxiv": arxiv,
"concept": row.get("concept", ""),
"definition_type": row.get("definition_type", ""),
"source_file": row.get("source_file", ""),
"is_out_of_domain": row.get("is_out_of_domain", ""),
"definition_preserve_linebreaks": def_preserve_linebreaks,
"context_preserve_linebreaks": ctx_preserve_linebreaks,
"definition_preserve_hyphenation": def_preserve_hyphenation,
"context_preserve_hyphenation": ctx_preserve_hyphenation,
"definition_has_bracket_citation": def_has_bracket_citation,
"definition_has_paren_citation": def_has_paren_citation,
"definition_has_letter_digit": def_has_letter_digit,
"context_has_bracket_citation": ctx_has_bracket_citation,
"context_has_paren_citation": ctx_has_paren_citation,
"context_has_letter_digit": ctx_has_letter_digit,
"definition_end_punct": def_end_punct,
"context_end_punct": ctx_end_punct,
"marker_version": "grobid_text_v1",
"hash_version": HASH_VERSION,
"definition_char_start": def_start,
"definition_char_end": def_end,
"definition_match": def_match,
"definition_hash64": def_hash64,
"definition_sha256": def_sha,
"definition_token_count": def_tok_len,
"definition_head_hash64": def_head_hash64,
"definition_head_sha256": def_head_sha,
"definition_head_token_count": def_head_len,
"definition_mid_hash64": def_mid_hash64,
"definition_mid_sha256": def_mid_sha,
"definition_mid_token_count": def_mid_len,
"definition_tail_hash64": def_tail_hash64,
"definition_tail_sha256": def_tail_sha,
"definition_tail_token_count": def_tail_len,
"definition_head_alt_hash64": def_head_alt_hash64,
"definition_head_alt_sha256": def_head_alt_sha,
"definition_head_alt_token_count": def_head_alt_len,
"definition_mid_alt_hash64": def_mid_alt_hash64,
"definition_mid_alt_sha256": def_mid_alt_sha,
"definition_mid_alt_token_count": def_mid_alt_len,
"definition_tail_alt_hash64": def_tail_alt_hash64,
"definition_tail_alt_sha256": def_tail_alt_sha,
"definition_tail_alt_token_count": def_tail_alt_len,
"context_char_start": ctx_start,
"context_char_end": ctx_end,
"context_match": ctx_match,
"context_hash64": ctx_hash64,
"context_sha256": ctx_sha,
"context_token_count": ctx_tok_len,
"context_head_hash64": ctx_head_hash64,
"context_head_sha256": ctx_head_sha,
"context_head_token_count": ctx_head_len,
"context_mid_hash64": ctx_mid_hash64,
"context_mid_sha256": ctx_mid_sha,
"context_mid_token_count": ctx_mid_len,
"context_tail_hash64": ctx_tail_hash64,
"context_tail_sha256": ctx_tail_sha,
"context_tail_token_count": ctx_tail_len,
"context_head_alt_hash64": ctx_head_alt_hash64,
"context_head_alt_sha256": ctx_head_alt_sha,
"context_head_alt_token_count": ctx_head_alt_len,
"context_mid_alt_hash64": ctx_mid_alt_hash64,
"context_mid_alt_sha256": ctx_mid_alt_sha,
"context_mid_alt_token_count": ctx_mid_alt_len,
"context_tail_alt_hash64": ctx_tail_alt_hash64,
"context_tail_alt_sha256": ctx_tail_alt_sha,
"context_tail_alt_token_count": ctx_tail_alt_len,
},
)
fieldnames = [
"paper_id",
"paper_title",
"paper_doi",
"paper_arxiv",
"concept",
"definition_type",
"source_file",
"is_out_of_domain",
"definition_preserve_linebreaks",
"context_preserve_linebreaks",
"definition_preserve_hyphenation",
"context_preserve_hyphenation",
"definition_has_bracket_citation",
"definition_has_paren_citation",
"definition_has_letter_digit",
"context_has_bracket_citation",
"context_has_paren_citation",
"context_has_letter_digit",
"definition_end_punct",
"context_end_punct",
"marker_version",
"hash_version",
"definition_char_start",
"definition_char_end",
"definition_match",
"definition_hash64",
"definition_sha256",
"definition_token_count",
"definition_head_hash64",
"definition_head_sha256",
"definition_head_token_count",
"definition_mid_hash64",
"definition_mid_sha256",
"definition_mid_token_count",
"definition_tail_hash64",
"definition_tail_sha256",
"definition_tail_token_count",
"definition_head_alt_hash64",
"definition_head_alt_sha256",
"definition_head_alt_token_count",
"definition_mid_alt_hash64",
"definition_mid_alt_sha256",
"definition_mid_alt_token_count",
"definition_tail_alt_hash64",
"definition_tail_alt_sha256",
"definition_tail_alt_token_count",
"context_char_start",
"context_char_end",
"context_match",
"context_hash64",
"context_sha256",
"context_token_count",
"context_head_hash64",
"context_head_sha256",
"context_head_token_count",
"context_mid_hash64",
"context_mid_sha256",
"context_mid_token_count",
"context_tail_hash64",
"context_tail_sha256",
"context_tail_token_count",
"context_head_alt_hash64",
"context_head_alt_sha256",
"context_head_alt_token_count",
"context_mid_alt_hash64",
"context_mid_alt_sha256",
"context_mid_alt_token_count",
"context_tail_alt_hash64",
"context_tail_alt_sha256",
"context_tail_alt_token_count",
]
args.output_csv.parent.mkdir(parents=True, exist_ok=True)
with args.output_csv.open("w", encoding="utf-8", newline="") as handle:
writer = csv.DictWriter(handle, fieldnames=fieldnames)
writer.writeheader()
for row in output_rows:
writer.writerow(row)
total = len(output_rows)
print(f"Wrote {total} rows to {args.output_csv}")
print(
"Exact TEI spans missing - "
f"TEI: {missing_tei}, def spans: {missing_def}, ctx spans: {missing_ctx}",
)
print(
"Hash/anchor markers available - "
f"def hash: {def_hash_available}/{total}, "
f"ctx hash: {ctx_hash_available}/{total}, "
f"def anchors: {def_anchor_available}/{total}, "
f"ctx anchors: {ctx_anchor_available}/{total}",
)
print(
"Note: missing exact TEI spans do not block hydration; "
"hash/anchor markers are used as the primary fallback.",
)
if args.report is not None:
report_lines = []
if missing_tei:
report_lines.append(f"Missing TEI: {missing_tei}")
report_lines.append(f"Missing definition spans: {missing_def}")
report_lines.append(f"Missing context spans: {missing_ctx}")
if missing_def_rows:
report_lines.append("")
report_lines.append("Missing definitions (paper_id | concept):")
for item in missing_def_rows:
report_lines.append(
f"- {item['paper_id']} | {item['concept']}",
)
if missing_ctx_rows:
report_lines.append("")
report_lines.append("Missing contexts (paper_id | concept):")
for item in missing_ctx_rows:
report_lines.append(
f"- {item['paper_id']} | {item['concept']}",
)
args.report.parent.mkdir(parents=True, exist_ok=True)
args.report.write_text(
"\n".join(report_lines) + "\n",
encoding="utf-8",
)
print(f"Wrote report to {args.report}")
if __name__ == "__main__":
main()