|
|
from __future__ import annotations

import hashlib
import importlib
import re
import unicodedata
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, Iterable, Optional
|
|
|
|
|
# Import lxml lazily via importlib so a missing dependency surfaces as one
# actionable RuntimeError with an install hint, not an ImportError mid-stack.
try:
    etree = importlib.import_module("lxml.etree")
except Exception as exc:
    raise RuntimeError(
        "Missing lxml dependency. Install with `pip install lxml`.",
    ) from exc
|
|
|
|
|
# TEI XML namespace prefix map used for all find/findall/xpath queries.
NS = {"tei": "http://www.tei-c.org/ns/1.0"}

# TEI element local names whose full text is emitted as one block when
# flattening a document (see extract_blocks_from_tei).
BLOCK_TAGS = {
    "head",
    "p",
    "item",
    "title",
    "label",
    "figDesc",
    "caption",
    "cell",
    "ab",
    "note",
    "quote",
}

# Parameters of the 64-bit polynomial rolling hash (see hash_token_sequence
# and TokenIndex._build_rolling_index).
HASH_BASE = 1_000_003
HASH_MASK = (1 << 64) - 1
# Identifier of the current hashing scheme; not referenced in this chunk —
# presumably recorded by callers so stored hashes can be invalidated. TODO confirm.
HASH_VERSION = "tokhash_v2"
# Token-window sizes for anchor matching; not referenced in this chunk —
# presumably passed by callers as the `window` argument of TokenIndex methods.
ANCHOR_WINDOW = 12
ANCHOR_WINDOW_ALT = 6
|
|
|
|
|
|
|
|
def normalize_paper_id(paper_id: str) -> str:
    """Reduce a paper identifier (DOI string, DOI/arXiv/publisher URL, or
    plain id) to a short bare identifier.

    URLs are resolved in priority order: arXiv abs/pdf id, Elsevier PII,
    DOI suffix, then the last path segment. Non-URL values fall back to the
    DOI-suffix pattern, and finally to the stripped input unchanged.
    """
    candidate = paper_id.strip()
    if candidate.lower().startswith("doi:"):
        candidate = candidate[4:]
    if candidate.startswith("dx.doi.org/"):
        candidate = candidate[len("dx.doi.org/") :]
    if "doi.org/" in candidate:
        candidate = candidate.split("doi.org/", 1)[1]
    if candidate.startswith(("http://", "https://")):
        arxiv_match = re.search(r"arxiv\.org/(abs|pdf)/([^?#]+)", candidate)
        if arxiv_match is not None:
            # Drop a trailing ".pdf" from /pdf/ links.
            return arxiv_match.group(2).replace(".pdf", "")
        pii_match = re.search(r"/pii/([^/?#]+)", candidate)
        if pii_match is not None:
            return pii_match.group(1)
        doi_match = re.search(r"10\.[0-9]+/([^?#]+)", candidate)
        if doi_match is not None:
            return doi_match.group(1)
        segments = [seg for seg in re.split(r"[/?#]", candidate) if seg]
        if segments:
            return segments[-1]
    suffix_match = re.search(r"10\.[0-9]+/([^\s]+)", candidate)
    if suffix_match is not None:
        return suffix_match.group(1)
    return candidate
|
|
|
|
|
|
|
|
def normalize_arxiv(value: str) -> str:
    """Extract a bare versioned arXiv id from `value`.

    Matches both new-style ids (``2101.00001v2``) and old-style ids
    (``hep-th/9901001v1``), case-insensitively. When no versioned id is
    found, the ``arXiv:`` prefix is stripped and the remainder returned.
    """
    stripped = value.strip()
    versioned_id = re.search(
        r"(\d{4}\.\d{4,5}v\d+|[a-z-]+/\d{7}v\d+)",
        stripped,
        re.I,
    )
    if versioned_id is not None:
        return versioned_id.group(1)
    return stripped.replace("arXiv:", "").strip()
|
|
|
|
|
|
|
|
def normalize_doi(value: str) -> str:
    """Lower-case a DOI, dropping a leading doi.org URL prefix if present."""
    doi = value.strip()
    # Check https first, then http, mirroring the resolver URL variants.
    for url_prefix in ("https://doi.org/", "http://doi.org/"):
        if doi.startswith(url_prefix):
            doi = doi[len(url_prefix) :]
    return doi.lower()
|
|
|
|
|
|
|
|
def doi_suffix(value: str) -> str:
    """Return the registrant-suffix part of a DOI (text after ``10.xxxx/``).

    Falls back to the whole normalized value when it does not look like a DOI.
    """
    normalized = normalize_doi(value)
    found = re.search(r"10\.[0-9]+/(.+)", normalized)
    return found.group(1) if found else normalized
|
|
|
|
|
|
|
|
def extract_ids_from_tei(path: Path) -> tuple[Optional[str], Optional[str]]:
    """Read ``(doi, arxiv_id)`` from a GROBID TEI header.

    Scans the ``biblStruct`` idno elements, keeping the first DOI-like and
    the first arXiv-like value. Returns ``(None, None)`` when the file
    cannot be parsed or carries no bibliographic header.
    """
    try:
        root = etree.parse(str(path)).getroot()
    except (OSError, etree.XMLSyntaxError):
        return None, None
    bibl = root.find(
        ".//tei:teiHeader/tei:fileDesc/tei:sourceDesc/tei:biblStruct",
        namespaces=NS,
    )
    if bibl is None:
        return None, None
    doi: Optional[str] = None
    arxiv: Optional[str] = None
    for idno in bibl.findall(".//tei:idno", namespaces=NS):
        raw = (idno.text or "").strip()
        if not raw:
            continue
        kind = (idno.get("type") or "").lower()
        lowered = raw.lower()
        # Accept an explicit type attribute or a recognizable value prefix.
        if doi is None and (kind == "doi" or lowered.startswith("10.")):
            doi = raw
        if arxiv is None and ("arxiv" in kind or lowered.startswith("arxiv")):
            arxiv = normalize_arxiv(raw)
    return doi, arxiv
|
|
|
|
|
|
|
|
def extract_title_from_tei(path: Path) -> Optional[str]:
    """Return the first non-empty title found in a TEI header, else None.

    Tries the titleStmt title first, then the analytic and monogr titles of
    the source biblStruct. Parse failures yield None.
    """
    try:
        root = etree.parse(str(path)).getroot()
    except (OSError, etree.XMLSyntaxError):
        return None
    title_paths = (
        ".//tei:teiHeader/tei:fileDesc/tei:titleStmt/tei:title",
        ".//tei:teiHeader/tei:fileDesc/tei:sourceDesc/tei:biblStruct/tei:analytic/tei:title",
        ".//tei:teiHeader/tei:fileDesc/tei:sourceDesc/tei:biblStruct/tei:monogr/tei:title",
    )
    for expr in title_paths:
        node = root.find(expr, namespaces=NS)
        if node is None:
            continue
        title = "".join(node.itertext()).strip()
        if title:
            return title
    return None
|
|
|
|
|
|
|
|
def extract_text_from_pdf(path: Path) -> str:
    """Extract plain text from a PDF, trying several backends in order.

    Order: pdfplumber (per-page text, augmented with text-flow words), then
    pdfminer.six, then PyPDF2. Pages are joined with blank lines.

    Raises:
        RuntimeError: when pdfplumber is unavailable/fails and neither
            pdfminer.six nor PyPDF2 can be imported.
    """
    pdfplumber_module: Any | None
    try:
        pdfplumber_module = importlib.import_module("pdfplumber")
    except Exception:
        # pdfplumber is optional; fall through to pdfminer/PyPDF2 below.
        pdfplumber_module = None
    pdfplumber_any: Any = pdfplumber_module
    if pdfplumber_any is not None:
        try:
            texts = []
            with pdfplumber_any.open(str(path)) as pdf:
                for page in pdf.pages:
                    page_text = page.extract_text() or ""
                    words_text = ""
                    try:
                        # Word-level extraction in text-flow order can
                        # recover content extract_text() misses.
                        words = page.extract_words(
                            use_text_flow=True,
                            keep_blank_chars=False,
                        )
                        if words:
                            words_text = " ".join(
                                w.get("text", "") for w in words
                            )
                    except Exception:
                        words_text = ""
                    if words_text and words_text not in page_text:
                        # Append word-level text only when it adds content
                        # not already present verbatim in the page text.
                        if page_text:
                            page_text = f"{page_text}\n{words_text}"
                        else:
                            page_text = words_text
                    texts.append(page_text)
            return "\n\n".join(texts)
        except Exception:
            # Any pdfplumber failure falls through to the next backend.
            pass
    try:
        from pdfminer.high_level import extract_text
    except Exception:
        try:
            from PyPDF2 import PdfReader
        except Exception as py_exc:
            raise RuntimeError(
                "PDF fallback extraction requires pdfminer.six "
                "or PyPDF2. Install with `pip install pdfminer.six`.",
            ) from py_exc
        # PyPDF2 path: used only when pdfminer is missing but PyPDF2 is not.
        reader = PdfReader(str(path))
        texts = []
        for page in reader.pages:
            texts.append(page.extract_text() or "")
        return "\n\n".join(texts)
    return extract_text(str(path))
|
|
|
|
|
|
|
|
def build_tei_index(tei_dirs: Iterable[Path]) -> Dict[str, Path]:
    """Index GROBID TEI files by paper stem.

    Scans each existing directory for ``*.grobid.tei.xml`` files, derives
    the key by stripping that suffix and an optional ``paper_`` prefix,
    and keeps the first path seen for each key.
    """
    suffix = ".grobid.tei.xml"
    prefix = "paper_"
    index: Dict[str, Path] = {}
    for directory in tei_dirs:
        if not directory.exists():
            continue
        for tei_path in directory.glob("*" + suffix):
            stem = tei_path.name[: -len(suffix)]
            if stem.startswith(prefix):
                stem = stem[len(prefix) :]
            # First directory listing a stem wins.
            index.setdefault(stem, tei_path)
    return index
|
|
|
|
|
|
|
|
def _local_name(tag: str) -> str: |
|
|
return tag.split("}", 1)[-1] if "}" in tag else tag |
|
|
|
|
|
|
|
|
def extract_blocks_from_tei(path: Path) -> list[str]:
    """Collect block-level text from a TEI document, in document order.

    Walks the header abstract(s) first, then the ``text`` element. Any
    element whose local name is in BLOCK_TAGS is emitted as one flattened
    text block; other elements are recursed into. Numbered ``head``/``label``
    elements get their ``n`` attribute prepended.
    """
    root = etree.parse(str(path)).getroot()
    blocks: list[str] = []

    def visit(node) -> None:
        # Emit block-level elements whole; recurse into everything else.
        name = _local_name(node.tag)
        if name in BLOCK_TAGS:
            content = "".join(node.itertext()).strip()
            if content:
                numbering = node.get("n")
                if numbering and name in {"head", "label"}:
                    content = f"{numbering} {content}".strip()
                blocks.append(content)
            return
        for child in node:
            visit(child)

    for abstract in root.xpath(
        ".//tei:teiHeader//tei:abstract",
        namespaces=NS,
    ):
        visit(abstract)

    body = root.find(".//tei:text", namespaces=NS)
    if body is not None:
        visit(body)
    return blocks
|
|
|
|
|
|
|
|
def _normalize_token(token: str) -> str: |
|
|
return unicodedata.normalize("NFKC", token).lower() |
|
|
|
|
|
|
|
|
# Characters treated as hyphens when rejoining words broken across line
# breaks (ASCII hyphen plus common Unicode dash variants and minus sign).
HYPHEN_CHARS = {
    "-",
    "\u2010",  # hyphen
    "\u2011",  # non-breaking hyphen
    "\u2012",  # figure dash
    "\u2013",  # en dash
    "\u2014",  # em dash
    "\u2212",  # minus sign
}
# Invisible line-break hint; dropped entirely by tokenize_text.
SOFT_HYPHEN = "\u00ad"

# Bounded-length bracketed/parenthesized spans that may be inline citations;
# the _looks_like_*_citation predicates below filter actual candidates.
CITATION_BRACKET_RE = re.compile(r"\[[^\]]{0,120}\]")
CITATION_PAREN_RE = re.compile(r"\([^\)]{0,120}\)")
|
|
|
|
|
|
|
|
def _looks_like_bracket_citation(text: str) -> bool: |
|
|
return any(ch.isdigit() for ch in text) |
|
|
|
|
|
|
|
|
def _looks_like_paren_citation(text: str) -> bool: |
|
|
if not any(ch.isdigit() for ch in text): |
|
|
return False |
|
|
lowered = text.lower() |
|
|
if "et al" in lowered: |
|
|
return True |
|
|
if re.search(r"\b(19|20)\d{2}\b", text): |
|
|
return True |
|
|
return False |
|
|
|
|
|
|
|
|
def strip_citations(
    text: str,
    *,
    strip_brackets: bool = True,
    strip_parens: bool = False,
) -> str:
    """Replace citation-like bracketed/parenthesized spans with one space.

    Candidate spans come from CITATION_BRACKET_RE / CITATION_PAREN_RE and
    are kept only when the matching heuristic says they look like actual
    citations. Overlapping spans are merged before removal.
    """
    if not text:
        return text

    removals: list[tuple[int, int]] = []
    if strip_brackets:
        removals.extend(
            (m.start(), m.end())
            for m in CITATION_BRACKET_RE.finditer(text)
            if _looks_like_bracket_citation(m.group(0))
        )
    if strip_parens:
        removals.extend(
            (m.start(), m.end())
            for m in CITATION_PAREN_RE.finditer(text)
            if _looks_like_paren_citation(m.group(0))
        )
    if not removals:
        return text

    # Merge overlapping/adjacent spans so each removed region yields
    # exactly one replacement space.
    removals.sort()
    merged: list[tuple[int, int]] = []
    for lo, hi in removals:
        if merged and lo <= merged[-1][1]:
            prev_lo, prev_hi = merged[-1]
            merged[-1] = (prev_lo, max(prev_hi, hi))
        else:
            merged.append((lo, hi))

    pieces: list[str] = []
    pos = 0
    for lo, hi in merged:
        if pos < lo:
            pieces.append(text[pos:lo])
        pieces.append(" ")
        pos = hi
    if pos < len(text):
        pieces.append(text[pos:])
    return "".join(pieces)
|
|
|
|
|
|
|
|
def tokenize_text(
    text: str,
    *,
    return_spans: bool = False,
) -> tuple[list[str], Optional[list[tuple[int, int]]]]:
    """Split text into normalized word/number tokens.

    A token is a maximal run of letters or a maximal run of digits (an
    alpha/digit boundary starts a new token). Soft hyphens are skipped
    entirely, and a hyphen after a letter followed (possibly across
    whitespace) by another letter is treated as line-break hyphenation:
    the word halves are joined into one token. Each token is normalized
    via _normalize_token (NFKC + lower-case).

    Returns (tokens, spans) where spans holds (start, end-exclusive)
    character offsets into `text` when return_spans is True, else
    (tokens, None).
    """
    tokens: list[str] = []
    spans: list[tuple[int, int]] = []
    i = 0
    while i < len(text):
        ch = text[i]
        if ch == SOFT_HYPHEN:
            # Invisible line-break hint: contributes nothing to any token.
            i += 1
            continue
        if ch.isalnum():
            start = i
            last_idx = i  # index of the last character kept in this token
            last_alpha = ch.isalpha()
            token_chars = [ch]
            i += 1
            while i < len(text):
                ch = text[i]
                if ch == SOFT_HYPHEN:
                    i += 1
                    continue
                if ch.isalnum():
                    is_alpha = ch.isalpha()
                    if is_alpha != last_alpha:
                        # Letter/digit boundary ends the current token.
                        break
                    token_chars.append(ch)
                    last_idx = i
                    last_alpha = is_alpha
                    i += 1
                    continue
                if ch in HYPHEN_CHARS and last_alpha:
                    # Hyphen after a letter: if a letter follows (possibly
                    # across whitespace), rejoin the hyphenated halves.
                    j = i + 1
                    while j < len(text) and text[j].isspace():
                        j += 1
                    if j < len(text) and text[j].isalpha():
                        i = j
                        continue
                break
            tokens.append(_normalize_token("".join(token_chars)))
            if return_spans:
                # Span covers original chars, including skipped hyphens.
                spans.append((start, last_idx + 1))
        else:
            i += 1
    return tokens, spans if return_spans else None
|
|
|
|
|
|
|
|
def hash_token(token: str) -> int:
    """Return a stable 64-bit hash of `token` (blake2b, 8-byte digest).

    Stable across processes/runs, unlike the builtin hash(); feeds the
    polynomial rolling hash in hash_token_sequence and TokenIndex.
    """
    # hashlib is imported at module scope; the previous per-call import
    # added avoidable overhead on this hot path.
    digest = hashlib.blake2b(token.encode("utf-8"), digest_size=8).digest()
    return int.from_bytes(digest, "big")
|
|
|
|
|
|
|
|
def hash_token_sequence(tokens: list[str]) -> tuple[int, str, int]:
    """Hash a token sequence for anchor lookup.

    Returns ``(rolling, sha, count)``: the 64-bit polynomial rolling hash
    compatible with TokenIndex._build_rolling_index, a sha256 hex digest of
    the space-joined normalized tokens (collision confirmation), and the
    token count. Tokens are re-normalized so callers may pass raw text
    tokens.
    """
    # hashlib is imported at module scope; the previous per-call import
    # added avoidable overhead.
    normalized = [_normalize_token(token) for token in tokens]
    rolling = 0
    for token in normalized:
        rolling = ((rolling * HASH_BASE) + hash_token(token)) & HASH_MASK
    joined = " ".join(normalized).encode("utf-8")
    sha = hashlib.sha256(joined).hexdigest()
    return rolling, sha, len(normalized)
|
|
|
|
|
|
|
|
@dataclass
class TokenIndex:
    """Token-level index over a document.

    Locates a window of tokens by its polynomial rolling hash plus a sha256
    confirmation digest (the pair produced by hash_token_sequence), and maps
    matches back to character offsets in the original text.
    """

    # Original document text.
    doc_text: str
    # Normalized tokens in document order (see tokenize_text).
    tokens: list[str]
    # (start, end-exclusive) character span in doc_text for each token.
    spans: list[tuple[int, int]]
    # Per-token 64-bit hashes (hash_token) feeding the rolling hash.
    token_hashes: list[int]
    # window size -> {rolling hash -> [start token positions]}, built lazily.
    rolling_cache: dict[int, dict[int, list[int]]] = field(
        default_factory=dict,
    )

    @classmethod
    def from_text(cls, doc_text: str) -> "TokenIndex":
        """Tokenize doc_text and precompute per-token hashes."""
        tokens, spans = tokenize_text(doc_text, return_spans=True)
        token_hashes = [hash_token(t) for t in tokens]
        return cls(
            doc_text=doc_text,
            tokens=tokens,
            spans=spans or [],
            token_hashes=token_hashes,
        )

    def _build_rolling_index(self, window: int) -> dict[int, list[int]]:
        """Return (and cache) rolling-hash -> window start positions."""
        if window in self.rolling_cache:
            return self.rolling_cache[window]
        index: dict[int, list[int]] = {}
        if window <= 0 or window > len(self.tokens):
            # Degenerate window: cache the empty index so we don't rebuild.
            self.rolling_cache[window] = index
            return index

        # HASH_BASE ** (window - 1) mod 2**64: weight of the outgoing token.
        pow_base = 1
        for _ in range(window - 1):
            pow_base = (pow_base * HASH_BASE) & HASH_MASK

        # Hash of the first window.
        rolling = 0
        for i in range(window):
            rolling = (
                (rolling * HASH_BASE) + self.token_hashes[i]
            ) & HASH_MASK
        index.setdefault(rolling, []).append(0)

        # Slide the window one token at a time: drop token i-1, add the new
        # rightmost token.
        for i in range(1, len(self.tokens) - window + 1):
            remove = (self.token_hashes[i - 1] * pow_base) & HASH_MASK
            rolling = (rolling - remove) & HASH_MASK
            rolling = (
                (rolling * HASH_BASE) + self.token_hashes[i + window - 1]
            ) & HASH_MASK
            index.setdefault(rolling, []).append(i)

        self.rolling_cache[window] = index
        return index

    def _positions_for_hash(
        self,
        window: int,
        target_hash: int,
        target_sha: str,
    ) -> list[int]:
        """Start positions whose window matches both rolling hash and sha."""
        index = self._build_rolling_index(window)
        candidates = index.get(target_hash, [])
        if not candidates:
            return []
        positions: list[int] = []
        for start_idx in candidates:
            end_idx = start_idx + window - 1
            if end_idx >= len(self.tokens):
                continue
            token_slice = self.tokens[start_idx : start_idx + window]
            # sha256 confirmation guards against rolling-hash collisions.
            # (hashlib is imported at module scope; the previous per-call
            # import was redundant.)
            sha = hashlib.sha256(
                " ".join(token_slice).encode("utf-8"),
            ).hexdigest()
            if sha == target_sha:
                positions.append(start_idx)
        return positions

    def find_token_span_by_hash(
        self,
        window: int,
        target_hash: int,
        target_sha: str,
    ) -> Optional[tuple[int, int]]:
        """First matching window as inclusive (start, end) token indices."""
        positions = self._positions_for_hash(window, target_hash, target_sha)
        if not positions:
            return None
        start_idx = positions[0]
        end_idx = start_idx + window - 1
        return start_idx, end_idx

    def find_token_positions_by_hash(
        self,
        window: int,
        target_hash: int,
        target_sha: str,
    ) -> list[int]:
        """All matching window start token positions (possibly empty)."""
        return self._positions_for_hash(window, target_hash, target_sha)

    def find_span_by_hash(
        self,
        window: int,
        target_hash: int,
        target_sha: str,
    ) -> Optional[tuple[int, int]]:
        """First matching window as (start_char, end_char) into doc_text."""
        span = self.find_token_span_by_hash(window, target_hash, target_sha)
        if span is None:
            return None
        start_idx, end_idx = span
        start_char = self.spans[start_idx][0]
        end_char = self.spans[end_idx][1]
        return start_char, end_char
        # NOTE: an unreachable trailing `return None` after the return
        # above was removed; it was dead code.
|
|
|
|
|
|
|
|
@dataclass
class DocIndex:
    """Normalized-text index for locating quoted spans in a document.

    Two parallel normalizations of doc_text are kept, each with a map from
    normalized positions back to original character offsets:

    - norm_space: lower-cased alphanumerics with every run of other
      characters collapsed to a single space (hyphenated line breaks
      rejoined);
    - norm_nospace: the same but with separators dropped entirely.

    find_span searches these forgiving forms and translates hits back into
    (start, end) offsets in the original doc_text.
    """

    # Original text; offsets returned by find_span index into this string.
    doc_text: str
    # Space-collapsed normalized text.
    norm_space: str
    # norm_space_map[k] = doc_text offset of the source char of norm_space[k].
    norm_space_map: list[int]
    # Separator-free normalized text.
    norm_nospace: str
    # norm_nospace_map[k] = doc_text offset of the source char of norm_nospace[k].
    norm_nospace_map: list[int]

    @classmethod
    def from_tei(cls, tei_path: Path) -> "DocIndex":
        """Build an index over the concatenated text blocks of a TEI file."""
        blocks = extract_blocks_from_tei(tei_path)
        doc_text = " ".join(blocks)
        return cls.from_text(doc_text)

    @classmethod
    def from_text(cls, doc_text: str) -> "DocIndex":
        """Build both normalizations and their offset maps in one pass."""
        norm_space: list[str] = []
        norm_space_map: list[int] = []
        norm_nospace: list[str] = []
        norm_nospace_map: list[int] = []
        prev_space = False
        i = 0
        while i < len(doc_text):
            ch = doc_text[i]
            if ch == "-" and i > 0 and doc_text[i - 1].isalpha():
                # Hyphen after a letter: if a letter follows (possibly across
                # whitespace), jump to it so hyphenated line breaks rejoin.
                j = i + 1
                while j < len(doc_text) and doc_text[j].isspace():
                    j += 1
                if j < len(doc_text) and doc_text[j].isalpha():
                    i = j
                    continue
            lower = ch.lower()
            if lower.isalnum():
                norm_space.append(lower)
                norm_space_map.append(i)
                norm_nospace.append(lower)
                norm_nospace_map.append(i)
                prev_space = False
            else:
                # Collapse a run of non-alphanumerics to one space, mapped
                # to the first character of the run.
                if not prev_space:
                    norm_space.append(" ")
                    norm_space_map.append(i)
                prev_space = True
            i += 1

        # Trim leading/trailing separator placeholders so searches never
        # have to account for edge spaces.
        while norm_space and norm_space[0] == " ":
            norm_space.pop(0)
            norm_space_map.pop(0)
        while norm_space and norm_space[-1] == " ":
            norm_space.pop()
            norm_space_map.pop()

        return cls(
            doc_text=doc_text,
            norm_space="".join(norm_space),
            norm_space_map=norm_space_map,
            norm_nospace="".join(norm_nospace),
            norm_nospace_map=norm_nospace_map,
        )

    def find_span(self, query: str) -> Optional[tuple[int, int, str]]:
        """Locate `query` in the document, most-exact normalization first.

        Returns (start, end, mode) with offsets into doc_text, where mode
        is "space", "space_trim" (a leading section number was removed from
        the query) or "nospace"; None when the query cannot be found.
        """
        if not query:
            return None
        n_q, n_q_ns = _normalize_query(query)
        idx = self.norm_space.find(n_q)
        if idx != -1:
            start = self.norm_space_map[idx]
            end = self.norm_space_map[idx + len(n_q) - 1] + 1
            return start, end, "space"

        # Retry without a leading section number (e.g. "3.1 " on headings).
        trimmed = re.sub(r"^\s*\d+(?:\.\d+)*\s+", "", query)
        if trimmed != query:
            n_q_trim, n_q_trim_ns = _normalize_query(trimmed)
            idx = self.norm_space.find(n_q_trim)
            if idx != -1:
                start = self.norm_space_map[idx]
                end = self.norm_space_map[idx + len(n_q_trim) - 1] + 1
                return start, end, "space_trim"
            # Use the trimmed query for the separator-free fallback too.
            n_q_ns = n_q_trim_ns

        # Last resort: match ignoring all separators.
        idx = self.norm_nospace.find(n_q_ns)
        if idx != -1:
            start = self.norm_nospace_map[idx]
            end = self.norm_nospace_map[idx + len(n_q_ns) - 1] + 1
            return start, end, "nospace"
        return None

    def extract_span(self, start: Optional[int], end: Optional[int]) -> str:
        """Return doc_text[start:end], or "" for missing/invalid bounds."""
        if start is None or end is None:
            return ""
        if start < 0 or end > len(self.doc_text) or start >= end:
            return ""
        return self.doc_text[start:end]
|
|
|
|
|
|
|
|
def _normalize_query(text: str) -> tuple[str, str]: |
|
|
norm_space: list[str] = [] |
|
|
norm_nospace: list[str] = [] |
|
|
prev_space = False |
|
|
i = 0 |
|
|
while i < len(text): |
|
|
ch = text[i] |
|
|
if ch == "-" and i > 0 and text[i - 1].isalpha(): |
|
|
j = i + 1 |
|
|
while j < len(text) and text[j].isspace(): |
|
|
j += 1 |
|
|
if j < len(text) and text[j].isalpha(): |
|
|
i = j |
|
|
continue |
|
|
lower = ch.lower() |
|
|
if lower.isalnum(): |
|
|
norm_space.append(lower) |
|
|
norm_nospace.append(lower) |
|
|
prev_space = False |
|
|
else: |
|
|
if not prev_space: |
|
|
norm_space.append(" ") |
|
|
prev_space = True |
|
|
i += 1 |
|
|
|
|
|
while norm_space and norm_space[0] == " ": |
|
|
norm_space.pop(0) |
|
|
while norm_space and norm_space[-1] == " ": |
|
|
norm_space.pop() |
|
|
return "".join(norm_space), "".join(norm_nospace) |
|
|
|