| from __future__ import annotations |
|
|
import base64
import contextlib
import difflib
import html
import importlib.util
import inspect
import io
import json
import math
import os
import re
import shlex
import shutil
import subprocess
import sys
import time
import warnings
from functools import lru_cache
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any, Callable, Dict, List, Optional, Tuple
|
|
| import requests |
|
|
| try: |
| from dotenv import load_dotenv |
| except Exception: |
| load_dotenv = None |
|
|
|
|
| ROOT = Path(__file__).resolve().parent |
| ProgressHandler = Optional[Callable[[str], None]] |
| _DEEPSEEK_OCR_MODEL = None |
| _DEEPSEEK_OCR_PROCESSOR = None |
| _DEEPSEEK_OCR_MODEL_ID = None |
| _DEEPSEEK_OCR_LOAD_ERROR = None |
| _GENERIC_OCR_BACKENDS: Dict[str, Any] = {} |
|
|
|
|
| _QIANFAN_OCR_DEFAULT_MODEL_ID = "baidu/Qianfan-OCR" |
| _GLM_OCR_DEFAULT_MODEL_ID = "zai-org/GLM-OCR" |
| _PADDLEOCR_VL_DEFAULT_MODEL_ID = "PaddlePaddle/PaddleOCR-VL-1.5" |
| _GOT_OCR_DEFAULT_MODEL_ID = "stepfun-ai/GOT-OCR2_0" |
| _MONKEYOCR_DEFAULT_MODEL_ID = "echo840/MonkeyOCR-pro-3B" |
| _QWEN_OCR_DEFAULT_MODEL_ID = "Qwen/Qwen3.6-35B-A3B" |
| _GEMMA4_VLLM_DEFAULT_MODEL_ID = "google/gemma-4-26B-A4B-it" |
| _GEMMA4_LLAMA_CPP_DEFAULT_MODEL_ID = "gemma4-26b-llamacpp" |
|
|
|
|
| if load_dotenv is not None: |
| load_dotenv(ROOT / "sec_parser" / ".env") |
| load_dotenv(ROOT / ".env") |
|
|
|
|
| class TableOCRContentError(RuntimeError): |
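    """Raised when an OCR response arrives but its transcribed content is unusable."""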
| def __init__( |
| self, |
| message: str, |
| *, |
| response_payload: Optional[Dict[str, Any]] = None, |
| details: Optional[Dict[str, Any]] = None, |
| ): |
| super().__init__(message) |
| self.response_payload = response_payload or {} |
| self.details = details or {} |
|
|
|
|
| class TableOCRTranscriptionText(str): |
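    """str subclass that carries transcription metadata: the effective model id, latency figures, and token counts."""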
| def __new__( |
| cls, |
| text: str, |
| *, |
| effective_model_id: Optional[str] = None, |
| thinking_latency_s: Optional[float] = None, |
| total_latency_s: Optional[float] = None, |
| first_content_latency_s: Optional[float] = None, |
| reasoning_tokens: Optional[int] = None, |
| completion_tokens: Optional[int] = None, |
| ): |
| obj = super().__new__(cls, text) |
| obj.effective_model_id = normalize_text(effective_model_id or "") |
| obj.thinking_latency_s = ( |
| max(0.0, float(thinking_latency_s)) |
| if isinstance(thinking_latency_s, (int, float)) |
| else None |
| ) |
| obj.total_latency_s = ( |
| max(0.0, float(total_latency_s)) |
| if isinstance(total_latency_s, (int, float)) |
| else None |
| ) |
| obj.first_content_latency_s = ( |
| max(0.0, float(first_content_latency_s)) |
| if isinstance(first_content_latency_s, (int, float)) |
| else None |
| ) |
| obj.reasoning_tokens = ( |
| max(0, int(reasoning_tokens)) |
| if isinstance(reasoning_tokens, (int, float)) |
| else None |
| ) |
| obj.completion_tokens = ( |
| max(0, int(completion_tokens)) |
| if isinstance(completion_tokens, (int, float)) |
| else None |
| ) |
| return obj |
|
|
|
|
| def emit_progress(message: str, *, progress_handler: ProgressHandler = None) -> None: |
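    """Send a progress message to progress_handler when one is provided; otherwise print it to stdout."""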
| if progress_handler is not None: |
| progress_handler(message) |
| return |
| print(message, flush=True) |
|
|
|
|
| def default_table_image_transcription_prompt() -> str: |
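    """Default prompt asking a vision model to transcribe a table image into a minimal HTML fragment."""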
| return ( |
| "Transcribe the table in the image into a minimal HTML fragment.\n" |
| "Return only plain HTML using <table>, <tr>, <th>, <td>, <br>, and inline formatting tags such as <strong>/<b>, <em>/<i>, <u>, <sup>, and <sub> when needed.\n" |
| "Do not use markdown fences, commentary, CSS, or JavaScript.\n" |
| "Assume every visible textual element inside the image crop belongs to the table and must be transcribed.\n" |
| "Include all visible titles, units, stub labels, grouped headers, footnotes, side labels, and notes, even when they sit above, below, or beside the main grid.\n" |
| "Do not omit text just because it looks visually separate from the numeric body; if it is visible in the crop, capture it in the output table.\n" |
| "Preserve merged-cell structure faithfully. Use colspan and rowspan when they are needed to represent the visible table layout.\n" |
| "Do not duplicate merged-cell text across multiple cells unless the image itself repeats that text.\n" |
| "Preserve visible text formatting exactly when present, including bold, italics, underline, superscripts, and subscripts.\n" |
| "Ignore purely visual table styling such as thick borders, ruling lines, shading, and decorative emphasis; transcribe only the table's textual content and structure.\n" |
| "Preserve row order, column order, signs, punctuation, decimals, and capitalization exactly." |
| ) |
|
|
|
|
| def strip_code_fences(text: str) -> str: |
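    """Strip a leading/trailing markdown code fence from model output, if present."""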
| text = (text or "").strip() |
| text = re.sub(r"^```(?:html)?\s*", "", text, flags=re.IGNORECASE) |
| text = re.sub(r"\s*```$", "", text) |
| return text.strip() |
|
|
|
|
| def normalize_text(text: str) -> str: |
| return str(text or "").strip() |
|
|
|
|
| _HTML_TABLE_RE = re.compile(r"<table\b", re.IGNORECASE) |
| _HTML_TABLE_FRAGMENT_RE = re.compile(r"<table\b[\s\S]*?</table>", re.IGNORECASE) |
| _GEMMA4_THOUGHT_BLOCK_RE = re.compile( |
| r"<\|channel\>thought\s*[\s\S]*?<channel\|>", |
| re.IGNORECASE, |
| ) |
| _GEMMA4_THINK_BLOCK_RE = re.compile(r"<think>[\s\S]*?</think>", re.IGNORECASE) |
| _GEMMA4_CHANNEL_MARKER_RE = re.compile( |
| r"<\|channel\>[A-Za-z0-9_-]*\s*|<channel\|>", |
| re.IGNORECASE, |
| ) |
| _LATEX_INLINE_TEXT_RE = re.compile( |
| r"\\(?:[A-Za-z]+|[()[\]{}%&_#$,])" |
| r"|(?<!\\)\$\$?.+?(?<!\\)\$\$?" |
| r"|(?:[A-Za-z0-9])(?:\^|_)\{", |
| re.DOTALL, |
| ) |
| _LATEX_TABULAR_RE = re.compile( |
| r"\\begin\s*\{tabular\}\s*\{(?P<spec>[^{}]*)\}(?P<body>[\s\S]*?)\\end\s*\{tabular\}", |
| re.IGNORECASE, |
| ) |
| _LATEX_BEGIN_ENV_RE = re.compile(r"\\begin\s*\{(?P<name>[^{}]+)\}", re.IGNORECASE) |
| _LATEX_RULE_COMMAND_RE = re.compile( |
| r"\\(?:hline|toprule|midrule|bottomrule)\b" |
| r"|\\(?:c|x)?cline(?:\[[^\]]+\])?\{[^{}]*\}" |
| r"|\\cmidrule(?:\([^)]+\))?\{[^{}]*\}", |
| re.IGNORECASE, |
| ) |
| _LATEX_TABLE_ENVIRONMENT_ARG_COUNTS: Dict[str, int] = { |
| "array": 1, |
| "longtable": 1, |
| "longtabu": 2, |
| "nicetabular": 1, |
| "tabu": 1, |
| "tabular": 1, |
| "tabular*": 2, |
| "tabularx": 2, |
| "tabulary": 2, |
| "tblr": 1, |
| "xltabular": 2, |
| } |
| _LATEX_INLINE_SYMBOLS: Dict[str, str] = { |
| "alpha": "α", |
| "beta": "β", |
| "gamma": "γ", |
| "delta": "δ", |
| "epsilon": "ε", |
| "theta": "θ", |
| "lambda": "λ", |
| "mu": "μ", |
| "pi": "π", |
| "sigma": "σ", |
| "tau": "τ", |
| "phi": "φ", |
| "omega": "ω", |
| "Gamma": "Γ", |
| "Delta": "Δ", |
| "Theta": "Θ", |
| "Lambda": "Λ", |
| "Pi": "Π", |
| "Sigma": "Σ", |
| "Phi": "Φ", |
| "Omega": "Ω", |
| "cdot": "·", |
| "times": "×", |
| "div": "÷", |
| "leq": "≤", |
| "geq": "≥", |
| "neq": "≠", |
| "approx": "≈", |
| "sim": "~", |
| "pm": "±", |
| "mp": "∓", |
| "to": "→", |
| "rightarrow": "→", |
| "leftarrow": "←", |
| "leftrightarrow": "↔", |
| "infty": "∞", |
| "degree": "°", |
| "circ": "°", |
| "ldots": "...", |
| "dots": "...", |
| "cdots": "...", |
| "vdots": "...", |
| "ddots": "...", |
| } |
| _LATEX_INLINE_UNWRAP_COMMANDS = { |
| "bar", |
| "boxed", |
| "fbox", |
| "hphantom", |
| "mathbb", |
| "mathcal", |
| "mathfrak", |
| "mathrm", |
| "mathsf", |
| "mathtt", |
| "mbox", |
| "operatorname", |
| "overline", |
| "phantom", |
| "rlap", |
| "smash", |
| "text", |
| "textnormal", |
| "textrm", |
| "textsf", |
| "texttt", |
| "tilde", |
| "vec", |
| "vphantom", |
| "widehat", |
| "widetilde", |
| } |
| _LATEX_INLINE_BOLD_COMMANDS = {"bf", "bfseries", "bm", "boldsymbol", "mathbf", "textbf"} |
| _LATEX_INLINE_ITALIC_COMMANDS = {"emph", "it", "itshape", "mathit", "textit"} |
| _LATEX_INLINE_UNDERLINE_COMMANDS = {"dashuline", "uline", "underline", "uwave"} |
| _LATEX_LAYOUT_COMMAND_RE = re.compile( |
| r"\\(?:addlinespace(?:\[[^\]]*\])?|arraybackslash|centering|footnotesize|Huge|huge|LARGE|Large|large|" |
| r"morecmidrulespace|newline|normalcolor|normalfont|normalsize|par|qquad|quad|raggedleft|raggedright|" |
| r"scriptsize|small|tabularnewline|tiny)\b", |
| re.IGNORECASE, |
| ) |
| _LATEX_HEADER_FOOTER_MARKER_RE = re.compile( |
| r"\\(?:endfirsthead|endhead|endfoot|endlastfoot)\b", |
| re.IGNORECASE, |
| ) |
| _LATEX_TABLENOTES_ENV_RE = re.compile( |
| r"\\begin\s*\{tablenotes\}(?P<body>[\s\S]*?)\\end\s*\{tablenotes\}", |
| re.IGNORECASE, |
| ) |
|
|
|
|
| def _consume_latex_braced_group(text: str, start_index: int) -> tuple[Optional[str], int]: |
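    """Consume a balanced {...} group at start_index; return (contents, index past the group), or (None, start_index) if no group starts there."""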
| if start_index >= len(text) or text[start_index] != "{": |
| return None, start_index |
| depth = 0 |
| index = start_index |
| while index < len(text): |
| char = text[index] |
| if char == "\\": |
| index += 2 |
| continue |
| if char == "{": |
| depth += 1 |
| elif char == "}": |
| depth -= 1 |
| if depth == 0: |
| return text[start_index + 1 : index], index + 1 |
| index += 1 |
| return None, start_index |
|
|
|
|
| def _consume_latex_optional_bracket_group(text: str, start_index: int) -> int: |
| _, next_index = _consume_latex_bracket_group(text, start_index) |
| return next_index |
|
|
|
|
| def _consume_latex_bracket_group(text: str, start_index: int) -> tuple[Optional[str], int]: |
| if start_index >= len(text) or text[start_index] != "[": |
| return None, start_index |
| depth = 0 |
| index = start_index |
| while index < len(text): |
| char = text[index] |
| if char == "\\": |
| index += 2 |
| continue |
| if char == "[": |
| depth += 1 |
| elif char == "]": |
| depth -= 1 |
| if depth == 0: |
| return text[start_index + 1 : index], index + 1 |
| index += 1 |
| return None, start_index |
|
|
|
|
| def _consume_latex_command( |
| text: str, |
| command: str, |
| *, |
| required_arg_count: int, |
| optional_arg_count: int = 0, |
| allow_star: bool = False, |
| ) -> tuple[Optional[Dict[str, Any]], int]: |
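    r"""Parse \command (plus optional *, [...] groups, and required {...} groups) at the start of text, ignoring leading whitespace; return (payload, end_index) on success or (None, 0)."""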
| index = 0 |
| while index < len(text) and text[index].isspace(): |
| index += 1 |
| prefix = f"\\{command}" |
| if not text.startswith(prefix, index): |
| return None, 0 |
    index += len(prefix)
    if index < len(text) and text[index].isalpha():
        # Reject partial matches such as \multicolumnx when looking for \multicolumn.
        return None, 0
|
|
| starred = False |
| if allow_star and index < len(text) and text[index] == "*": |
| starred = True |
| index += 1 |
|
|
| optional_args: List[str] = [] |
| for _ in range(optional_arg_count): |
| while index < len(text) and text[index].isspace(): |
| index += 1 |
| optional_arg, next_index = _consume_latex_bracket_group(text, index) |
| if optional_arg is None: |
| break |
| optional_args.append(optional_arg) |
| index = next_index |
|
|
| args: List[str] = [] |
| for _ in range(required_arg_count): |
| while index < len(text) and text[index].isspace(): |
| index += 1 |
| arg, next_index = _consume_latex_braced_group(text, index) |
| if arg is None: |
| return None, 0 |
| args.append(arg) |
| index = next_index |
|
|
| return { |
| "args": args, |
| "optional_args": optional_args, |
| "starred": starred, |
| }, index |
|
|
|
|
| def _consume_latex_command_args(text: str, command: str, arg_count: int) -> tuple[Optional[List[str]], int]: |
| payload, end_index = _consume_latex_command( |
| text, |
| command, |
| required_arg_count=arg_count, |
| ) |
| if payload is None: |
| return None, 0 |
| return list(payload["args"]), end_index |
|
|
|
|
| def _split_latex_top_level(text: str, *, separator: str) -> List[str]: |
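    r"""Split text on a top-level separator ("&" or "\\"), ignoring separators nested inside braces; a "\\" row break also swallows any trailing [...] spacing argument."""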
| parts: List[str] = [] |
| current: List[str] = [] |
| brace_depth = 0 |
| index = 0 |
| while index < len(text): |
| char = text[index] |
| if char == "\\": |
| if separator == "\\\\" and text.startswith("\\\\", index) and brace_depth == 0: |
| parts.append("".join(current)) |
| current = [] |
| index += 2 |
| while index < len(text) and text[index].isspace(): |
| index += 1 |
| next_index = _consume_latex_optional_bracket_group(text, index) |
| if next_index != index: |
| index = next_index |
| continue |
| if index + 1 < len(text): |
| current.append(text[index : index + 2]) |
| index += 2 |
| continue |
| if char == "{": |
| brace_depth += 1 |
| elif char == "}" and brace_depth > 0: |
| brace_depth -= 1 |
| elif separator == "&" and char == "&" and brace_depth == 0: |
| parts.append("".join(current)) |
| current = [] |
| index += 1 |
| continue |
| current.append(char) |
| index += 1 |
| parts.append("".join(current)) |
| return parts |
|
|
|
|
| def _replace_latex_command_occurrences( |
| text: str, |
| command: str, |
| *, |
| required_arg_count: int, |
| optional_arg_count: int = 0, |
| allow_star: bool = False, |
| replacement: Any = "", |
| predicate: Optional[Callable[[Dict[str, Any]], bool]] = None, |
| ) -> str: |
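    r"""Replace every \command occurrence (including its arguments) in text, optionally filtered by predicate; replacement may be a literal string or a callable receiving the parsed payload."""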
| if not text: |
| return text |
| star_pattern = r"(?:\*)?" if allow_star else "" |
| pattern = re.compile( |
| rf"\\{re.escape(command)}{star_pattern}(?![A-Za-z])", |
| re.IGNORECASE, |
| ) |
| chunks: List[str] = [] |
| index = 0 |
| for match in pattern.finditer(text): |
| if match.start() < index: |
| continue |
| parsed, consumed = _consume_latex_command( |
| text[match.start() :], |
| command, |
| required_arg_count=required_arg_count, |
| optional_arg_count=optional_arg_count, |
| allow_star=allow_star, |
| ) |
| if parsed is None: |
| continue |
| if predicate is not None and not predicate(parsed): |
| continue |
| chunks.append(text[index : match.start()]) |
| if callable(replacement): |
| replacement_text = replacement(parsed) |
| if replacement_text: |
| chunks.append(str(replacement_text)) |
| elif replacement: |
| chunks.append(str(replacement)) |
| index = match.start() + consumed |
| if not chunks: |
| return text |
| chunks.append(text[index:]) |
| return "".join(chunks) |
|
|
|
|
| def _find_matching_latex_end( |
| text: str, |
| *, |
| env_name: str, |
| start_index: int, |
| ) -> tuple[Optional[int], Optional[int]]: |
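    r"""Find the \end{env_name} matching an already-consumed \begin, honoring nested environments; return its (start, end) offsets or (None, None)."""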
| pattern = re.compile( |
| rf"\\(?P<kind>begin|end)\s*\{{{re.escape(env_name)}\}}", |
| re.IGNORECASE, |
| ) |
| depth = 1 |
| for match in pattern.finditer(text, start_index): |
| if normalize_text(match.group("kind")).lower() == "begin": |
| depth += 1 |
| continue |
| depth -= 1 |
| if depth == 0: |
| return match.start(), match.end() |
| return None, None |
|
|
|
|
| def _extract_latex_table_candidates(text: str) -> List[Dict[str, Any]]: |
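    """Locate known table-like environments (tabular, longtable, tblr, ...) in text and return their bodies, full sources, and offsets."""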
| candidates: List[Dict[str, Any]] = [] |
| for begin_match in _LATEX_BEGIN_ENV_RE.finditer(text): |
| env_name = normalize_text(begin_match.group("name")) |
| arg_count = _LATEX_TABLE_ENVIRONMENT_ARG_COUNTS.get(env_name.lower()) |
| if arg_count is None: |
| continue |
| index = begin_match.end() |
| valid = True |
| for _ in range(arg_count): |
| while index < len(text) and text[index].isspace(): |
| index += 1 |
| while True: |
| optional_arg, next_index = _consume_latex_bracket_group(text, index) |
| if optional_arg is None: |
| break |
| index = next_index |
| while index < len(text) and text[index].isspace(): |
| index += 1 |
| _, next_index = _consume_latex_braced_group(text, index) |
| if next_index == index: |
| valid = False |
| break |
| index = next_index |
| if not valid: |
| continue |
| body_end_start, body_end_end = _find_matching_latex_end( |
| text, |
| env_name=env_name, |
| start_index=index, |
| ) |
| if body_end_start is None or body_end_end is None: |
| continue |
| candidates.append( |
| { |
| "env_name": env_name, |
| "body": text[index:body_end_start], |
| "source": text[begin_match.start() : body_end_end], |
| "start": begin_match.start(), |
| "end": body_end_end, |
| } |
| ) |
| return candidates |
|
|
|
|
| def _read_latex_inline_argument(text: str, start_index: int) -> tuple[str, int]: |
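    """Read one inline LaTeX argument at start_index (a braced group, an escaped token, or a single character) and return (rendered_html, next_index)."""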
| index = start_index |
| while index < len(text) and text[index].isspace(): |
| index += 1 |
| if index >= len(text): |
| return "", index |
| if text[index] == "{": |
| group, next_index = _consume_latex_braced_group(text, index) |
| if group is None: |
| return "", index |
| return _latex_inline_to_html(group), next_index |
| if text[index] == "\\" and index + 1 < len(text): |
| return _latex_inline_to_html(text[index : index + 2]), index + 2 |
| return html.escape(text[index]), index + 1 |
|
|
|
|
| def _read_latex_inline_arguments( |
| text: str, |
| start_index: int, |
| *, |
| count: int, |
| ) -> tuple[List[str], int]: |
| values: List[str] = [] |
| index = start_index |
| for _ in range(count): |
| value_html, next_index = _read_latex_inline_argument(text, index) |
| values.append(value_html) |
| index = next_index |
| return values, index |
|
|
|
|
| def _latex_inline_to_html(text: str) -> str: |
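    """Render inline LaTeX (math delimiters, formatting commands, symbols, sub/superscripts) as a plain HTML string."""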
| def parse_segment(start_index: int, *, stop_at_closing_brace: bool = False) -> tuple[str, int]: |
| parts: List[str] = [] |
| index = start_index |
| while index < len(text): |
| char = text[index] |
| if stop_at_closing_brace and char == "}": |
| return "".join(parts), index + 1 |
| if char == "$": |
| delimiter_len = 2 if text.startswith("$$", index) else 1 |
| search_index = index + delimiter_len |
| matched = False |
| while search_index < len(text): |
| matching_index = text.find("$" * delimiter_len, search_index) |
| if matching_index < 0: |
| break |
| if delimiter_len == 1 and matching_index > 0 and text[matching_index - 1] == "\\": |
| search_index = matching_index + 1 |
| continue |
| payload = text[index + delimiter_len : matching_index] |
| if _LATEX_INLINE_TEXT_RE.search(payload or ""): |
| parts.append(_latex_inline_to_html(payload)) |
| index = matching_index + delimiter_len |
| matched = True |
| break |
| if matched: |
| continue |
| parts.append(html.escape(char)) |
| index += 1 |
| continue |
| if char == "{": |
| inner, index = parse_segment(index + 1, stop_at_closing_brace=True) |
| parts.append(inner) |
| continue |
| if char in {"^", "_"}: |
| tag_name = "sup" if char == "^" else "sub" |
| payload_html, index = _read_latex_inline_argument(text, index + 1) |
| parts.append(f"<{tag_name}>{payload_html}</{tag_name}>") |
| continue |
| if char == "~": |
| parts.append(" ") |
| index += 1 |
| continue |
| if char == "\\": |
| if text.startswith("\\\\", index): |
| parts.append("<br>") |
| index += 2 |
| continue |
| index += 1 |
| if index >= len(text): |
| break |
| escaped_char = text[index] |
| if escaped_char in "{}%&$_#": |
| parts.append(html.escape(escaped_char)) |
| index += 1 |
| continue |
| if escaped_char in "()[],": |
| index += 1 |
| continue |
| match = re.match(r"[A-Za-z]+", text[index:]) |
| if not match: |
| parts.append(html.escape("\\" + escaped_char)) |
| index += 1 |
| continue |
| command = match.group(0) |
| index += len(command) |
| if command in _LATEX_INLINE_UNWRAP_COMMANDS: |
| payload_html, index = _read_latex_inline_argument(text, index) |
| parts.append(payload_html) |
| continue |
| if command in _LATEX_INLINE_BOLD_COMMANDS: |
| payload_html, index = _read_latex_inline_argument(text, index) |
| parts.append(f"<strong>{payload_html}</strong>") |
| continue |
| if command in _LATEX_INLINE_ITALIC_COMMANDS: |
| payload_html, index = _read_latex_inline_argument(text, index) |
| parts.append(f"<em>{payload_html}</em>") |
| continue |
| if command in _LATEX_INLINE_UNDERLINE_COMMANDS: |
| payload_html, index = _read_latex_inline_argument(text, index) |
| parts.append(f"<u>{payload_html}</u>") |
| continue |
| if command in {"frac", "dfrac", "tfrac"}: |
| fraction_parts, index = _read_latex_inline_arguments(text, index, count=2) |
| numerator_html, denominator_html = fraction_parts |
| parts.append(f"{numerator_html}/{denominator_html}") |
| continue |
| if command in {"textcolor", "colorbox"}: |
| _, index = _read_latex_inline_argument(text, index) |
| payload_html, index = _read_latex_inline_argument(text, index) |
| parts.append(payload_html) |
| continue |
| if command == "fcolorbox": |
| _, index = _read_latex_inline_argument(text, index) |
| _, index = _read_latex_inline_argument(text, index) |
| payload_html, index = _read_latex_inline_argument(text, index) |
| parts.append(payload_html) |
| continue |
| if command in {"makecell", "shortstack"}: |
| while True: |
| optional_arg, next_index = _consume_latex_bracket_group(text, index) |
| if optional_arg is None: |
| break |
| index = next_index |
| payload_html, index = _read_latex_inline_argument(text, index) |
| parts.append(payload_html) |
| continue |
| if command in {"multicolumn", "multirow", "parbox", "rotatebox", "scalebox"}: |
| payload_html = "" |
| if command == "multicolumn": |
| _, index = _read_latex_inline_argument(text, index) |
| _, index = _read_latex_inline_argument(text, index) |
| payload_html, index = _read_latex_inline_argument(text, index) |
| elif command == "multirow": |
| while True: |
| optional_arg, next_index = _consume_latex_bracket_group(text, index) |
| if optional_arg is None: |
| break |
| index = next_index |
| _, index = _read_latex_inline_argument(text, index) |
| _, index = _read_latex_inline_argument(text, index) |
| payload_html, index = _read_latex_inline_argument(text, index) |
| else: |
| _, index = _read_latex_inline_argument(text, index) |
| payload_html, index = _read_latex_inline_argument(text, index) |
| parts.append(payload_html) |
| continue |
| if command == "resizebox": |
| _, index = _read_latex_inline_argument(text, index) |
| _, index = _read_latex_inline_argument(text, index) |
| payload_html, index = _read_latex_inline_argument(text, index) |
| parts.append(payload_html) |
| continue |
| if command == "raisebox": |
| _, index = _read_latex_inline_argument(text, index) |
| while True: |
| optional_arg, next_index = _consume_latex_bracket_group(text, index) |
| if optional_arg is None: |
| break |
| index = next_index |
| payload_html, index = _read_latex_inline_argument(text, index) |
| parts.append(payload_html) |
| continue |
| if command in {"footnote", "tablefootnote"}: |
| payload_html, index = _read_latex_inline_argument(text, index) |
| if payload_html: |
| parts.append(f"<br>{payload_html}") |
| continue |
| if command == "tnote": |
| payload_html, index = _read_latex_inline_argument(text, index) |
| parts.append(f"<sup>{payload_html}</sup>") |
| continue |
| if command in _LATEX_INLINE_SYMBOLS: |
| parts.append(_LATEX_INLINE_SYMBOLS[command]) |
| continue |
| if command in {"left", "right"}: |
| continue |
| if command in {"label", "nonumber"}: |
| continue |
| payload_html, next_index = _read_latex_inline_argument(text, index) |
| if next_index != index: |
| parts.append(payload_html) |
| index = next_index |
| continue |
| if char == "}": |
| if stop_at_closing_brace: |
| return "".join(parts), index + 1 |
| index += 1 |
| continue |
| parts.append(html.escape(char)) |
| index += 1 |
| return "".join(parts), index |
|
|
| rendered, _ = parse_segment(0) |
| rendered = re.sub(r"\s*<br>\s*", "<br>", rendered) |
| rendered = re.sub(r"[ \t\r\f\v]+", " ", rendered) |
| return rendered.strip() |
|
|
|
|
| def normalize_inline_latex_in_html_fragment(html_fragment: str) -> str: |
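    """Rewrite inline LaTeX found in the text nodes of an HTML table fragment as HTML; returns the fragment unchanged when BeautifulSoup is unavailable or no table is present."""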
| cleaned = strip_code_fences(str(html_fragment or "")) |
| if not cleaned or not _HTML_TABLE_RE.search(cleaned): |
| return cleaned |
|
|
| try: |
| from bs4 import BeautifulSoup, NavigableString, Tag |
| except ImportError: |
| return cleaned |
|
|
| soup = BeautifulSoup(cleaned, "html.parser") |
| for text_node in list(soup.find_all(string=True)): |
| parent = text_node.parent |
| if not isinstance(text_node, NavigableString) or not isinstance(parent, Tag): |
| continue |
| if parent.name in {"script", "style"}: |
| continue |
|
|
| original = str(text_node) |
| if not _LATEX_INLINE_TEXT_RE.search(original): |
| continue |
|
|
| rendered = _latex_inline_to_html(original) |
| if not rendered or rendered == html.escape(original): |
| continue |
|
|
| fragment_soup = BeautifulSoup(f"<body>{rendered}</body>", "html.parser") |
| body = getattr(fragment_soup, "body", None) |
| replacement_nodes = list(body.contents if body is not None else fragment_soup.contents) |
| if not replacement_nodes: |
| continue |
| for node in replacement_nodes: |
| text_node.insert_before(node) |
| text_node.extract() |
|
|
| return str(soup) |
|
|
|
|
| def _parse_latex_cell(cell_text: str) -> Dict[str, Any]: |
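    r"""Parse one LaTeX table cell, unwrapping \multicolumn, \multirow, \Block, and \SetCell wrappers to recover colspan/rowspan plus the cell's HTML."""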
| cell = normalize_text(cell_text) |
| colspan = 1 |
| rowspan = 1 |
|
|
| while cell: |
| matched = False |
| multicolumn_payload, multicolumn_end = _consume_latex_command( |
| cell, |
| "multicolumn", |
| required_arg_count=3, |
| ) |
| if multicolumn_payload is not None: |
| multicolumn_args = list(multicolumn_payload["args"]) |
| try: |
| colspan = max(1, int(normalize_text(multicolumn_args[0]))) |
| except ValueError: |
| colspan = 1 |
| cell = (multicolumn_args[2] + cell[multicolumn_end:]).strip() |
| matched = True |
| multirow_payload, multirow_end = _consume_latex_command( |
| cell, |
| "multirow", |
| required_arg_count=3, |
| optional_arg_count=1, |
| ) |
| if multirow_payload is not None: |
| multirow_args = list(multirow_payload["args"]) |
| try: |
| rowspan = max(1, int(normalize_text(multirow_args[0]).lstrip("*"))) |
| except ValueError: |
| rowspan = 1 |
| cell = (multirow_args[2] + cell[multirow_end:]).strip() |
| matched = True |
| block_payload, block_end = _consume_latex_command( |
| cell, |
| "Block", |
| required_arg_count=2, |
| optional_arg_count=1, |
| ) |
| if block_payload is not None: |
| block_args = list(block_payload["args"]) |
| block_spec = normalize_text(block_args[0]) |
| span_match = re.match(r"(?P<rows>\d+)\s*[-x,]\s*(?P<cols>\d+)", block_spec) |
| if span_match is not None: |
| rowspan = max(rowspan, int(span_match.group("rows"))) |
| colspan = max(colspan, int(span_match.group("cols"))) |
| elif block_spec.isdigit(): |
| rowspan = max(rowspan, int(block_spec)) |
| cell = (block_args[1] + cell[block_end:]).strip() |
| matched = True |
| setcell_payload, setcell_end = _consume_latex_command( |
| cell, |
| "SetCell", |
| required_arg_count=1, |
| optional_arg_count=1, |
| ) |
| if setcell_payload is not None: |
| option_text = ",".join( |
| piece |
| for piece in [*setcell_payload["optional_args"], *setcell_payload["args"]] |
| if normalize_text(piece) |
| ) |
| row_match = re.search(r"(?:^|[,; ]+)r\s*=\s*(\d+)", option_text, re.IGNORECASE) |
| col_match = re.search(r"(?:^|[,; ]+)c\s*=\s*(\d+)", option_text, re.IGNORECASE) |
| if row_match is not None: |
| rowspan = max(rowspan, int(row_match.group(1))) |
| if col_match is not None: |
| colspan = max(colspan, int(col_match.group(1))) |
| cell = cell[setcell_end:].strip() |
| matched = True |
| if not matched: |
| break |
|
|
| return { |
| "html": _latex_inline_to_html(cell), |
| "colspan": colspan, |
| "rowspan": rowspan, |
| } |
|
|
|
|
| def _extract_latex_caption_html(text: str) -> str: |
| best_caption = "" |
| if not text: |
| return best_caption |
|
|
| for match in re.finditer(r"\\captionof(?:\*)?(?![A-Za-z])", text, re.IGNORECASE): |
| payload, _ = _consume_latex_command( |
| text[match.start() :], |
| "captionof", |
| required_arg_count=2, |
| optional_arg_count=1, |
| allow_star=True, |
| ) |
| if payload is None: |
| continue |
| target_name = normalize_text(payload["args"][0]).lower() |
| if "table" not in target_name: |
| continue |
| caption_html = _latex_inline_to_html(payload["args"][1]) |
| if len(caption_html) > len(best_caption): |
| best_caption = caption_html |
|
|
| for match in re.finditer(r"\\caption(?:\*)?(?![A-Za-z])", text, re.IGNORECASE): |
| payload, _ = _consume_latex_command( |
| text[match.start() :], |
| "caption", |
| required_arg_count=1, |
| optional_arg_count=1, |
| allow_star=True, |
| ) |
| if payload is None: |
| continue |
| caption_html = _latex_inline_to_html(payload["args"][0]) |
| if len(caption_html) > len(best_caption): |
| best_caption = caption_html |
| return best_caption |
|
|
|
|
| def _extract_latex_tablenotes_html(text: str) -> str: |
| note_fragments: List[str] = [] |
| for match in _LATEX_TABLENOTES_ENV_RE.finditer(text): |
| body = normalize_text(match.group("body")) |
| if not body: |
| continue |
| item_parts = re.split(r"\\item(?:\s*\[[^\]]*\])?", body) |
| rendered_items = [ |
| _latex_inline_to_html(piece) |
| for piece in item_parts |
| if normalize_text(piece) |
| ] |
| if rendered_items: |
| note_fragments.extend(rendered_items) |
| continue |
| fallback_html = _latex_inline_to_html(body) |
| if fallback_html: |
| note_fragments.append(fallback_html) |
| return "<br>".join(fragment for fragment in note_fragments if normalize_text(fragment)) |
|
|
|
|
| def _clean_latex_tabular_body(body: str) -> str: |
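    """Normalize a tabular body before row splitting: drop captions, labels, rules, layout commands, and tablenotes, and rewrite header/footer markers as row breaks."""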
| cleaned = body.replace("\r", "\n").replace("\\tabularnewline", "\\\\") |
| cleaned = _LATEX_TABLENOTES_ENV_RE.sub("", cleaned) |
| cleaned = _replace_latex_command_occurrences( |
| cleaned, |
| "captionof", |
| required_arg_count=2, |
| optional_arg_count=1, |
| allow_star=True, |
| replacement=r"\\", |
| predicate=lambda payload: "table" in normalize_text(payload["args"][0]).lower(), |
| ) |
| cleaned = _replace_latex_command_occurrences( |
| cleaned, |
| "caption", |
| required_arg_count=1, |
| optional_arg_count=1, |
| allow_star=True, |
| replacement=r"\\", |
| ) |
| cleaned = _replace_latex_command_occurrences( |
| cleaned, |
| "label", |
| required_arg_count=1, |
| replacement="", |
| ) |
| cleaned = _replace_latex_command_occurrences( |
| cleaned, |
| "endfirsthead", |
| required_arg_count=0, |
| replacement=r"\\", |
| ) |
| cleaned = _replace_latex_command_occurrences( |
| cleaned, |
| "endhead", |
| required_arg_count=0, |
| replacement=r"\\", |
| ) |
| cleaned = _replace_latex_command_occurrences( |
| cleaned, |
| "endfoot", |
| required_arg_count=0, |
| replacement=r"\\", |
| ) |
| cleaned = _replace_latex_command_occurrences( |
| cleaned, |
| "endlastfoot", |
| required_arg_count=0, |
| replacement=r"\\", |
| ) |
| cleaned = _LATEX_RULE_COMMAND_RE.sub("", cleaned) |
| cleaned = _LATEX_LAYOUT_COMMAND_RE.sub(" ", cleaned) |
| cleaned = _LATEX_HEADER_FOOTER_MARKER_RE.sub(lambda _match: r"\\", cleaned) |
| return cleaned |
|
|
|
|
| def _parse_latex_tabular_body(body: str) -> List[List[Dict[str, Any]]]: |
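    """Split a cleaned tabular body into rows of cell dicts (html, colspan, rowspan), dropping rows and cells that carry no content or span information."""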
| cleaned = _clean_latex_tabular_body(body) |
| rows: List[List[Dict[str, Any]]] = [] |
| for raw_row in _split_latex_top_level(cleaned, separator="\\\\"): |
| row_text = normalize_text(raw_row) |
| if not row_text: |
| continue |
| row_cells = [ |
| _parse_latex_cell(raw_cell) |
| for raw_cell in _split_latex_top_level(row_text, separator="&") |
| ] |
| row_cells = [ |
| cell |
| for cell in row_cells |
| if cell["html"] or cell["colspan"] > 1 or cell["rowspan"] > 1 |
| ] |
| if row_cells: |
| rows.append(row_cells) |
| return rows |
|
|
|
|
| def _latex_table_score( |
| rows: List[List[Dict[str, Any]]], |
| *, |
| caption_html: str = "", |
| notes_html: str = "", |
| ) -> float: |
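    """Heuristic score favoring larger tables (rows x width) with more alphabetic content, captions, and notes; used to pick the best candidate."""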
| if not rows: |
| return 0.0 |
| max_width = max(sum(int(cell["colspan"]) for cell in row) for row in rows) |
| alpha_count = sum( |
| 1 |
| for row in rows |
| for cell in row |
| for char in html.unescape(str(cell["html"])) |
| if char.isalpha() |
| ) |
| caption_bonus = len(html.unescape(caption_html or "")) |
| notes_bonus = len(html.unescape(notes_html or "")) |
| return float(len(rows) * max_width * 4 + alpha_count + caption_bonus + (notes_bonus * 0.5)) |
|
|
|
|
| def _render_latex_table_html( |
| rows: List[List[Dict[str, Any]]], |
| *, |
| caption_html: str = "", |
| notes_html: str = "", |
| ) -> str: |
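    """Render parsed rows as a minimal <table>, adding a full-width caption row on top and a notes row at the bottom when present."""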
| max_width = max((sum(int(cell["colspan"]) for cell in row) for row in rows), default=1) |
| colspan_attr = f' colspan="{max_width}"' if max_width > 1 else "" |
| parts: List[str] = ["<table>"] |
| if caption_html: |
| parts.append(f"<tr><td{colspan_attr}>{caption_html}</td></tr>") |
| for row in rows: |
| parts.append("<tr>") |
| for cell in row: |
| attrs = "" |
| if int(cell["colspan"]) > 1: |
| attrs += f' colspan="{int(cell["colspan"])}"' |
| if int(cell["rowspan"]) > 1: |
| attrs += f' rowspan="{int(cell["rowspan"])}"' |
| parts.append(f"<td{attrs}>{cell['html']}</td>") |
| parts.append("</tr>") |
| if notes_html: |
| parts.append(f"<tr><td{colspan_attr}>{notes_html}</td></tr>") |
| parts.append("</table>") |
| return "".join(parts) |
|
|
|
|
| def _looks_like_latex_table_markup(text: str) -> bool: |
| if not text: |
| return False |
| if re.search(r"\\begin\s*\{(?:array|longtable|longtabu|nicetabular|tabu|tabular\*?|tabularx|tabulary|tblr|xltabular)\}", text, re.IGNORECASE): |
| return True |
| return bool( |
| re.search( |
| r"\\(?:caption(?:of)?|cmidrule|multicolumn|multirow|toprule|midrule|bottomrule)\b", |
| text, |
| re.IGNORECASE, |
| ) |
| ) |
|
|
|
|
| def _latex_table_renderer_command_template() -> Optional[str]: |
| configured = normalize_text(os.getenv("LATEX_TABLE_RENDER_COMMAND", "")) |
| if configured: |
| return configured |
| if shutil.which("latexmlc"): |
| return "latexmlc --quiet --nocomments --format=html5 --dest={output_path} {input_path}" |
| if shutil.which("pandoc"): |
| return "pandoc --from=latex --to=html {input_path}" |
| return None |
|
|
|
|
| def _latex_table_renderer_timeout_s() -> int: |
| return _env_int("LATEX_TABLE_RENDER_TIMEOUT_S", 20) |
|
|
|
|
| def _wrap_latex_table_fragment_for_renderer(text: str) -> str: |
| cleaned = strip_code_fences(text) |
| if not cleaned: |
| return cleaned |
| if re.search(r"\\documentclass\b", cleaned, re.IGNORECASE): |
| return cleaned |
| package_names = [ |
| "array", |
| "booktabs", |
| "graphicx", |
| "longtable", |
| "makecell", |
| "multirow", |
| "nicematrix", |
| "tabularray", |
| "tabularx", |
| "tabulary", |
| "threeparttable", |
| "ulem", |
| "xcolor", |
| ] |
| preamble = "\n".join(f"\\usepackage{{{package_name}}}" for package_name in package_names) |
| return ( |
| "\\documentclass{article}\n" |
| f"{preamble}\n" |
| "\\begin{document}\n" |
| f"{cleaned}\n" |
| "\\end{document}\n" |
| ) |
|
|
|
|
| def _extract_best_html_table_fragment(rendered_html: str) -> Optional[str]: |
| candidates = _HTML_TABLE_FRAGMENT_RE.findall(rendered_html or "") |
| if not candidates: |
| return None |
|
|
| def candidate_score(fragment: str) -> int: |
| visible_text = re.sub(r"<[^>]+>", " ", fragment) |
| return len(normalize_text(html.unescape(visible_text))) |
|
|
| best_fragment = max(candidates, key=candidate_score) |
| return normalize_text(best_fragment) or None |
|
|
|
|
| def _maybe_render_latex_tabular_with_external_renderer(text: str) -> Optional[str]: |
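    """Convert LaTeX table markup to HTML via an external renderer (latexmlc or pandoc, per LATEX_TABLE_RENDER_COMMAND); return the best <table> fragment or None."""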
| cleaned = strip_code_fences(text) |
| if not cleaned or _HTML_TABLE_RE.search(cleaned) or not _looks_like_latex_table_markup(cleaned): |
| return None |
|
|
| command_template = _latex_table_renderer_command_template() |
| if not command_template: |
| return None |
|
|
| with TemporaryDirectory(prefix="latex_table_renderer_") as tmpdir: |
| tmpdir_path = Path(tmpdir) |
| input_path = tmpdir_path / "table.tex" |
| output_path = tmpdir_path / "table.html" |
| input_path.write_text(_wrap_latex_table_fragment_for_renderer(cleaned), encoding="utf-8") |
| try: |
| completed = subprocess.run( |
| command_template.format( |
| input_path=shlex.quote(str(input_path)), |
| output_path=shlex.quote(str(output_path)), |
| ), |
| shell=True, |
| capture_output=True, |
| text=True, |
| timeout=_latex_table_renderer_timeout_s(), |
| ) |
| except (OSError, subprocess.SubprocessError): |
| return None |
| if completed.returncode != 0: |
| return None |
|
|
| rendered_html = "" |
| if output_path.exists(): |
| rendered_html = output_path.read_text(encoding="utf-8", errors="replace") |
| if not rendered_html: |
| rendered_html = completed.stdout or "" |
| return _extract_best_html_table_fragment(rendered_html) |
|
|
|
|
| def maybe_convert_latex_tabular_to_html(text: str) -> Optional[str]: |
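    """Convert LaTeX table markup to an HTML table, preferring an external renderer and falling back to the built-in tabular parser; return None when text does not look like LaTeX table markup."""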
| cleaned = strip_code_fences(text) |
| if not cleaned or _HTML_TABLE_RE.search(cleaned): |
| return None |
| if not _looks_like_latex_table_markup(cleaned): |
| return None |
|
|
| rendered_html = _maybe_render_latex_tabular_with_external_renderer(cleaned) |
| if rendered_html: |
| return rendered_html |
|
|
| best_rendered_html: Optional[str] = None |
| best_score = 0.0 |
| for candidate in _extract_latex_table_candidates(cleaned): |
| rows = _parse_latex_tabular_body(candidate["body"]) |
| if not rows: |
| continue |
| source_text = str(candidate["source"]) |
| context_start = max(0, int(candidate["start"]) - 600) |
| context_end = min(len(cleaned), int(candidate["end"]) + 600) |
| context_text = cleaned[context_start:context_end] |
| caption_html = _extract_latex_caption_html(source_text) or _extract_latex_caption_html(context_text) |
| notes_html = _extract_latex_tablenotes_html(source_text) or _extract_latex_tablenotes_html(context_text) |
| score = _latex_table_score(rows, caption_html=caption_html, notes_html=notes_html) |
| if score > best_score: |
| best_rendered_html = _render_latex_table_html( |
| rows, |
| caption_html=caption_html, |
| notes_html=notes_html, |
| ) |
| best_score = score |
|
|
| if not best_rendered_html: |
| return None |
|
|
| return best_rendered_html |
|
|
|
|
| def _env_int(name: str, default: int) -> int: |
| raw_value = os.getenv(name, "").strip() |
| if not raw_value: |
| return default |
| try: |
| return max(1, int(raw_value)) |
| except ValueError: |
| return default |
|
|
|
|
| def _env_float( |
| name: str, |
| default: float, |
| *, |
| min_value: Optional[float] = None, |
| max_value: Optional[float] = None, |
| ) -> float: |
| raw_value = os.getenv(name, "").strip() |
| if not raw_value: |
| value = float(default) |
| else: |
| try: |
| value = float(raw_value) |
| except ValueError: |
| value = float(default) |
| if min_value is not None: |
| value = max(float(min_value), value) |
| if max_value is not None: |
| value = min(float(max_value), value) |
| return value |
|
|
|
|
| def _env_flag(name: str, default: str = "0") -> bool: |
| return os.getenv(name, default).strip().lower() in {"1", "true", "yes", "on"} |
|
|
|
|
| def _env_nonnegative_int(name: str, default: int) -> int: |
| raw_value = os.getenv(name, "").strip() |
| if not raw_value: |
| return default |
| try: |
| return max(0, int(raw_value)) |
| except ValueError: |
| return default |
|
|
|
|
| def _table_ocr_normalized_max_image_pixels() -> int: |
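    """Resolve the pixel budget for normalized table crops, falling back to TABLE_OCR_MAX_IMAGE_PIXELS when the dedicated variable is unset."""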
| fallback = _env_nonnegative_int("TABLE_OCR_MAX_IMAGE_PIXELS", 2_000_000) |
| return _env_nonnegative_int("TABLE_OCR_NORMALIZED_MAX_IMAGE_PIXELS", fallback) |
|
|
|
|
| def _table_ocr_allow_backend_image_resize() -> bool: |
| return _env_flag("TABLE_OCR_ALLOW_BACKEND_IMAGE_RESIZE", "0") |
|
|
|
|
| def load_module_from_path(module_name: str, path: Path): |
| spec = importlib.util.spec_from_file_location(module_name, path) |
| if spec is None or spec.loader is None: |
| raise RuntimeError(f"Unable to load module from {path}") |
| module = importlib.util.module_from_spec(spec) |
| spec.loader.exec_module(module) |
| return module |
|
|
|
|
| @lru_cache(maxsize=1) |
| def load_sec_parser_module(): |
| sec_parser_dir = ROOT / "sec_parser" |
| if not sec_parser_dir.exists(): |
| raise RuntimeError(f"Expected sec_parser directory at {sec_parser_dir}") |
| dir_str = str(sec_parser_dir) |
| if dir_str not in sys.path: |
| sys.path.insert(0, dir_str) |
| import sec_parser as sec_parser_mod |
|
|
| return sec_parser_mod |
|
|
|
|
| class TableOCRRemoteResponseError(RuntimeError): |
| def __init__( |
| self, |
| message: str, |
| *, |
| response_payload: Optional[Dict[str, Any]] = None, |
| details: Optional[Dict[str, Any]] = None, |
| ): |
| super().__init__(message) |
| self.response_payload = response_payload or {} |
| self.details = details or {} |
|
|
|
|
| class TableOCROpenRouterResponseError(TableOCRRemoteResponseError): |
| pass |
|
|
|
|
| class TableOCRRemoteChatClient: |
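    """Base client for OpenAI-compatible chat-completions OCR backends; subclasses configure the provider label, env-var names, and default base URL."""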
| provider_label = "Remote OCR" |
| response_error_cls = TableOCRRemoteResponseError |
| api_key_env_name = "" |
| model_id_env_name = "" |
| base_url_env_name = "" |
| reasoning_level_env_name = "" |
| default_base_url = "" |
| require_api_key = True |
|
|
| def __init__( |
| self, |
| *, |
| api_key: Optional[str] = None, |
| model_id: Optional[str] = None, |
| base_url: Optional[str] = None, |
| reasoning_level: Optional[str] = None, |
| timeout_s: int = 180, |
| ): |
| api_key_value = api_key |
| if api_key_value is None and self.api_key_env_name: |
| api_key_value = os.getenv(self.api_key_env_name, "") |
| self.api_key = normalize_text(api_key_value or "") |
|
|
| model_id_value = model_id |
| if model_id_value is None and self.model_id_env_name: |
| model_id_value = os.getenv(self.model_id_env_name, "") |
| self.model_id = normalize_text(model_id_value or "") |
|
|
| base_url_value = base_url |
| if base_url_value is None and self.base_url_env_name: |
| base_url_value = os.getenv(self.base_url_env_name, self.default_base_url) |
| self.base_url = normalize_text(base_url_value or self.default_base_url).rstrip("/") |
|
|
| reasoning_level_value = reasoning_level |
| if reasoning_level_value is None and self.reasoning_level_env_name: |
| reasoning_level_value = os.getenv(self.reasoning_level_env_name, "") |
| self.reasoning_level = normalize_text(reasoning_level_value or "") |
| self.timeout_s = timeout_s |
|
|
| if self.require_api_key and not self.api_key: |
| raise RuntimeError(f"Missing {self.api_key_env_name} for {self.provider_label} OCR transcription.") |
| if not self.model_id: |
| raise RuntimeError(f"Missing {self.provider_label} OCR model id.") |
| if not self.base_url: |
| raise RuntimeError(f"Missing {self.provider_label} OCR base URL.") |
|
|
| self.session = requests.Session() |
| self.headers = self._build_headers() |
|
|
| def _build_headers(self) -> Dict[str, str]: |
| headers = { |
| "Content-Type": "application/json", |
| } |
| if self.api_key: |
| headers["Authorization"] = f"Bearer {self.api_key}" |
| return headers |
|
|
| def _augment_request_body(self, body: Dict[str, Any], *, include_reasoning: bool) -> None: |
| del body, include_reasoning |
|
|
| def _raise_request_error( |
| self, |
| message: str, |
| *, |
| response_payload: Optional[Dict[str, Any]] = None, |
| details: Optional[Dict[str, Any]] = None, |
| ) -> None: |
| raise self.response_error_cls( |
| message, |
| response_payload=response_payload, |
| details=details, |
| ) |
|
|
| def chat_completion( |
| self, |
| *, |
| messages: List[Dict[str, Any]], |
| max_tokens: int = 4096, |
| temperature: float = 0.0, |
| progress_label: str = "", |
| progress_handler: ProgressHandler = None, |
| ) -> Dict[str, Any]: |
| url = f"{self.base_url}/chat/completions" |
| body: Dict[str, Any] = { |
| "model": self.model_id, |
| "messages": messages, |
| "max_tokens": max_tokens, |
| "temperature": temperature, |
| } |
| self._augment_request_body(body, include_reasoning=False) |
|
|
| if progress_label: |
| emit_progress( |
| f"{progress_label} | model {self.model_id}", |
| progress_handler=progress_handler, |
| ) |
| try: |
| response = self.session.post( |
| url, |
| headers=self.headers, |
| json=body, |
| timeout=self.timeout_s, |
| ) |
| except requests.RequestException as exc: |
| self._raise_request_error( |
| f"{self.provider_label} request failed: {exc}", |
| details={ |
| "status_code": None, |
| "model_id": self.model_id, |
| }, |
| ) |
| if response.status_code >= 400: |
| response_payload: Optional[Dict[str, Any]] = None |
| try: |
| decoded_payload = response.json() |
| if isinstance(decoded_payload, dict): |
| response_payload = decoded_payload |
| except ValueError: |
| response_payload = None |
| self._raise_request_error( |
| f"{self.provider_label} HTTP {response.status_code}: {response.text[:1500]}", |
| response_payload=response_payload, |
| details={ |
| "status_code": response.status_code, |
| "model_id": self.model_id, |
| }, |
| ) |
| if progress_label: |
| emit_progress( |
| f"{progress_label} | response received", |
| progress_handler=progress_handler, |
| ) |
| return response.json() |
|
|
| def chat_completion_streaming( |
| self, |
| *, |
| messages: List[Dict[str, Any]], |
| max_tokens: int = 4096, |
| temperature: float = 0.0, |
| progress_label: str = "", |
| progress_handler: ProgressHandler = None, |
| include_reasoning: bool = False, |
| ) -> Dict[str, Any]: |
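        """POST a streaming chat completion, consume the SSE stream, and return a response shaped like the non-streaming payload with added _stream_timing latencies."""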
| url = f"{self.base_url}/chat/completions" |
| body: Dict[str, Any] = { |
| "model": self.model_id, |
| "messages": messages, |
| "max_tokens": max_tokens, |
| "temperature": temperature, |
| "stream": True, |
| "stream_options": {"include_usage": True}, |
| } |
| self._augment_request_body(body, include_reasoning=include_reasoning) |
|
|
| if progress_label: |
| emit_progress( |
| f"{progress_label} | model {self.model_id}", |
| progress_handler=progress_handler, |
| ) |
| started_at = time.perf_counter() |
| try: |
| response = self.session.post( |
| url, |
| headers=self.headers, |
| json=body, |
| timeout=self.timeout_s, |
| stream=True, |
| ) |
| except requests.RequestException as exc: |
| self._raise_request_error( |
| f"{self.provider_label} request failed: {exc}", |
| details={ |
| "status_code": None, |
| "model_id": self.model_id, |
| }, |
| ) |
| if response.status_code >= 400: |
| response_payload: Optional[Dict[str, Any]] = None |
| response_text = "" |
| try: |
| response_payload = response.json() |
| response_text = json.dumps(response_payload, ensure_ascii=False) |
| except ValueError: |
| try: |
| response_text = response.text |
| except Exception: |
| response_text = "" |
| finally: |
| response.close() |
| self._raise_request_error( |
| f"{self.provider_label} HTTP {response.status_code}: {response_text[:1500]}", |
| response_payload=response_payload, |
| details={ |
| "status_code": response.status_code, |
| "model_id": self.model_id, |
| }, |
| ) |
| if progress_label: |
| emit_progress( |
| f"{progress_label} | response received", |
| progress_handler=progress_handler, |
| ) |
| content_parts: List[str] = [] |
| refusal_parts: List[str] = [] |
| tool_calls: List[Any] = [] |
| provider_payload: Any = None |
| usage_payload: Dict[str, Any] = {} |
| finish_reason = "" |
| message_role = "" |
| first_content_latency_s: Optional[float] = None |
| first_reasoning_latency_s: Optional[float] = None |
| last_reasoning_latency_s: Optional[float] = None |
| saw_reasoning = False |
|
|
| def process_event_payload(event_payload: Dict[str, Any]) -> None: |
| nonlocal provider_payload, usage_payload, finish_reason, message_role |
| nonlocal first_content_latency_s, first_reasoning_latency_s, last_reasoning_latency_s, saw_reasoning |
| if provider_payload is None and event_payload.get("provider") is not None: |
| provider_payload = event_payload.get("provider") |
| event_usage = event_payload.get("usage") |
| if isinstance(event_usage, dict): |
| usage_payload = event_usage |
| choices = event_payload.get("choices") |
| first_choice = choices[0] if isinstance(choices, list) and choices else {} |
| if not isinstance(first_choice, dict): |
| return |
| finish_reason = normalize_text(first_choice.get("finish_reason", "")) or finish_reason |
| delta = first_choice.get("delta") |
| if not isinstance(delta, dict): |
| delta = {} |
| if not message_role: |
| message_role = normalize_text(delta.get("role", "")) or message_role |
| delta_tool_calls = delta.get("tool_calls") |
| if isinstance(delta_tool_calls, list): |
| tool_calls.extend(delta_tool_calls) |
| delta_refusal = delta.get("refusal") |
| if isinstance(delta_refusal, str) and delta_refusal.strip(): |
| refusal_parts.append(delta_refusal) |
| reasoning_payload = ( |
| delta.get("reasoning_details") |
| or delta.get("reasoning") |
| or delta.get("reasoning_content") |
| or first_choice.get("reasoning_details") |
| or first_choice.get("reasoning") |
| or first_choice.get("reasoning_content") |
| ) |
| if reasoning_payload: |
| saw_reasoning = True |
| reasoning_latency_s = time.perf_counter() - started_at |
| if first_reasoning_latency_s is None: |
| first_reasoning_latency_s = reasoning_latency_s |
| last_reasoning_latency_s = reasoning_latency_s |
| content_piece = extract_text_from_openrouter_content(delta.get("content")) |
| if content_piece is None: |
| message_payload = first_choice.get("message") |
| if isinstance(message_payload, dict): |
| content_piece = extract_text_from_openrouter_content(message_payload.get("content")) |
| if content_piece is not None: |
| if first_content_latency_s is None: |
| first_content_latency_s = time.perf_counter() - started_at |
| content_parts.append(content_piece) |
|
|
| current_event_lines: List[str] = [] |
| with contextlib.closing(response): |
| for raw_line in response.iter_lines(decode_unicode=True): |
| if raw_line is None: |
| continue |
| line = raw_line if isinstance(raw_line, str) else raw_line.decode("utf-8", errors="replace") |
| if line.startswith(":"): |
| continue |
| if line == "": |
| if current_event_lines: |
| payload_text = "\n".join(current_event_lines).strip() |
| current_event_lines = [] |
| if payload_text and payload_text != "[DONE]": |
| try: |
| decoded_payload = json.loads(payload_text) |
| except json.JSONDecodeError: |
| decoded_payload = None |
| if isinstance(decoded_payload, dict): |
| process_event_payload(decoded_payload) |
| continue |
| if line.startswith("data:"): |
| current_event_lines.append(line[len("data:") :].lstrip()) |
| if current_event_lines: |
| payload_text = "\n".join(current_event_lines).strip() |
| if payload_text and payload_text != "[DONE]": |
| try: |
| decoded_payload = json.loads(payload_text) |
| except json.JSONDecodeError: |
| decoded_payload = None |
| if isinstance(decoded_payload, dict): |
| process_event_payload(decoded_payload) |
|
|
| content_text = "".join(content_parts) if content_parts else None |
| total_latency_s = time.perf_counter() - started_at |
|
|
| return { |
| "choices": [ |
| { |
| "message": { |
| "role": message_role or "assistant", |
| "content": content_text, |
| "tool_calls": tool_calls, |
| "refusal": "".join(refusal_parts) if refusal_parts else None, |
| }, |
| "finish_reason": finish_reason or None, |
| } |
| ], |
| "provider": provider_payload, |
| "usage": usage_payload or None, |
| "_stream_timing": { |
| "first_content_latency_s": first_content_latency_s, |
| "first_reasoning_latency_s": first_reasoning_latency_s, |
| "last_reasoning_latency_s": last_reasoning_latency_s, |
| "thinking_latency_s": ( |
| last_reasoning_latency_s |
| if (saw_reasoning and last_reasoning_latency_s is not None and content_text is not None) |
| else None |
| ), |
| "total_latency_s": max(0.0, float(total_latency_s)), |
| "saw_reasoning": bool(saw_reasoning), |
| }, |
| } |
|
|
|
|
| class TableOCROpenRouterClient(TableOCRRemoteChatClient): |
| provider_label = "OpenRouter" |
| response_error_cls = TableOCROpenRouterResponseError |
| api_key_env_name = "OPENROUTER_API_KEY" |
| model_id_env_name = "OPENROUTER_MODEL_ID" |
| base_url_env_name = "OPENROUTER_BASE_URL" |
| reasoning_level_env_name = "OPENROUTER_REASONING_LEVEL" |
| default_base_url = "https://openrouter.ai/api/v1" |
|
|
| def _build_headers(self) -> Dict[str, str]: |
| headers = super()._build_headers() |
| headers["HTTP-Referer"] = "http://localhost" |
| headers["X-Title"] = "OCR-Bench" |
| return headers |
|
|
| def _augment_request_body(self, body: Dict[str, Any], *, include_reasoning: bool) -> None: |
| if self.reasoning_level: |
| body["reasoning"] = { |
| "effort": self.reasoning_level, |
| "exclude": (not include_reasoning), |
| } |
|
|
|
|
| class TableOCRQwenClient(TableOCRRemoteChatClient): |
| provider_label = "Qwen OCR" |
| api_key_env_name = "QWEN_OCR_API_KEY" |
| model_id_env_name = "QWEN_OCR_MODEL_ID" |
| base_url_env_name = "QWEN_OCR_BASE_URL" |
| default_base_url = "http://127.0.0.1:8000/v1" |
| require_api_key = False |
|
|
| def _augment_request_body(self, body: Dict[str, Any], *, include_reasoning: bool) -> None: |
| if include_reasoning or _env_flag("QWEN_OCR_PRESERVE_THINKING", "1"): |
| body["chat_template_kwargs"] = { |
| "preserve_thinking": True, |
| } |
|
|
|
|
| class TableOCRGemma4VLLMClient(TableOCRRemoteChatClient): |
| provider_label = "Gemma 4 vLLM" |
| api_key_env_name = "GEMMA4_VLLM_API_KEY" |
| model_id_env_name = "GEMMA4_VLLM_MODEL_ID" |
| base_url_env_name = "GEMMA4_VLLM_BASE_URL" |
| default_base_url = "http://127.0.0.1:8000/v1" |
| require_api_key = False |
|
|
| def _augment_request_body(self, body: Dict[str, Any], *, include_reasoning: bool) -> None: |
| del include_reasoning |
| if _env_flag("GEMMA4_VLLM_ENABLE_THINKING", "1"): |
| body["chat_template_kwargs"] = { |
| "enable_thinking": True, |
| } |
|
|
|
|
| class TableOCRGemma4LlamaCppClient(TableOCRRemoteChatClient): |
| provider_label = "Gemma 4 llama.cpp" |
| api_key_env_name = "GEMMA4_LLAMA_CPP_API_KEY" |
| model_id_env_name = "GEMMA4_LLAMA_CPP_MODEL_ID" |
| base_url_env_name = "GEMMA4_LLAMA_CPP_BASE_URL" |
| default_base_url = "http://127.0.0.1:8080/v1" |
| require_api_key = False |
|
|
|
|
| def extract_text_from_openrouter_content(content: Any) -> Optional[str]: |
| if isinstance(content, str): |
| normalized = normalize_text(content) |
| return content if normalized else None |
| if not isinstance(content, list): |
| return None |
|
|
| parts: List[str] = [] |
| for item in content: |
| if isinstance(item, str): |
| parts.append(item) |
| continue |
| if not isinstance(item, dict): |
| continue |
| for key in ("text", "content", "value"): |
| value = item.get(key) |
| if isinstance(value, str) and value.strip(): |
| parts.append(value) |
| break |
|
|
| joined = "\n".join(part for part in parts if normalize_text(part)) |
| return joined or None |
|
|
|
|
| def estimate_openrouter_thinking_latency_s( |
| total_latency_s: float, |
| *, |
| reasoning_tokens: Optional[int], |
| completion_tokens: Optional[int], |
| ) -> Optional[float]: |
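    """Approximate thinking time by prorating total latency by the reasoning-to-completion token ratio."""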
| if not isinstance(total_latency_s, (int, float)) or total_latency_s < 0: |
| return None |
| if not isinstance(reasoning_tokens, int) or reasoning_tokens <= 0: |
| return None |
| if not isinstance(completion_tokens, int) or completion_tokens <= 0: |
| return None |
| ratio = max(0.0, min(1.0, float(reasoning_tokens) / float(completion_tokens))) |
| return float(total_latency_s) * ratio |
|
|
|
|
| def summarize_openrouter_chat_response(response: Dict[str, Any]) -> Dict[str, Any]: |
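    """Flatten a chat response into content, finish reason, provider, token counts, and latency fields."""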
| choices = response.get("choices") |
| first_choice = choices[0] if isinstance(choices, list) and choices else {} |
| if not isinstance(first_choice, dict): |
| first_choice = {} |
|
|
| message = first_choice.get("message") |
| if not isinstance(message, dict): |
| message = {} |
|
|
| provider = response.get("provider") |
| if isinstance(provider, dict): |
| provider_summary = ( |
| provider.get("name") |
| or provider.get("provider_name") |
| or normalize_text(json.dumps(provider, ensure_ascii=False)) |
| ) |
| else: |
| provider_summary = normalize_text(provider) |
|
|
| tool_calls = message.get("tool_calls") |
| tool_call_count = len(tool_calls) if isinstance(tool_calls, list) else 0 |
| content = message.get("content") |
| refusal = message.get("refusal") |
| finish_reason = normalize_text(first_choice.get("finish_reason", "")) |
| usage = response.get("usage") |
| usage_payload = usage if isinstance(usage, dict) else {} |
| completion_tokens_raw = usage_payload.get("completion_tokens") |
| if not isinstance(completion_tokens_raw, (int, float)): |
| completion_tokens_raw = usage_payload.get("output_tokens") |
| completion_tokens = int(completion_tokens_raw) if isinstance(completion_tokens_raw, (int, float)) else None |
| completion_details = usage_payload.get("completion_tokens_details") |
| if not isinstance(completion_details, dict): |
| completion_details = {} |
| reasoning_tokens_raw = completion_details.get("reasoning_tokens") |
| reasoning_tokens = int(reasoning_tokens_raw) if isinstance(reasoning_tokens_raw, (int, float)) else None |
| stream_timing = response.get("_stream_timing") |
| if not isinstance(stream_timing, dict): |
| stream_timing = {} |
| thinking_latency_s = stream_timing.get("thinking_latency_s") |
| if not isinstance(thinking_latency_s, (int, float)): |
| thinking_latency_s = None |
| total_latency_s = stream_timing.get("total_latency_s") |
| if not isinstance(total_latency_s, (int, float)): |
| total_latency_s = None |
| first_content_latency_s = stream_timing.get("first_content_latency_s") |
| if not isinstance(first_content_latency_s, (int, float)): |
| first_content_latency_s = None |
|
|
| return { |
| "content": content, |
| "content_type": type(content).__name__, |
| "finish_reason": finish_reason or None, |
| "provider": provider_summary or None, |
| "tool_call_count": tool_call_count, |
| "tool_calls": tool_calls if isinstance(tool_calls, list) else [], |
| "refusal": refusal, |
| "message_role": normalize_text(message.get("role", "")) or None, |
| "completion_tokens": completion_tokens, |
| "reasoning_tokens": reasoning_tokens, |
| "thinking_latency_s": (max(0.0, float(thinking_latency_s)) if thinking_latency_s is not None else None), |
| "total_latency_s": (max(0.0, float(total_latency_s)) if total_latency_s is not None else None), |
| "first_content_latency_s": (max(0.0, float(first_content_latency_s)) if first_content_latency_s is not None else None), |
| } |
|
|
|
|
| def openrouter_ocr_model_ids() -> set[str]: |
| return { |
| value.strip() |
| for value in [ |
| os.getenv("OPENROUTER_MODEL_ID", ""), |
| os.getenv("TESTMODEL", ""), |
| ] |
| if value and value.strip() |
| } |
|
|
|
|
| def default_openrouter_retry_model_ids(model_id: str) -> List[str]: |
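| # e.g. a hypothetical "vendor/model:free" falls back to ["vendor/model"]; |
| # ids without the ":free" suffix get no automatic retry target. |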
| normalized = normalize_text(model_id) |
| if normalized.endswith(":free"): |
| paid_slug = normalized[: -len(":free")] |
| return [paid_slug] if paid_slug else [] |
| return [] |
|
|
|
|
| def default_openrouter_ocr_max_attempts() -> int: |
| return _env_int("OPENROUTER_OCR_MAX_ATTEMPTS", 3) |
|
|
|
|
| def build_openrouter_retry_chain(model_id: str) -> List[str]: |
| chain: List[str] = [] |
| for candidate in [normalize_text(model_id), *default_openrouter_retry_model_ids(model_id)]: |
| if candidate and candidate not in chain: |
| chain.append(candidate) |
| return chain |
|
|
|
|
| def build_openrouter_attempt_plan(model_id: str, *, max_attempts: int) -> List[str]: |
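| # Sketch of the padding behavior (hypothetical slugs): with max_attempts=3 |
| # and chain ["vendor/model:free", "vendor/model"], the plan is |
| # ["vendor/model:free", "vendor/model", "vendor/model"]; the final |
| # candidate is repeated until the attempt budget is filled. |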
| retry_chain = build_openrouter_retry_chain(model_id) |
| if not retry_chain: |
| return [] |
| attempt_limit = max(1, int(max_attempts)) |
| plan: List[str] = [] |
| for candidate in retry_chain: |
| if len(plan) >= attempt_limit: |
| break |
| plan.append(candidate) |
| while len(plan) < attempt_limit: |
| plan.append(retry_chain[-1]) |
| return plan |
|
|
|
|
| def default_firered_model_id() -> str: |
| return normalize_text(os.getenv("FIRERED_MODEL_ID", "FireRedTeam/FireRed-OCR")) or "FireRedTeam/FireRed-OCR" |
|
|
|
|
| def default_qianfan_ocr_model_id() -> str: |
| return normalize_text(os.getenv("QIANFAN_OCR_MODEL_ID", _QIANFAN_OCR_DEFAULT_MODEL_ID)) or _QIANFAN_OCR_DEFAULT_MODEL_ID |
|
|
|
|
| def default_glm_ocr_model_id() -> str: |
| return normalize_text(os.getenv("GLM_OCR_MODEL_ID", _GLM_OCR_DEFAULT_MODEL_ID)) or _GLM_OCR_DEFAULT_MODEL_ID |
|
|
|
|
| def default_paddleocr_vl_model_id() -> str: |
| return normalize_text(os.getenv("PADDLEOCR_VL_MODEL_ID", _PADDLEOCR_VL_DEFAULT_MODEL_ID)) or _PADDLEOCR_VL_DEFAULT_MODEL_ID |
|
|
|
|
| def default_got_ocr_model_id() -> str: |
| return normalize_text(os.getenv("GOT_OCR_MODEL_ID", _GOT_OCR_DEFAULT_MODEL_ID)) or _GOT_OCR_DEFAULT_MODEL_ID |
|
|
|
|
| def default_monkeyocr_model_id() -> str: |
| return normalize_text(os.getenv("MONKEYOCR_MODEL_ID", _MONKEYOCR_DEFAULT_MODEL_ID)) or _MONKEYOCR_DEFAULT_MODEL_ID |
|
|
|
|
| def default_qwen_ocr_model_id() -> str: |
| return normalize_text(os.getenv("QWEN_OCR_MODEL_ID", _QWEN_OCR_DEFAULT_MODEL_ID)) or _QWEN_OCR_DEFAULT_MODEL_ID |
|
|
|
|
| def default_gemma4_hf_model_id() -> str: |
| return ( |
| normalize_text(os.getenv("GEMMA4_HF_MODEL_ID", _GEMMA4_VLLM_DEFAULT_MODEL_ID)) |
| or _GEMMA4_VLLM_DEFAULT_MODEL_ID |
| ) |
|
|
|
|
| def default_gemma4_vllm_model_id() -> str: |
| return ( |
| normalize_text(os.getenv("GEMMA4_VLLM_MODEL_ID", _GEMMA4_VLLM_DEFAULT_MODEL_ID)) |
| or _GEMMA4_VLLM_DEFAULT_MODEL_ID |
| ) |
|
|
|
|
| def default_gemma4_llamacpp_model_id() -> str: |
| return ( |
| normalize_text(os.getenv("GEMMA4_LLAMA_CPP_MODEL_ID", _GEMMA4_LLAMA_CPP_DEFAULT_MODEL_ID)) |
| or _GEMMA4_LLAMA_CPP_DEFAULT_MODEL_ID |
| ) |
|
|
|
|
| def resolve_firered_model_id(model_id: Optional[str]) -> str: |
| normalized = normalize_text(model_id or "") |
| if not normalized: |
| return default_firered_model_id() |
| if normalized.lower() in {"firered", "firered-local", "local-firered"}: |
| return default_firered_model_id() |
| return normalized |
|
|
|
|
| def resolve_qianfan_model_id(model_id: Optional[str]) -> str: |
| normalized = normalize_text(model_id or "") |
| if not normalized: |
| return default_qianfan_ocr_model_id() |
| if normalized.lower() in {"qianfan", "qianfan-ocr"}: |
| return default_qianfan_ocr_model_id() |
| return normalized |
|
|
|
|
| def resolve_glm_ocr_model_id(model_id: Optional[str]) -> str: |
| normalized = normalize_text(model_id or "") |
| if not normalized: |
| return default_glm_ocr_model_id() |
| if normalized.lower() in {"glm-ocr", "glmocr"}: |
| return default_glm_ocr_model_id() |
| return normalized |
|
|
|
|
| def resolve_paddleocr_vl_model_id(model_id: Optional[str]) -> str: |
| normalized = normalize_text(model_id or "") |
| if not normalized: |
| return default_paddleocr_vl_model_id() |
| if normalized.lower() in {"paddleocr-vl", "paddleocr-vl-1.5", "paddleocr_vl"}: |
| return default_paddleocr_vl_model_id() |
| return normalized |
|
|
|
|
| def resolve_got_ocr_model_id(model_id: Optional[str]) -> str: |
| normalized = normalize_text(model_id or "") |
| if not normalized: |
| return default_got_ocr_model_id() |
| if normalized.lower() in {"got-ocr", "got-ocr2", "got-ocr2_0", "got_ocr", "gotocr", "gotocr2"}: |
| return default_got_ocr_model_id() |
| return normalized |
|
|
|
|
| def resolve_monkeyocr_model_id(model_id: Optional[str]) -> str: |
| normalized = normalize_text(model_id or "") |
| if not normalized: |
| return default_monkeyocr_model_id() |
| if normalized.lower() in {"monkeyocr", "monkeyocr-pro-3b", "monkeyocr_pro_3b"}: |
| return default_monkeyocr_model_id() |
| return normalized |
|
|
|
|
| def resolve_qwen_ocr_model_id(model_id: Optional[str]) -> str: |
| normalized = normalize_text(model_id or "") |
| if not normalized: |
| return default_qwen_ocr_model_id() |
| if normalized.lower() in { |
| "qwen_ocr", |
| "qwen-ocr", |
| "qwenocr", |
| "qwen3.5-35b-a3b", |
| "qwen3.5-35b-a3b-fp8", |
| "qwen3.6-35b-a3b", |
| "qwen3.6-35b-a3b-fp8", |
| }: |
| return default_qwen_ocr_model_id() |
| return normalized |
|
|
|
|
| def resolve_gemma4_vllm_model_id(model_id: Optional[str]) -> str: |
| normalized = normalize_text(model_id or "") |
| if not normalized: |
| return default_gemma4_vllm_model_id() |
| if normalized.lower().replace("_", "-") in { |
| "gemma4-vllm", |
| "gemma-4-vllm", |
| "gemma4-26b-vllm", |
| "gemma-4-26b-vllm", |
| "gemma4-26b-a4b", |
| "gemma-4-26b-a4b", |
| "gemma4-26b-a4b-it", |
| "gemma-4-26b-a4b-it", |
| }: |
| return default_gemma4_vllm_model_id() |
| return normalized |
|
|
|
|
| def resolve_gemma4_hf_model_id(model_id: Optional[str]) -> str: |
| normalized = normalize_text(model_id or "") |
| if not normalized: |
| return default_gemma4_hf_model_id() |
| if normalized.lower().replace("_", "-") in { |
| "gemma4", |
| "gemma-4", |
| "gemma4-hf", |
| "gemma-4-hf", |
| "gemma4-local", |
| "gemma-4-local", |
| "gemma4-huggingface", |
| "gemma-4-huggingface", |
| "gemma4-transformers", |
| "gemma-4-transformers", |
| }: |
| return default_gemma4_hf_model_id() |
| return normalized |
|
|
|
|
| def resolve_gemma4_llamacpp_model_id(model_id: Optional[str]) -> str: |
| normalized = normalize_text(model_id or "") |
| if not normalized: |
| return default_gemma4_llamacpp_model_id() |
| if normalized.lower().replace("_", "-") in { |
| "gemma4", |
| "gemma-4", |
| "gemma4-26b", |
| "gemma-4-26b", |
| "gemma4-26b-it", |
| "gemma-4-26b-it", |
| "gemma4-llamacpp", |
| "gemma-4-llamacpp", |
| "gemma4-26b-llamacpp", |
| "gemma-4-26b-llamacpp", |
| "gemma4-26b-llama-cpp", |
| "gemma-4-26b-llama-cpp", |
| "gemma4-llama-cpp", |
| "gemma-4-llama-cpp", |
| "llamacpp-gemma4-26b", |
| "llama-cpp-gemma4-26b", |
| "llamacpp-gemma4", |
| "llama-cpp-gemma4", |
| }: |
| return default_gemma4_llamacpp_model_id() |
| return normalized |
|
|
|
|
| def _resolve_monkeyocr_entrypoint(repo_path: Path) -> Path: |
| for candidate in ( |
| repo_path / "parse.py", |
| repo_path / "infer.py", |
| repo_path / "inference.py", |
| repo_path / "run.py", |
| repo_path / "demo.py", |
| repo_path / "app.py", |
| ): |
| if candidate.exists(): |
| return candidate |
| raise RuntimeError( |
| "MonkeyOCR backend needs either MONKEYOCR_RUN_COMMAND or one of parse.py/infer.py/inference.py/run.py/demo.py/app.py in MONKEYOCR_REPO_DIR." |
| ) |
|
|
|
|
| def _extract_monkeyocr_text_payload(payload: Any, *, _depth: int = 0) -> Optional[str]: |
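| # Depth-limited search for the best text payload: non-blank strings win |
| # outright, dicts are probed via preferred keys before arbitrary values, |
| # and lists keep the candidate with the most tag-stripped visible text. |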
| if _depth > 8: |
| return None |
| if isinstance(payload, str): |
| normalized = normalize_text(payload) |
| return payload if normalized else None |
| if isinstance(payload, dict): |
| preferred_keys = ( |
| "html", |
| "markdown", |
| "md", |
| "table_html", |
| "table_markdown", |
| "output_html", |
| "output_markdown", |
| "prediction", |
| "pred", |
| "result", |
| "response", |
| "text", |
| "content", |
| "output", |
| "ocr", |
| "table", |
| "data", |
| ) |
| for key in preferred_keys: |
| extracted = _extract_monkeyocr_text_payload(payload.get(key), _depth=_depth + 1) |
| if extracted: |
| return extracted |
| for value in payload.values(): |
| extracted = _extract_monkeyocr_text_payload(value, _depth=_depth + 1) |
| if extracted: |
| return extracted |
| return None |
| if isinstance(payload, list): |
| candidates = [ |
| extracted |
| for extracted in ( |
| _extract_monkeyocr_text_payload(item, _depth=_depth + 1) |
| for item in payload |
| ) |
| if extracted |
| ] |
| if not candidates: |
| return None |
| return max(candidates, key=lambda item: len(normalize_text(re.sub(r"<[^>]+>", " ", item)))) |
| return None |
|
|
|
|
| def _coerce_monkeyocr_output_text(raw_text: str) -> str: |
| text = str(raw_text or "") |
| normalized = normalize_text(text) |
| if not normalized: |
| return "" |
| if normalized.startswith("{") or normalized.startswith("["): |
| try: |
| payload = json.loads(normalized) |
| except Exception: |
| return text |
| extracted = _extract_monkeyocr_text_payload(payload) |
| if extracted: |
| return extracted |
| return text |
|
|
|
|
| def _read_monkeyocr_output_artifact(path: Path) -> str: |
| suffix = path.suffix.lower() |
| text = path.read_text(encoding="utf-8", errors="replace") |
| if suffix == ".json": |
| try: |
| payload = json.loads(text) |
| except Exception: |
| return _coerce_monkeyocr_output_text(text) |
| extracted = _extract_monkeyocr_text_payload(payload) |
| return extracted or "" |
| return _coerce_monkeyocr_output_text(text) |
|
|
|
|
| def firered_mps_retry_pixel_limits() -> List[int]: |
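| # Parses a comma-separated list of pixel caps, e.g. the default |
| # "1500000,1000000" -> [1500000, 1000000]; non-numeric pieces are skipped |
| # and non-positive or duplicate values are dropped. |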
| raw_value = normalize_text(os.getenv("FIRERED_MPS_RETRY_MAX_IMAGE_PIXELS", "1500000,1000000")) |
| limits: List[int] = [] |
| seen: set[int] = set() |
| for piece in raw_value.split(","): |
| try: |
| value = max(0, int(piece.strip())) |
| except ValueError: |
| continue |
| if value <= 0 or value in seen: |
| continue |
| limits.append(value) |
| seen.add(value) |
| return limits |
|
|
|
|
| def default_deepseek_ocr_model_id() -> str: |
| return normalize_text(os.getenv("DEEPSEEK_OCR_MODEL_ID", "deepseek-ai/DeepSeek-OCR-2")) or "deepseek-ai/DeepSeek-OCR-2" |
|
|
|
|
| def default_mistral_ocr_model_id() -> str: |
| return normalize_text(os.getenv("MISTRAL_OCR_MODEL_ID", "mistral-ocr-latest")) or "mistral-ocr-latest" |
|
|
|
|
| def default_pdf_page_ocr_model_id() -> str: |
| candidates = [ |
| normalize_text(os.getenv("PDF_PAGE_OCR_MODEL_ID", "")), |
| normalize_text(os.getenv("MISTRAL_OCR_MODEL_ID", "")), |
| default_mistral_ocr_model_id(), |
| ] |
| for candidate in candidates: |
| if candidate: |
| return candidate |
| return default_mistral_ocr_model_id() |
|
|
|
|
| def _mistral_ocr_table_format() -> str: |
| value = normalize_text(os.getenv("MISTRAL_OCR_TABLE_FORMAT", "html")).lower() |
| if value in {"markdown", "html"}: |
| return value |
| return "html" |
|
|
|
|
| def _deepseek_ocr_local_dir() -> Optional[str]: |
| value = normalize_text(os.getenv("DEEPSEEK_OCR_MODEL_LOCAL_DIR", "")) |
| return value or None |
|
|
|
|
| def _deepseek_ocr_cache_dir() -> Optional[str]: |
| value = normalize_text(os.getenv("DEEPSEEK_OCR_MODEL_CACHE_DIR", "")) |
| return value or None |
|
|
|
|
| def _deepseek_ocr_revision() -> Optional[str]: |
| value = normalize_text(os.getenv("DEEPSEEK_OCR_MODEL_REVISION", "")) |
| return value or None |
|
|
|
|
| def _deepseek_ocr_local_files_only() -> bool: |
| return _env_flag("DEEPSEEK_OCR_LOCAL_FILES_ONLY") |
|
|
|
|
| def _deepseek_ocr_trust_remote_code() -> bool: |
| return _env_flag("DEEPSEEK_OCR_TRUST_REMOTE_CODE", "1") |
|
|
|
|
| def _deepseek_ocr_device() -> str: |
| return normalize_text(os.getenv("DEEPSEEK_OCR_DEVICE", "auto")).lower() or "auto" |
|
|
|
|
| def _deepseek_ocr_device_map() -> str: |
| return normalize_text(os.getenv("DEEPSEEK_OCR_DEVICE_MAP", "auto")) or "auto" |
|
|
|
|
| def _deepseek_ocr_max_new_tokens() -> int: |
| return _env_int("DEEPSEEK_OCR_MAX_NEW_TOKENS", 4096) |
|
|
|
|
| def _deepseek_ocr_max_image_pixels() -> int: |
| return _env_int("DEEPSEEK_OCR_MAX_IMAGE_PIXELS", 0) |
|
|
|
|
| def _backend_env_name(prefix: str, suffix: str) -> str: |
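| # e.g. _backend_env_name("glm_ocr", "device_map") -> "GLM_OCR_DEVICE_MAP" |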
| return f"{prefix.upper()}_{suffix.upper()}" |
|
|
|
|
| def _backend_model_local_dir(prefix: str) -> Optional[str]: |
| value = normalize_text(os.getenv(_backend_env_name(prefix, "MODEL_LOCAL_DIR"), "")) |
| return value or None |
|
|
|
|
| def _backend_model_cache_dir(prefix: str) -> Optional[str]: |
| value = normalize_text(os.getenv(_backend_env_name(prefix, "MODEL_CACHE_DIR"), "")) |
| return value or None |
|
|
|
|
| def _backend_model_revision(prefix: str) -> Optional[str]: |
| value = normalize_text(os.getenv(_backend_env_name(prefix, "MODEL_REVISION"), "")) |
| return value or None |
|
|
|
|
| def _backend_local_files_only(prefix: str) -> bool: |
| return _env_flag(_backend_env_name(prefix, "LOCAL_FILES_ONLY")) |
|
|
|
|
| def _backend_trust_remote_code(prefix: str, default: Optional[str] = None) -> bool: |
| if default is None: |
| # Most local OCR backends here ship custom remote modeling code, so |
| # default trust_remote_code on; PaddleOCR-VL is treated as the exception |
| # and defaults off. |
| default = "0" if str(prefix or "").upper() == "PADDLEOCR_VL" else "1" |
| return _env_flag(_backend_env_name(prefix, "TRUST_REMOTE_CODE"), default) |
|
|
|
|
| def _backend_device(prefix: str) -> str: |
| return normalize_text(os.getenv(_backend_env_name(prefix, "DEVICE"), "auto")).lower() or "auto" |
|
|
|
|
| def _backend_device_map(prefix: str) -> str: |
| return normalize_text(os.getenv(_backend_env_name(prefix, "DEVICE_MAP"), "auto")) or "auto" |
|
|
|
|
| def _backend_max_new_tokens(prefix: str, default: int = 4096) -> int: |
| return _env_int(_backend_env_name(prefix, "MAX_NEW_TOKENS"), default) |
|
|
|
|
| def _backend_max_image_pixels(prefix: str, default: int = 0) -> int: |
| return _env_int(_backend_env_name(prefix, "MAX_IMAGE_PIXELS"), default) |
|
|
|
|
| def _backend_attn_implementation(prefix: str) -> Optional[str]: |
| value = normalize_text(os.getenv(_backend_env_name(prefix, "ATTN_IMPLEMENTATION"), "")) |
| return value or None |
|
|
|
|
| def _select_generic_ocr_runtime(torch, requested: str): |
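| # Returns (device, dtype, use_device_map): CUDA prefers bfloat16 with a |
| # device_map, MPS uses float16, and CPU falls back to float32. |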
| normalized = normalize_text(requested or "auto").lower() or "auto" |
| mps_available = bool(getattr(torch.backends, "mps", None)) and torch.backends.mps.is_available() |
| if normalized not in {"auto", "cuda", "mps", "cpu"}: |
| raise RuntimeError( |
| f"Unsupported OCR device '{requested}'. Expected one of: auto, cuda, mps, cpu." |
| ) |
| if normalized in {"auto", "cuda"} and torch.cuda.is_available(): |
| return "cuda", torch.bfloat16, True |
| if normalized == "cuda": |
| raise RuntimeError("CUDA was requested for OCR, but CUDA is not available.") |
| if normalized in {"auto", "mps"} and mps_available: |
| return "mps", torch.float16, False |
| if normalized == "mps": |
| raise RuntimeError("MPS was requested for OCR, but MPS is not available.") |
| return "cpu", torch.float32, False |
|
|
|
|
| def _resolve_generic_model_source(model_id: str, *, env_prefix: str) -> str: |
| local_dir = _backend_model_local_dir(env_prefix) |
| if not local_dir: |
| return model_id |
| local_path = Path(local_dir).expanduser() |
| if str(local_path).startswith("/content") and not Path("/content").exists(): |
| raise RuntimeError( |
| f"{_backend_env_name(env_prefix, 'MODEL_LOCAL_DIR')} points to '{local_dir}', which looks like a Colab path, " |
| "but this process is running outside Colab." |
| ) |
| if not local_path.exists(): |
| raise RuntimeError(f"{_backend_env_name(env_prefix, 'MODEL_LOCAL_DIR')}='{local_dir}' does not exist.") |
| if not local_path.is_dir(): |
| raise RuntimeError(f"{_backend_env_name(env_prefix, 'MODEL_LOCAL_DIR')}='{local_dir}' is not a directory.") |
| return str(local_path) |
|
|
|
|
| def _prepare_backend_image(image, *, env_prefix: str, runtime_device: str): |
| del runtime_device |
| if not _table_ocr_allow_backend_image_resize(): |
| return image |
| max_pixels = _backend_max_image_pixels(env_prefix, 0) |
| width, height = image.size |
| total_pixels = width * height |
| if max_pixels <= 0 or total_pixels <= max_pixels: |
| return image |
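| # Worked example (illustrative numbers): a 3000x2000 page is 6,000,000 px; |
| # with a 1,500,000 px cap the scale is sqrt(1.5 / 6.0) = 0.5, so the image |
| # is resized to 1500x1000. |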
| scale = math.sqrt(float(max_pixels) / float(total_pixels)) |
| new_width = max(28, int(width * scale)) |
| new_height = max(28, int(height * scale)) |
| emit_progress( |
| f"Resizing {env_prefix} OCR image from {width}x{height} to {new_width}x{new_height} " |
| f"to stay within {max_pixels:,} pixels." |
| ) |
| return image.resize((new_width, new_height)) |
|
|
|
|
| def _ensure_transformers_dynamic_cache_compatibility() -> None: |
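| # Older remote-code models still touch legacy cache attributes |
| # (seen_tokens, get_max_length, get_usable_length) that newer transformers |
| # removed; patch lightweight equivalents back onto Cache and DynamicCache. |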
| try: |
| from transformers.cache_utils import Cache, DynamicCache |
| except ImportError: |
| return |
|
|
| def get_seq_length_compat(cache_obj: Any, layer_idx: int = 0) -> int: |
| get_seq_length = getattr(cache_obj, "get_seq_length", None) |
| if callable(get_seq_length): |
| try: |
| return int(get_seq_length(layer_idx)) |
| except TypeError: |
| try: |
| return int(get_seq_length()) |
| except Exception: |
| return 0 |
| except Exception: |
| return 0 |
| return 0 |
|
|
| def get_max_length_compat(cache_obj: Any, layer_idx: int = 0) -> Optional[int]: |
| get_max_cache_shape = getattr(cache_obj, "get_max_cache_shape", None) |
| if not callable(get_max_cache_shape): |
| return None |
| try: |
| max_length = get_max_cache_shape(layer_idx) |
| except TypeError: |
| try: |
| max_length = get_max_cache_shape() |
| except Exception: |
| return None |
| except Exception: |
| return None |
| if not isinstance(max_length, (int, float)): |
| return None |
| resolved = int(max_length) |
| return None if resolved < 0 else resolved |
|
|
| def install_cache_compatibility_shims(cache_cls: Any) -> None: |
| seen_tokens_missing = not hasattr(cache_cls, "seen_tokens") |
|
|
| if seen_tokens_missing: |
| def get_seen_tokens(self) -> int: |
| override = getattr(self, "_codex_seen_tokens_override", None) |
| if override is not None: |
| try: |
| return int(override) |
| except Exception: |
| return 0 |
| return get_seq_length_compat(self) |
|
|
| def set_seen_tokens(self, value: Any) -> None: |
| try: |
| self._codex_seen_tokens_override = int(value) |
| except Exception: |
| self._codex_seen_tokens_override = value |
|
|
| setattr(cache_cls, "seen_tokens", property(get_seen_tokens, set_seen_tokens)) |
|
|
| if not hasattr(cache_cls, "get_max_length"): |
| def get_max_length(self, layer_idx: int = 0) -> Optional[int]: |
| return get_max_length_compat(self, layer_idx=layer_idx) |
|
|
| setattr(cache_cls, "get_max_length", get_max_length) |
|
|
| if not hasattr(cache_cls, "get_usable_length"): |
| def get_usable_length(self, new_seq_length: int, layer_idx: int = 0) -> int: |
| previous_seq_length = get_seq_length_compat(self, layer_idx=layer_idx) |
| max_length = get_max_length_compat(self, layer_idx=layer_idx) |
| if max_length is not None and previous_seq_length + int(new_seq_length) > max_length: |
| return max(0, max_length - int(new_seq_length)) |
| return previous_seq_length |
|
|
| setattr(cache_cls, "get_usable_length", get_usable_length) |
|
|
| install_cache_compatibility_shims(Cache) |
| install_cache_compatibility_shims(DynamicCache) |
|
|
|
|
| def _maybe_to_model_device(batch: Any, model_device: Any): |
| try: |
| return batch.to(model_device) |
| except Exception: |
| return batch |
|
|
|
|
| def _get_batch_item(batch: Any, key: str): |
| if isinstance(batch, dict): |
| return batch.get(key) |
| return getattr(batch, key, None) |
|
|
|
|
| def _decode_generated_text(processor: Any, generated_ids: Any, input_ids: Any = None) -> str: |
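| # Trim the prompt tokens (everything before input_ids.shape[1]) and then |
| # try batch_decode on the processor and its tokenizer in turn. |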
| trimmed_ids = generated_ids |
| if input_ids is not None: |
| try: |
| trimmed_ids = generated_ids[:, input_ids.shape[1] :] |
| except Exception: |
| trimmed_ids = generated_ids |
| decode_targets = [ |
| processor, |
| getattr(processor, "tokenizer", None), |
| ] |
| for target in decode_targets: |
| if target is None or not hasattr(target, "batch_decode"): |
| continue |
| try: |
| decoded = target.batch_decode( |
| trimmed_ids, |
| skip_special_tokens=True, |
| clean_up_tokenization_spaces=False, |
| ) |
| if isinstance(decoded, list) and decoded: |
| return decoded[0] |
| except Exception: |
| continue |
| raise RuntimeError("Could not decode OCR model output with the loaded processor/tokenizer.") |
|
|
|
|
| def _build_generic_messages(prompt_text: str, page_image) -> List[Dict[str, Any]]: |
| return [ |
| { |
| "role": "user", |
| "content": [ |
| {"type": "image", "image": page_image}, |
| {"type": "text", "text": prompt_text}, |
| ], |
| } |
| ] |
|
|
|
|
| def _build_transformers_multimodal_inputs(processor: Any, page_image, prompt_text: str): |
| messages = _build_generic_messages(prompt_text, page_image) |
| if hasattr(processor, "apply_chat_template"): |
| try: |
| chat_text = processor.apply_chat_template( |
| messages, |
| tokenize=False, |
| add_generation_prompt=True, |
| ) |
| try: |
| return processor(text=[chat_text], images=[page_image], return_tensors="pt") |
| except Exception: |
| return processor(text=chat_text, images=page_image, return_tensors="pt") |
| except Exception: |
| pass |
| attempts = [ |
| {"text": [prompt_text], "images": [page_image], "return_tensors": "pt"}, |
| {"text": prompt_text, "images": page_image, "return_tensors": "pt"}, |
| {"text": [prompt_text], "image": [page_image], "return_tensors": "pt"}, |
| {"text": prompt_text, "image": page_image, "return_tensors": "pt"}, |
| ] |
| last_exc: Optional[Exception] = None |
| for kwargs in attempts: |
| try: |
| return processor(**kwargs) |
| except Exception as exc: |
| last_exc = exc |
| raise RuntimeError(f"Could not build multimodal OCR inputs: {last_exc}") |
|
|
|
|
| def _normalize_chat_result(result: Any) -> str: |
| if isinstance(result, str): |
| return result |
| if isinstance(result, tuple): |
| for item in result: |
| if isinstance(item, str) and normalize_text(item): |
| return item |
| if isinstance(result, dict): |
| for key in ("text", "response", "content", "answer", "output", "transcription"): |
| value = result.get(key) |
| if isinstance(value, str) and normalize_text(value): |
| return value |
| raise RuntimeError(f"OCR chat backend returned unsupported result type: {type(result).__name__}") |
|
|
|
|
| def _build_generation_config_dict(prefix: str) -> Dict[str, Any]: |
| return { |
| "max_new_tokens": _backend_max_new_tokens(prefix, 4096), |
| "do_sample": False, |
| } |
|
|
|
|
| @contextlib.contextmanager |
| def _known_model_load_warnings_context(*, env_prefix: str): |
| if env_prefix != "GOT_OCR": |
| yield |
| return |
| with warnings.catch_warnings(): |
| warnings.filterwarnings( |
| "ignore", |
| message=r".*invalid escape sequence '\\l'.*", |
| category=SyntaxWarning, |
| ) |
| yield |
|
|
|
|
| def _call_from_pretrained_with_dtype_fallback(loader: Any, model_source: str, **kwargs: Any): |
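| # Newer transformers builds accept dtype=...; older ones only know |
| # torch_dtype=.... Retry once with the legacy keyword when dtype is |
| # rejected. |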
| try: |
| return loader(model_source, **kwargs) |
| except TypeError as exc: |
| if "unexpected keyword argument 'dtype'" not in str(exc) or "dtype" not in kwargs: |
| raise |
| fallback_kwargs = dict(kwargs) |
| fallback_kwargs["torch_dtype"] = fallback_kwargs.pop("dtype") |
| return loader(model_source, **fallback_kwargs) |
|
|
|
|
| def _build_got_ocr_prompt_and_stop_text() -> tuple[str, str]: |
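| # A hand-built chat prompt assumed to match GOT-OCR2's native format: a |
| # system turn, then a user turn whose image slot is <img> plus 256 |
| # <imgpad> tokens plus </img>, followed by the "OCR with format:" query; |
| # <|im_end|> both separates turns and serves as the stop text. |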
| stop_text = "<|im_end|>" |
| system_prompt = ( |
| "<|im_start|>system\n" |
| " You should follow the instructions carefully and explain your answers in detail." |
| ) |
| image_prompt = "<img>" + ("<imgpad>" * 256) + "</img>\nOCR with format: " |
| prompt = ( |
| system_prompt |
| + stop_text |
| + "<|im_start|>user\n" |
| + image_prompt |
| + stop_text |
| + "<|im_start|>assistant\n" |
| ) |
| return prompt, stop_text |
|
|
|
|
| def _build_got_ocr_image_tensor(page_image): |
| try: |
| import torchvision.transforms as T |
| from torchvision.transforms.functional import InterpolationMode |
| except ImportError as exc: |
| raise RuntimeError("GOT_OCR local backend requires torchvision for image preprocessing.") from exc |
|
|
| transform = T.Compose( |
| [ |
| T.Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img), |
| T.Resize((1024, 1024), interpolation=InterpolationMode.BICUBIC), |
| T.ToTensor(), |
| T.Normalize( |
| mean=(0.48145466, 0.4578275, 0.40821073), |
| std=(0.26862954, 0.26130258, 0.27577711), |
| ), |
| ] |
| ) |
| return transform(page_image).unsqueeze(0) |
|
|
|
|
| def _run_got_ocr_generate_backend( |
| model_obj: Any, |
| tokenizer_like: Any, |
| page_image, |
| *, |
| model_device: Any, |
| ) -> str: |
| prompt, stop_text = _build_got_ocr_prompt_and_stop_text() |
| try: |
| tokenized_inputs = tokenizer_like([prompt], return_tensors="pt") |
| except Exception: |
| tokenized_inputs = tokenizer_like(prompt, return_tensors="pt") |
|
|
| input_ids = _get_batch_item(tokenized_inputs, "input_ids") |
| if input_ids is None: |
| raise RuntimeError("GOT_OCR tokenizer did not return input_ids for the native OCR prompt.") |
| attention_mask = _get_batch_item(tokenized_inputs, "attention_mask") |
|
|
| try: |
| input_ids = input_ids.to(model_device) |
| except Exception: |
| pass |
| if attention_mask is None: |
| try: |
| import torch |
|
|
| attention_mask = torch.ones_like(input_ids) |
| except Exception: |
| attention_mask = None |
| elif hasattr(attention_mask, "to"): |
| try: |
| attention_mask = attention_mask.to(model_device) |
| except Exception: |
| pass |
|
|
| image_tensor = _build_got_ocr_image_tensor(page_image) |
| image_dtype = getattr(model_obj, "dtype", None) |
| try: |
| image_tensor = image_tensor.to(device=model_device, dtype=image_dtype) |
| except Exception: |
| try: |
| image_tensor = image_tensor.to(model_device) |
| except Exception: |
| pass |
|
|
| generate_kwargs: Dict[str, Any] = { |
| "input_ids": input_ids, |
| "images": [image_tensor], |
| "do_sample": False, |
| "num_beams": 1, |
| "no_repeat_ngram_size": 20, |
| "max_new_tokens": _backend_max_new_tokens("GOT_OCR", 4096), |
| # GOT-OCR2's remote code predates the current transformers cache API, so |
| # generation is safer with the KV cache disabled. |
| "use_cache": False, |
| } |
| if attention_mask is not None: |
| generate_kwargs["attention_mask"] = attention_mask |
| eos_token_id = getattr(tokenizer_like, "eos_token_id", None) |
| pad_token_id = getattr(tokenizer_like, "pad_token_id", None) |
| if pad_token_id is None: |
| pad_token_id = eos_token_id |
| if pad_token_id is not None: |
| generate_kwargs["pad_token_id"] = pad_token_id |
| if eos_token_id is not None: |
| generate_kwargs["eos_token_id"] = eos_token_id |
|
|
| with __import__("torch").inference_mode(): |
| generated_ids = model_obj.generate(**generate_kwargs) |
|
|
| decoded_output = _decode_generated_text(tokenizer_like, generated_ids, input_ids=input_ids) |
| if stop_text and stop_text in decoded_output: |
| decoded_output = decoded_output.split(stop_text, 1)[0] |
| return decoded_output.strip() |
|
|
|
|
| def _run_got_ocr_backend( |
| model_obj: Any, |
| tokenizer_like: Any, |
| page_image, |
| *, |
| model_device: Any, |
| ) -> str: |
| errors: List[str] = [] |
| try: |
| emit_progress("Trying GOT_OCR OCR via direct generate(..., attention_mask=..., use_cache=False)") |
| return _run_got_ocr_generate_backend( |
| model_obj, |
| tokenizer_like, |
| page_image, |
| model_device=model_device, |
| ) |
| except Exception as exc: |
| errors.append(f"direct generate(..., attention_mask=..., use_cache=False): {exc}") |
|
|
| try: |
| return _run_chat_style_backend( |
| model_obj, |
| tokenizer_like, |
| page_image, |
| default_table_image_transcription_prompt(), |
| env_prefix="GOT_OCR", |
| model_device=model_device, |
| ) |
| except Exception as exc: |
| errors.append(f"legacy chat(...): {exc}") |
|
|
| raise RuntimeError("GOT_OCR backend exhausted: " + " ; ".join(errors)) |
|
|
|
|
| def _run_generate_style_backend( |
| model_obj: Any, |
| processor: Any, |
| page_image, |
| prompt_text: str, |
| *, |
| model_device: Any, |
| env_prefix: str, |
| ) -> str: |
| batch = _build_transformers_multimodal_inputs(processor, page_image, prompt_text) |
| batch = _maybe_to_model_device(batch, model_device) |
| input_ids = _get_batch_item(batch, "input_ids") |
| generate_fn = getattr(model_obj, "generate", None) |
| if not callable(generate_fn): |
| raise RuntimeError(f"{env_prefix} OCR model does not expose a callable generate() method.") |
| with __import__("torch").inference_mode(): |
| generated_ids = generate_fn( |
| **batch, |
| max_new_tokens=_backend_max_new_tokens(env_prefix, 4096), |
| do_sample=False, |
| ) |
| return _decode_generated_text(processor, generated_ids, input_ids=input_ids) |
|
|
|
|
| def _build_image_processor_inputs(processor: Any, page_image): |
| attempts = [ |
| {"images": [page_image], "return_tensors": "pt"}, |
| {"images": page_image, "return_tensors": "pt"}, |
| {"image": [page_image], "return_tensors": "pt"}, |
| {"image": page_image, "return_tensors": "pt"}, |
| ] |
| for kwargs in attempts: |
| try: |
| outputs = processor(**kwargs) |
| except Exception: |
| continue |
| pixel_values = _get_batch_item(outputs, "pixel_values") |
| if pixel_values is not None: |
| return pixel_values |
| return None |
|
|
|
|
| def _build_qianfan_pixel_values(page_image, *, image_size: int = 448, max_num: int = 12): |
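| # Dynamic tiling in the InternVL style (an assumption based on the 448 px |
| # tiles and ImageNet normalization): pick the tile grid (cols, rows) with |
| # cols*rows <= max_num whose aspect ratio best matches the page, crop that |
| # many image_size tiles, and append a full-image thumbnail whenever more |
| # than one tile was produced. |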
| try: |
| import torch |
| import torchvision.transforms as T |
| from torchvision.transforms.functional import InterpolationMode |
| except ImportError as exc: |
| raise RuntimeError( |
| "QIANFAN_OCR local backend requires torch and torchvision." |
| ) from exc |
|
|
| imagenet_mean = (0.485, 0.456, 0.406) |
| imagenet_std = (0.229, 0.224, 0.225) |
| transform = T.Compose( |
| [ |
| T.Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img), |
| T.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC), |
| T.ToTensor(), |
| T.Normalize(mean=imagenet_mean, std=imagenet_std), |
| ] |
| ) |
|
|
| orig_width, orig_height = page_image.size |
| aspect_ratio = orig_width / max(1, orig_height) |
| target_ratios = sorted( |
| { |
| (i, j) |
| for n in range(1, max_num + 1) |
| for i in range(1, n + 1) |
| for j in range(1, n + 1) |
| if 1 <= i * j <= max_num |
| }, |
| key=lambda ratio: ratio[0] * ratio[1], |
| ) |
| best_ratio = (1, 1) |
| best_ratio_diff = float("inf") |
| area = orig_width * orig_height |
| for ratio in target_ratios: |
| target_aspect_ratio = ratio[0] / ratio[1] |
| ratio_diff = abs(aspect_ratio - target_aspect_ratio) |
| if ratio_diff < best_ratio_diff: |
| best_ratio_diff = ratio_diff |
| best_ratio = ratio |
| elif ratio_diff == best_ratio_diff: |
| if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]: |
| best_ratio = ratio |
|
|
| target_width = image_size * best_ratio[0] |
| target_height = image_size * best_ratio[1] |
| resized_img = page_image.resize((target_width, target_height)) |
| processed_images = [] |
| blocks = best_ratio[0] * best_ratio[1] |
| for block_index in range(blocks): |
| box = ( |
| (block_index % best_ratio[0]) * image_size, |
| (block_index // best_ratio[0]) * image_size, |
| ((block_index % best_ratio[0]) + 1) * image_size, |
| ((block_index // best_ratio[0]) + 1) * image_size, |
| ) |
| processed_images.append(resized_img.crop(box)) |
| if len(processed_images) != 1: |
| processed_images.append(page_image.resize((image_size, image_size))) |
| return torch.stack([transform(image) for image in processed_images]) |
|
|
|
|
| def _run_chat_style_backend( |
| model_obj: Any, |
| tokenizer_like: Any, |
| page_image, |
| prompt_text: str, |
| *, |
| env_prefix: str, |
| model_device: Any, |
| ) -> str: |
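| # Introspect chat()'s signature and try known calling conventions in order |
| # (pixel_values+question, image_file+ocr_type, image_file+question, |
| # image+query, msgs), passing only kwargs the signature accepts. |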
| chat_fn = getattr(model_obj, "chat", None) |
| if not callable(chat_fn): |
| raise RuntimeError(f"{env_prefix} OCR model does not expose a callable chat() method.") |
| signature = inspect.signature(chat_fn) |
| param_names = set(signature.parameters) |
| generation_config = _build_generation_config_dict(env_prefix) |
|
|
| pixel_values = None |
| processor = getattr(model_obj, "processor", None) |
| if processor is not None: |
| pixel_values = _build_image_processor_inputs(processor, page_image) |
| if pixel_values is None and tokenizer_like is not None and not _processor_is_tokenizer_like(tokenizer_like): |
| pixel_values = _build_image_processor_inputs(tokenizer_like, page_image) |
| if pixel_values is not None: |
| try: |
| pixel_values = pixel_values.to(model_device) |
| except Exception: |
| pass |
|
|
| with TemporaryDirectory(prefix=f"{env_prefix.lower()}_ocr_") as tmpdir: |
| image_path = Path(tmpdir) / "page.png" |
| page_image.convert("RGB").save(image_path) |
| attempts: List[tuple[str, Dict[str, Any]]] = [] |
| if {"tokenizer", "pixel_values", "question"}.issubset(param_names) and pixel_values is not None: |
| attempts.append( |
| ( |
| "chat(tokenizer=..., pixel_values=..., question=...)", |
| { |
| "tokenizer": tokenizer_like, |
| "pixel_values": pixel_values, |
| "question": prompt_text, |
| "generation_config": generation_config, |
| }, |
| ) |
| ) |
| if {"tokenizer", "image_file", "ocr_type"}.issubset(param_names): |
| kwargs = { |
| "tokenizer": tokenizer_like, |
| "image_file": str(image_path), |
| "ocr_type": "format", |
| } |
| if "question" in param_names: |
| kwargs["question"] = prompt_text |
| if "query" in param_names: |
| kwargs["query"] = prompt_text |
| if "generation_config" in param_names: |
| kwargs["generation_config"] = generation_config |
| attempts.append(("chat(tokenizer=..., image_file=..., ocr_type='format')", kwargs)) |
| if {"tokenizer", "image_file", "question"}.issubset(param_names): |
| attempts.append( |
| ( |
| "chat(tokenizer=..., image_file=..., question=...)", |
| { |
| "tokenizer": tokenizer_like, |
| "image_file": str(image_path), |
| "question": prompt_text, |
| "generation_config": generation_config, |
| }, |
| ) |
| ) |
| if {"tokenizer", "image", "query"}.issubset(param_names): |
| attempts.append( |
| ( |
| "chat(tokenizer=..., image=..., query=...)", |
| { |
| "tokenizer": tokenizer_like, |
| "image": page_image, |
| "query": prompt_text, |
| "generation_config": generation_config, |
| }, |
| ) |
| ) |
| if {"tokenizer", "msgs"}.issubset(param_names): |
| attempts.append( |
| ( |
| "chat(tokenizer=..., msgs=...)", |
| { |
| "tokenizer": tokenizer_like, |
| "msgs": [{"role": "user", "content": prompt_text}], |
| "image": page_image, |
| }, |
| ) |
| ) |
|
|
| errors: List[str] = [] |
| for label, kwargs in attempts: |
| filtered_kwargs = {key: value for key, value in kwargs.items() if key in param_names and value is not None} |
| try: |
| emit_progress(f"Trying {env_prefix} OCR via {label}") |
| return _normalize_chat_result(chat_fn(**filtered_kwargs)) |
| except Exception as exc: |
| errors.append(f"{label}: {exc}") |
|
|
| raise RuntimeError(f"{env_prefix} OCR chat fallback exhausted: {' ; '.join(errors)}") |
|
|
|
|
| def _build_transformers_model_kwargs( |
| *, |
| env_prefix: str, |
| model_source: str, |
| dtype: Any, |
| runtime_device: str, |
| use_device_map: bool, |
| ) -> Dict[str, Any]: |
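| # CUDA loads get the selected dtype plus an optional device_map; CPU loads |
| # get the dtype only; MPS loads with defaults and the model is moved to |
| # the device after from_pretrained (see _load_first_available_model). |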
| kwargs: Dict[str, Any] = { |
| "trust_remote_code": _backend_trust_remote_code(env_prefix), |
| "local_files_only": _backend_local_files_only(env_prefix), |
| } |
| cache_dir = _backend_model_cache_dir(env_prefix) |
| if cache_dir: |
| kwargs["cache_dir"] = cache_dir |
| revision = _backend_model_revision(env_prefix) |
| if revision: |
| kwargs["revision"] = revision |
| attn_implementation = _backend_attn_implementation(env_prefix) |
| if attn_implementation: |
| kwargs["attn_implementation"] = attn_implementation |
| if runtime_device == "cuda": |
| kwargs["dtype"] = dtype |
| if use_device_map: |
| kwargs["device_map"] = _backend_device_map(env_prefix) |
| elif runtime_device == "cpu": |
| kwargs["dtype"] = dtype |
| return kwargs |
|
|
|
|
| def _load_first_available_processor(model_source: str, *, env_prefix: str, prefer_tokenizer: bool = False): |
| try: |
| from transformers import AutoProcessor, AutoTokenizer |
| except ImportError as exc: |
| raise RuntimeError( |
| f"{env_prefix} OCR requires transformers. Install a recent transformers build first." |
| ) from exc |
|
|
| common_kwargs: Dict[str, Any] = { |
| "trust_remote_code": _backend_trust_remote_code(env_prefix), |
| "local_files_only": _backend_local_files_only(env_prefix), |
| } |
| cache_dir = _backend_model_cache_dir(env_prefix) |
| if cache_dir: |
| common_kwargs["cache_dir"] = cache_dir |
| revision = _backend_model_revision(env_prefix) |
| if revision: |
| common_kwargs["revision"] = revision |
|
|
| constructors = [AutoProcessor, AutoTokenizer] |
| if prefer_tokenizer: |
| constructors = [AutoTokenizer, AutoProcessor] |
| failures: List[str] = [] |
| for constructor in constructors: |
| constructor_kwargs = dict(common_kwargs) |
| if constructor.__name__ == "AutoTokenizer": |
| constructor_kwargs.setdefault("use_fast", False) |
| try: |
| with _known_model_load_warnings_context(env_prefix=env_prefix): |
| return constructor.from_pretrained(model_source, **constructor_kwargs) |
| except Exception as exc: |
| failures.append(f"{constructor.__name__}: {exc}") |
| raise RuntimeError( |
| f"Could not load processor/tokenizer for {model_source}: " + " | ".join(failures) |
| ) |
|
|
|
|
| def _load_first_available_model(model_source: str, *, env_prefix: str): |
| try: |
| import torch |
| from transformers import AutoModel, AutoModelForCausalLM |
| except ImportError as exc: |
| raise RuntimeError( |
| f"{env_prefix} OCR requires transformers and torch." |
| ) from exc |
|
|
| try: |
| from transformers import AutoModelForImageTextToText |
| except ImportError: |
| AutoModelForImageTextToText = None |
| try: |
| from transformers import AutoModelForVision2Seq |
| except ImportError: |
| AutoModelForVision2Seq = None |
|
|
| runtime_device, dtype, use_device_map = _select_generic_ocr_runtime(torch, _backend_device(env_prefix)) |
| common_kwargs = _build_transformers_model_kwargs( |
| env_prefix=env_prefix, |
| model_source=model_source, |
| dtype=dtype, |
| runtime_device=runtime_device, |
| use_device_map=use_device_map, |
| ) |
| constructors = [ |
| constructor |
| for constructor in ( |
| AutoModelForImageTextToText, |
| AutoModelForVision2Seq, |
| AutoModelForCausalLM, |
| AutoModel, |
| ) |
| if constructor is not None |
| ] |
| last_exc: Optional[Exception] = None |
| for constructor in constructors: |
| try: |
| with _known_model_load_warnings_context(env_prefix=env_prefix): |
| model_obj = _call_from_pretrained_with_dtype_fallback( |
| constructor.from_pretrained, |
| model_source, |
| **common_kwargs, |
| ) |
| if runtime_device in {"cpu", "mps"}: |
| model_obj = model_obj.to(runtime_device) |
| return model_obj, runtime_device |
| except Exception as exc: |
| last_exc = exc |
| raise RuntimeError(f"Could not load model for {model_source}: {last_exc}") |
|
|
|
|
| def _get_generic_ocr_backend( |
| model_id: str, |
| *, |
| env_prefix: str, |
| prefer_tokenizer: bool = False, |
| ): |
| cache_key = (env_prefix, model_id) |
| cached = _GENERIC_OCR_BACKENDS.get(cache_key) |
| if cached is not None: |
| return cached |
| model_source = _resolve_generic_model_source(model_id, env_prefix=env_prefix) |
| processor = _load_first_available_processor( |
| model_source, |
| env_prefix=env_prefix, |
| prefer_tokenizer=prefer_tokenizer, |
| ) |
| model_obj, runtime_device = _load_first_available_model( |
| model_source, |
| env_prefix=env_prefix, |
| ) |
| cached = (model_obj, processor, runtime_device) |
| _GENERIC_OCR_BACKENDS[cache_key] = cached |
| return cached |
|
|
|
|
| def _run_monkeyocr_command( |
| page_image, |
| prompt_text: str, |
| *, |
| model_id: str, |
| progress_handler: ProgressHandler = None, |
| ) -> str: |
| repo_dir = normalize_text(os.getenv("MONKEYOCR_REPO_DIR", "")) |
| if not repo_dir: |
| raise RuntimeError( |
| "MonkeyOCR local backend requires MONKEYOCR_REPO_DIR to point at a local MonkeyOCR checkout." |
| ) |
| repo_path = Path(repo_dir).expanduser() |
| if not repo_path.exists(): |
| raise RuntimeError(f"MONKEYOCR_REPO_DIR='{repo_dir}' does not exist.") |
| runner_template = normalize_text(os.getenv("MONKEYOCR_RUN_COMMAND", "")) |
| if not runner_template: |
| runner_template = "{python_bin} {entrypoint} {image_path} -o {output_dir}" |
| python_bin = normalize_text(os.getenv("MONKEYOCR_PYTHON", sys.executable)) or sys.executable |
| timeout_s = _env_int("MONKEYOCR_TIMEOUT_S", 900) |
| output_glob = normalize_text(os.getenv("MONKEYOCR_OUTPUT_GLOB", "")) |
| emit_progress( |
| f"MonkeyOCR local wrapper | repo {repo_path} | model {model_id}", |
| progress_handler=progress_handler, |
| ) |
| with TemporaryDirectory(prefix="monkeyocr_") as tmpdir: |
| tmpdir_path = Path(tmpdir) |
| image_path = Path(tmpdir) / "page.png" |
| page_image.convert("RGB").save(image_path) |
| prompt_path = tmpdir_path / "prompt.txt" |
| prompt_path.write_text(str(prompt_text or ""), encoding="utf-8") |
| # Resolve a repo entrypoint only when the command template references one, |
| # so a custom MONKEYOCR_RUN_COMMAND is not forced to ship parse.py etc. |
| entrypoint = ( |
| _resolve_monkeyocr_entrypoint(repo_path) |
| if "{entrypoint}" in runner_template |
| else None |
| ) |
| formatted_command = runner_template.format( |
| python_bin=shlex.quote(python_bin), |
| entrypoint=shlex.quote(str(entrypoint)) if entrypoint is not None else "", |
| image_path=shlex.quote(str(image_path)), |
| output_dir=shlex.quote(tmpdir), |
| prompt=shlex.quote(prompt_text), |
| prompt_path=shlex.quote(str(prompt_path)), |
| model_id=shlex.quote(model_id), |
| repo_dir=shlex.quote(str(repo_path)), |
| ) |
| child_env = os.environ.copy() |
| child_env["MONKEYOCR_IMAGE_PATH"] = str(image_path) |
| child_env["MONKEYOCR_OUTPUT_DIR_ACTIVE"] = str(tmpdir_path) |
| child_env["MONKEYOCR_PROMPT"] = str(prompt_text or "") |
| child_env["MONKEYOCR_PROMPT_FILE"] = str(prompt_path) |
| child_env["MONKEYOCR_MODEL_ID_ACTIVE"] = str(model_id) |
| try: |
| completed = subprocess.run( |
| formatted_command, |
| cwd=str(repo_path), |
| shell=True, |
| capture_output=True, |
| text=True, |
| env=child_env, |
| timeout=timeout_s, |
| ) |
| except subprocess.TimeoutExpired as exc: |
| raise RuntimeError( |
| f"MonkeyOCR command timed out after {timeout_s}s: {formatted_command}" |
| ) from exc |
| if completed.returncode != 0: |
| raise RuntimeError( |
| "MonkeyOCR command failed: " |
| f"{normalize_text(completed.stderr) or normalize_text(completed.stdout) or completed.returncode}" |
| ) |
| for candidate in ( |
| "result.md", |
| "result.html", |
| "result.json", |
| "output.md", |
| "output.html", |
| "output.json", |
| "pred.md", |
| "pred.html", |
| "pred.json", |
| "response.json", |
| "response.md", |
| "response.html", |
| "table.html", |
| "table.md", |
| "table.json", |
| "ocr_result.json", |
| "ocr_result.md", |
| "ocr_result.html", |
| "result.txt", |
| "output.txt", |
| ): |
| candidate_path = tmpdir_path / candidate |
| if candidate_path.exists(): |
| extracted = _read_monkeyocr_output_artifact(candidate_path) |
| if normalize_text(extracted): |
| return extracted |
| if output_glob: |
| for pattern in [piece.strip() for piece in output_glob.split(",") if piece.strip()]: |
| for candidate_path in sorted(tmpdir_path.glob(pattern)): |
| if candidate_path.is_file(): |
| if candidate_path.name in {"prompt.txt", image_path.name}: |
| continue |
| extracted = _read_monkeyocr_output_artifact(candidate_path) |
| if normalize_text(extracted): |
| return extracted |
| for candidate_path in ( |
| sorted(tmpdir_path.glob("**/*.html")) |
| + sorted(tmpdir_path.glob("**/*.md")) |
| + sorted(tmpdir_path.glob("**/*.json")) |
| + sorted(tmpdir_path.glob("**/*.txt")) |
| ): |
| if candidate_path.is_file(): |
| if candidate_path.name in {"prompt.txt", image_path.name}: |
| continue |
| extracted = _read_monkeyocr_output_artifact(candidate_path) |
| if normalize_text(extracted): |
| return extracted |
| stdout_text = _coerce_monkeyocr_output_text(completed.stdout) |
| if stdout_text: |
| return stdout_text |
| raise RuntimeError( |
| "MonkeyOCR command completed but did not produce a readable OCR output file. " |
| "Set MONKEYOCR_RUN_COMMAND if your checkout uses a nonstandard entrypoint, output path, or task flags." |
| ) |
|
|
|
|
| def is_firered_model_id(model_id: str) -> bool: |
| normalized = normalize_text(model_id).lower() |
| if not normalized: |
| return False |
| configured = default_firered_model_id().lower() |
| return normalized == configured or "firered" in normalized |
|
|
|
|
| def is_deepseek_model_id(model_id: str) -> bool: |
| normalized = normalize_text(model_id).lower() |
| if not normalized: |
| return False |
| configured = default_deepseek_ocr_model_id().lower() |
| return ( |
| normalized == configured |
| or normalized.startswith("deepseek-ai/") |
| or "deepseek-ocr" in normalized |
| ) |
|
|
|
|
| def is_mistral_model_id(model_id: str) -> bool: |
| normalized = normalize_text(model_id).lower() |
| if not normalized: |
| return False |
| configured = default_mistral_ocr_model_id().lower() |
| return ( |
| normalized == configured |
| or normalized.startswith("mistral-ocr") |
| or normalized.startswith("ocr-") |
| or ("mistral" in normalized and "ocr" in normalized) |
| ) |
|
|
|
|
| def is_qianfan_model_id(model_id: str) -> bool: |
| normalized = normalize_text(model_id).lower() |
| if not normalized: |
| return False |
| configured = default_qianfan_ocr_model_id().lower() |
| return ( |
| normalized == configured |
| or normalized.startswith("baidu/qianfan") |
| or normalized in {"qianfan", "qianfan-ocr"} |
| or "qianfan-ocr" in normalized |
| ) |
|
|
|
|
| def is_glm_ocr_model_id(model_id: str) -> bool: |
| normalized = normalize_text(model_id).lower() |
| if not normalized: |
| return False |
| configured = default_glm_ocr_model_id().lower() |
| return ( |
| normalized == configured |
| or normalized.startswith("zai-org/glm-ocr") |
| or normalized in {"glm-ocr", "glmocr"} |
| or normalized.endswith("/glm-ocr") |
| ) |
|
|
|
|
| def is_paddleocr_vl_model_id(model_id: str) -> bool: |
| normalized = normalize_text(model_id).lower() |
| if not normalized: |
| return False |
| configured = default_paddleocr_vl_model_id().lower() |
| return ( |
| normalized == configured |
| or normalized.startswith("paddlepaddle/paddleocr-vl") |
| or normalized in {"paddleocr-vl", "paddleocr-vl-1.5", "paddleocr_vl"} |
| or "paddleocr-vl" in normalized |
| ) |
|
|
|
|
| def is_got_ocr_model_id(model_id: str) -> bool: |
| normalized = normalize_text(model_id).lower() |
| if not normalized: |
| return False |
| configured = default_got_ocr_model_id().lower() |
| return ( |
| normalized == configured |
| or normalized.startswith("stepfun-ai/got-ocr") |
| or normalized in {"got-ocr", "got-ocr2", "got-ocr2_0", "gotocr", "gotocr2"} |
| or "got-ocr" in normalized |
| or "got_ocr" in normalized |
| ) |
|
|
|
|
| def is_monkeyocr_model_id(model_id: str) -> bool: |
| normalized = normalize_text(model_id).lower() |
| if not normalized: |
| return False |
| configured = default_monkeyocr_model_id().lower() |
| return ( |
| normalized == configured |
| or normalized.startswith("echo840/monkeyocr") |
| or normalized in {"monkeyocr", "monkeyocr-pro-3b", "monkeyocr_pro_3b"} |
| or "monkeyocr" in normalized |
| ) |
|
|
|
|
| def is_qwen_ocr_model_id(model_id: str) -> bool: |
| normalized = normalize_text(model_id).lower() |
| if not normalized: |
| return False |
| configured = default_qwen_ocr_model_id().lower() |
| return ( |
| normalized == configured |
| or normalized.startswith("qwen/qwen3.5-35b-a3b") |
| or normalized.startswith("qwen/qwen3.6-35b-a3b") |
| or normalized in { |
| "qwen_ocr", |
| "qwen-ocr", |
| "qwenocr", |
| "qwen3.5-35b-a3b", |
| "qwen3.5-35b-a3b-fp8", |
| "qwen3.6-35b-a3b", |
| "qwen3.6-35b-a3b-fp8", |
| } |
| or "qwen3.5-35b-a3b" in normalized |
| or "qwen3.6-35b-a3b" in normalized |
| ) |
|
|
|
|
| def is_gemma4_vllm_model_id(model_id: str) -> bool: |
| normalized = normalize_text(model_id).lower().replace("_", "-") |
| if not normalized: |
| return False |
| configured = default_gemma4_vllm_model_id().lower().replace("_", "-") |
| if normalized == configured: |
| return True |
| if "llamacpp" in normalized or "llama-cpp" in normalized or "llama.cpp" in normalized or "gguf" in normalized: |
| return False |
| return ( |
| normalized.startswith("google/gemma-4-") |
| or normalized.startswith("gemma-4-") |
| or normalized.startswith("gemma4-") |
| or normalized in {"gemma4-vllm", "gemma-4-vllm"} |
| ) |
|
|
|
|
| def is_gemma4_hf_model_id(model_id: str) -> bool: |
| normalized = normalize_text(model_id).lower().replace("_", "-") |
| if not normalized: |
| return False |
| if "vllm" in normalized or "llamacpp" in normalized or "llama-cpp" in normalized or "llama.cpp" in normalized or "gguf" in normalized: |
| return False |
| configured = default_gemma4_hf_model_id().lower().replace("_", "-") |
| if normalized == configured: |
| return True |
| return ( |
| normalized in { |
| "gemma4", |
| "gemma-4", |
| "gemma4-hf", |
| "gemma-4-hf", |
| "gemma4-local", |
| "gemma-4-local", |
| "gemma4-huggingface", |
| "gemma-4-huggingface", |
| "gemma4-transformers", |
| "gemma-4-transformers", |
| } |
| or normalized.startswith("google/gemma-4-") |
| or normalized.startswith("gemma-4-") |
| or normalized.startswith("gemma4-") |
| ) |
|
|
|
|
| def is_gemma4_llamacpp_model_id(model_id: str) -> bool: |
| normalized = normalize_text(model_id).lower().replace("_", "-") |
| if not normalized: |
| return False |
| configured = default_gemma4_llamacpp_model_id().lower().replace("_", "-") |
| if normalized == configured: |
| return True |
| if normalized in { |
| "gemma4", |
| "gemma-4", |
| "gemma4-26b", |
| "gemma-4-26b", |
| "gemma4-26b-it", |
| "gemma-4-26b-it", |
| "gemma4-llamacpp", |
| "gemma-4-llamacpp", |
| "gemma4-26b-llamacpp", |
| "gemma-4-26b-llamacpp", |
| "gemma4-26b-llama-cpp", |
| "gemma-4-26b-llama-cpp", |
| "gemma4-llama-cpp", |
| "gemma-4-llama-cpp", |
| "llamacpp-gemma4-26b", |
| "llama-cpp-gemma4-26b", |
| "llamacpp-gemma4", |
| "llama-cpp-gemma4", |
| }: |
| return True |
| return ("gemma4" in normalized or "gemma-4" in normalized) and ( |
| "llamacpp" in normalized |
| or "llama-cpp" in normalized |
| or "llama.cpp" in normalized |
| or "gguf" in normalized |
| ) |
|
|
|
|
| def resolve_table_ocr_backend(model_id: str) -> str: |
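| # Dispatch order matters: the specific local backends are tested first, |
| # the permissive Gemma matchers later, and OpenRouter is the catch-all. |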
| normalized = normalize_text(model_id) |
| if is_firered_model_id(normalized): |
| return "firered" |
| if is_deepseek_model_id(normalized): |
| return "deepseek" |
| if is_mistral_model_id(normalized): |
| return "mistral" |
| if is_qianfan_model_id(normalized): |
| return "qianfan" |
| if is_glm_ocr_model_id(normalized): |
| return "glm_ocr" |
| if is_paddleocr_vl_model_id(normalized): |
| return "paddleocr_vl" |
| if is_got_ocr_model_id(normalized): |
| return "got_ocr" |
| if is_monkeyocr_model_id(normalized): |
| return "monkeyocr" |
| if is_qwen_ocr_model_id(normalized): |
| return "qwen_ocr" |
| if is_gemma4_hf_model_id(normalized): |
| return "gemma4_hf" |
| if is_gemma4_vllm_model_id(normalized): |
| return "gemma4_vllm" |
| if is_gemma4_llamacpp_model_id(normalized): |
| return "gemma4_llamacpp" |
| # OpenRouter handles any model id not claimed above, including the |
| # explicitly configured openrouter_ocr_model_ids(). |
| return "openrouter" |
|
|
|
|
| def should_use_openrouter_ocr(model_id: str) -> bool: |
| return resolve_table_ocr_backend(model_id) == "openrouter" |
|
|
|
|
| def _resolve_deepseek_ocr_model_source(model_id: str) -> str: |
| local_dir = _deepseek_ocr_local_dir() |
| if not local_dir: |
| return model_id |
|
|
| local_path = Path(local_dir).expanduser() |
| if str(local_path).startswith("/content") and not Path("/content").exists(): |
| raise RuntimeError( |
| f"DEEPSEEK_OCR_MODEL_LOCAL_DIR points to '{local_dir}', which looks like a Colab path, " |
| "but this Python process is running outside Colab." |
| ) |
| if not local_path.exists(): |
| raise RuntimeError( |
| f"DEEPSEEK_OCR_MODEL_LOCAL_DIR='{local_dir}' does not exist." |
| ) |
| if not local_path.is_dir(): |
| raise RuntimeError(f"DEEPSEEK_OCR_MODEL_LOCAL_DIR='{local_dir}' is not a directory.") |
| if not (local_path / "config.json").exists(): |
| raise RuntimeError( |
| f"DEEPSEEK_OCR_MODEL_LOCAL_DIR='{local_dir}' does not look like a downloaded model directory " |
| "(missing config.json)." |
| ) |
| return str(local_path) |
|
|
|
|
| def _select_deepseek_ocr_runtime(torch): |
| requested = _deepseek_ocr_device() |
| mps_available = bool(getattr(torch.backends, "mps", None)) and torch.backends.mps.is_available() |
|
|
| if requested not in {"auto", "cuda", "mps", "cpu"}: |
| raise RuntimeError( |
| f"Unsupported DEEPSEEK_OCR_DEVICE='{requested}'. Expected one of: auto, cuda, mps, cpu." |
| ) |
|
|
| if requested in {"auto", "cuda"} and torch.cuda.is_available(): |
| return "cuda", torch.bfloat16, True |
| if requested == "cuda": |
| raise RuntimeError("DEEPSEEK_OCR_DEVICE='cuda' was requested, but CUDA is not available.") |
|
|
| if requested in {"auto", "mps"} and mps_available: |
| return "mps", torch.float16, False |
| if requested == "mps": |
| raise RuntimeError("DEEPSEEK_OCR_DEVICE='mps' was requested, but MPS is not available.") |
|
|
| return "cpu", torch.float32, False |
|
|
|
|
| def _prepare_deepseek_ocr_image(image, runtime_device: str): |
| if not _table_ocr_allow_backend_image_resize(): |
| return image |
| max_pixels = _deepseek_ocr_max_image_pixels() |
| width, height = image.size |
| total_pixels = width * height |
| if max_pixels <= 0 or total_pixels <= max_pixels: |
| return image |
|
|
| scale = math.sqrt(max_pixels / total_pixels) |
| new_width = max(28, int(width * scale)) |
| new_height = max(28, int(height * scale)) |
| emit_progress( |
| f"Resizing DeepSeek OCR image for {runtime_device} from {width}x{height} to " |
| f"{new_width}x{new_height} to stay within {max_pixels:,} pixels." |
| ) |
| return image.resize((new_width, new_height)) |
|
|
|
|
| def _build_deepseek_prompt_text(processor, prompt_text: str) -> str: |
| image_token = getattr(processor, "image_token", None) |
| if image_token is None: |
| image_token = getattr(getattr(processor, "tokenizer", None), "image_token", None) |
| if image_token: |
| return f"{image_token}\n{prompt_text}" |
| return prompt_text |
|
|
|
|
| def _build_deepseek_infer_prompt(prompt_text: str) -> str: |
| normalized = str(prompt_text or "").strip() |
| if not normalized: |
| normalized = default_table_image_transcription_prompt() |
| return normalized |
|
|
|
|
| def _build_deepseek_infer_prompt_candidates(prompt_text: str) -> List[tuple[str, str]]: |
| normalized = _build_deepseek_infer_prompt(prompt_text) |
| default_prompt = normalize_text(default_table_image_transcription_prompt()) |
| candidates: List[tuple[str, str]] = [] |
| seen: set[str] = set() |
|
|
| def add_candidate(label: str, candidate_prompt: str) -> None: |
| candidate_text = normalize_text(candidate_prompt) |
| if not candidate_text or candidate_text in seen: |
| return |
| seen.add(candidate_text) |
| candidates.append((label, candidate_text)) |
|
|
| # If the caller kept the default table prompt, lead with DeepSeek-OCR's |
| # native prompts; otherwise try the caller's own prompt first. |
| if normalize_text(normalized) == default_prompt: |
| add_candidate("grounding markdown prompt", "<image>\n<|grounding|>Convert the document to markdown. ") |
| add_candidate("free OCR prompt", "<image>\nFree OCR. ") |
| add_candidate("plain markdown prompt", "<image>\nConvert the document to markdown. ") |
| add_candidate("caller HTML prompt with image token", f"<image>\n{normalized}") |
| else: |
| add_candidate("caller prompt with image token", f"<image>\n{normalized}") |
| add_candidate("grounding markdown prompt", "<image>\n<|grounding|>Convert the document to markdown. ") |
| add_candidate("free OCR prompt", "<image>\nFree OCR. ") |
|
|
| return candidates |
|
|
|
|
| def _require_nonempty_ocr_text_output( |
| text: str, |
| *, |
| backend_label: str, |
| details: Optional[Dict[str, Any]] = None, |
| ) -> str: |
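| # Strip code fences, then either normalize inline LaTeX when an HTML table |
| # is present or convert a bare LaTeX tabular to HTML; raise |
| # TableOCRContentError when nothing survives cleaning. |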
| cleaned = strip_code_fences(text) |
| if cleaned and _HTML_TABLE_RE.search(cleaned): |
| cleaned = normalize_inline_latex_in_html_fragment(cleaned) |
| elif cleaned: |
| latex_html = maybe_convert_latex_tabular_to_html(cleaned) |
| if latex_html: |
| cleaned = latex_html |
| if cleaned: |
| return cleaned |
| raise TableOCRContentError( |
| f"OCR backend returned empty text content via {backend_label}", |
| details=details or {"backend": backend_label}, |
| ) |
|
|
|
|
| def _resize_image_to_max_pixels(img, max_pixels: int): |
| if max_pixels <= 0: |
| return img |
| width, height = img.size |
| total_pixels = width * height |
| if total_pixels <= max_pixels: |
| return img |
| scale = math.sqrt(float(max_pixels) / float(total_pixels)) |
| new_width = max(28, int(width * scale)) |
| new_height = max(28, int(height * scale)) |
| return img.resize((new_width, new_height)) |
|
|
|
|
| def _is_firered_buffer_error(exc: Exception) -> bool: |
| message = normalize_text(str(exc)).lower() |
| return ( |
| "invalid buffer size" in message |
| or "out of memory" in message |
| or ("mps" in message and "buffer" in message) |
| ) |
|
|
|
|
| def _build_deepseek_inputs(processor, page_image, prompt_text: str): |
| messages = [ |
| { |
| "role": "user", |
| "content": [ |
| {"type": "image", "image": page_image}, |
| {"type": "text", "text": prompt_text}, |
| ], |
| } |
| ] |
|
|
| if hasattr(processor, "apply_chat_template"): |
| try: |
| chat_text = processor.apply_chat_template( |
| messages, |
| tokenize=False, |
| add_generation_prompt=True, |
| ) |
| return processor(text=[chat_text], images=[page_image], return_tensors="pt") |
| except Exception: |
| pass |
|
|
| fallback_prompt = _build_deepseek_prompt_text(processor, prompt_text) |
| try: |
| return processor(text=[fallback_prompt], images=[page_image], return_tensors="pt") |
| except Exception: |
| return processor(text=fallback_prompt, images=page_image, return_tensors="pt") |
|
|
|
|
| def _normalize_deepseek_chat_output(result: Any) -> str: |
| if isinstance(result, str): |
| return result |
| if isinstance(result, tuple) and result: |
| first = result[0] |
| if isinstance(first, str): |
| return first |
| if isinstance(result, dict): |
| for key in ("text", "response", "content", "answer", "output"): |
| value = result.get(key) |
| if isinstance(value, str): |
| return value |
| raise RuntimeError(f"DeepSeek chat fallback returned an unsupported result type: {type(result).__name__}") |
|
|
|
|
| def _processor_is_tokenizer_like(processor: Any) -> bool: |
| class_name = type(processor).__name__.lower() |
| if "tokenizer" in class_name: |
| return True |
| try: |
| from transformers import PreTrainedTokenizerBase |
|
|
| return isinstance(processor, PreTrainedTokenizerBase) |
| except Exception: |
| return False |
|
|
|
|
| def _model_supports_image_aware_fallback(model_obj: Any) -> bool: |
| if callable(getattr(model_obj, "chat", None)): |
| return True |
|
|
| for attr_name in ("generate", "forward"): |
| method = getattr(model_obj, attr_name, None) |
| if not callable(method): |
| continue |
| try: |
| param_names = set(inspect.signature(method).parameters) |
| except Exception: |
| continue |
| if {"images", "image"} & param_names: |
| return True |
| if {"pixel_values", "image_tensors", "input_images"} & param_names: |
| return True |
| return False |
|
|
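| # Example: a remote-code model whose generate() is declared as |
| #   def generate(self, input_ids=None, images=None, **kwargs): ... |
| # counts as image-aware because "images" appears in the inspected signature, |
| # even when the loaded processor is only tokenizer-like. |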
|
|
| def _decode_deepseek_generated_output(processor: Any, generated_ids: Any, input_ids: Any = None) -> str: |
| trimmed_ids = generated_ids |
| if input_ids is not None: |
| try: |
| trimmed_ids = generated_ids[:, input_ids.shape[1] :] |
| except Exception: |
| trimmed_ids = generated_ids |
|
|
| if hasattr(processor, "batch_decode"): |
| return processor.batch_decode( |
| trimmed_ids, |
| skip_special_tokens=True, |
| clean_up_tokenization_spaces=False, |
| )[0] |
| if hasattr(processor, "tokenizer") and hasattr(processor.tokenizer, "batch_decode"): |
| return processor.tokenizer.batch_decode( |
| trimmed_ids, |
| skip_special_tokens=True, |
| clean_up_tokenization_spaces=False, |
| )[0] |
| raise RuntimeError("DeepSeek fallback could not find a decode path on the loaded processor/tokenizer.") |
|
|
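| # Shape sketch: decoder-only generate() usually returns prompt + completion |
| # token ids, so with input_ids of shape (1, 512) and generated_ids of shape |
| # (1, 700), the slice keeps the trailing (1, 188) completion tokens and |
| # decodes only those. |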
|
|
| def _maybe_build_pixel_values(model_obj: Any, page_image, model_device: Any): |
| candidates = [] |
| image_processor = getattr(model_obj, "image_processor", None) |
| if image_processor is not None: |
| candidates.append(image_processor) |
| vision_tower = getattr(model_obj, "vision_tower", None) |
| if vision_tower is not None: |
| nested_processor = getattr(vision_tower, "image_processor", None) |
| if nested_processor is not None: |
| candidates.append(nested_processor) |
|
|
| for candidate in candidates: |
| try: |
| if hasattr(candidate, "preprocess"): |
| outputs = candidate.preprocess(page_image, return_tensors="pt") |
| elif callable(candidate): |
| outputs = candidate(page_image, return_tensors="pt") |
| else: |
| continue |
| if isinstance(outputs, dict): |
| pixel_values = outputs.get("pixel_values") |
| else: |
| pixel_values = getattr(outputs, "pixel_values", None) |
| if pixel_values is None: |
| continue |
| return pixel_values.to(model_device) |
| except Exception: |
| continue |
| return None |
|
|
|
|
| def _run_deepseek_generate_fallback(model_obj, processor, page_image, prompt_text: str, model_device: Any) -> str: |
| generate_fn = getattr(model_obj, "generate", None) |
| if not callable(generate_fn): |
| raise RuntimeError("DeepSeek-OCR-2 did not expose a callable `generate` fallback.") |
|
|
| prompt_with_image = _build_deepseek_prompt_text(processor, prompt_text) |
| prompt_for_tokenizer = prompt_with_image |
| if hasattr(processor, "apply_chat_template"): |
| try: |
| prompt_for_tokenizer = processor.apply_chat_template( |
| [ |
| { |
| "role": "user", |
| "content": [ |
| {"type": "image", "image": page_image}, |
| {"type": "text", "text": prompt_text}, |
| ], |
| } |
| ], |
| tokenize=False, |
| add_generation_prompt=True, |
| ) |
| except Exception: |
| prompt_for_tokenizer = prompt_with_image |
| try: |
| text_inputs = processor(text=[prompt_for_tokenizer], return_tensors="pt") |
| except Exception: |
| try: |
| text_inputs = processor([prompt_for_tokenizer], return_tensors="pt") |
| except Exception: |
| text_inputs = processor(prompt_for_tokenizer, return_tensors="pt") |
|
|
| text_inputs = text_inputs.to(model_device) |
| input_ids = getattr(text_inputs, "input_ids", None) |
| if input_ids is None and isinstance(text_inputs, dict): |
| input_ids = text_inputs.get("input_ids") |
|
|
| candidate_image_kwargs: List[tuple[str, Dict[str, Any]]] = [ |
| ("generate(..., images=[[PIL]])", {"images": [[page_image]]}), |
| ("generate(..., images=[PIL])", {"images": [page_image]}), |
| ("generate(..., images=PIL)", {"images": page_image}), |
| ("generate(..., image=PIL)", {"image": page_image}), |
| ("generate(..., input_images=[PIL])", {"input_images": [page_image]}), |
| ] |
|
|
| pixel_values = _maybe_build_pixel_values(model_obj, page_image, model_device) |
| if pixel_values is not None: |
| candidate_image_kwargs.append(("generate(..., pixel_values=tensor)", {"pixel_values": pixel_values})) |
|
|
| errors: List[str] = [] |
| for label, extra_kwargs in candidate_image_kwargs: |
| try: |
| emit_progress(f"Trying DeepSeek generate fallback via {label}") |
| generated_ids = generate_fn( |
| **text_inputs, |
| **extra_kwargs, |
| max_new_tokens=_deepseek_ocr_max_new_tokens(), |
| do_sample=False, |
| ) |
| return _decode_deepseek_generated_output(processor, generated_ids, input_ids=input_ids) |
| except Exception as exc: |
| errors.append(f"{label}: {exc}") |
|
|
| prepare_inputs_embeds_fn = getattr(model_obj, "prepare_inputs_embeds", None) |
| language_model = getattr(model_obj, "language", None) |
| language_generate_fn = getattr(language_model, "generate", None) if language_model is not None else None |
| attention_mask = getattr(text_inputs, "attention_mask", None) |
| if attention_mask is None and isinstance(text_inputs, dict): |
| attention_mask = text_inputs.get("attention_mask") |
| if callable(prepare_inputs_embeds_fn): |
| embed_attempts: List[tuple[str, Dict[str, Any]]] = [ |
| ("prepare_inputs_embeds(..., images=[[PIL]])", {"images": [[page_image]]}), |
| ("prepare_inputs_embeds(..., images=[PIL])", {"images": [page_image]}), |
| ] |
| for label, extra_kwargs in embed_attempts: |
| try: |
| emit_progress(f"Trying DeepSeek embedding fallback via {label}") |
| inputs_embeds = prepare_inputs_embeds_fn( |
| input_ids=input_ids, |
| **extra_kwargs, |
| ) |
| if callable(language_generate_fn): |
| generated_ids = language_generate_fn( |
| inputs_embeds=inputs_embeds, |
| attention_mask=attention_mask, |
| max_new_tokens=_deepseek_ocr_max_new_tokens(), |
| do_sample=False, |
| ) |
| else: |
| generated_ids = generate_fn( |
| inputs_embeds=inputs_embeds, |
| attention_mask=attention_mask, |
| max_new_tokens=_deepseek_ocr_max_new_tokens(), |
| do_sample=False, |
| ) |
| return _decode_deepseek_generated_output(processor, generated_ids, input_ids=None) |
| except Exception as exc: |
| errors.append(f"{label}: {exc}") |
|
|
| raise RuntimeError(" ; ".join(errors)) |
|
|
|
|
| def _run_deepseek_infer_fallback( |
| model_obj, |
| processor, |
| page_image, |
| prompt_text: str, |
| runtime_device: str, |
| ) -> str: |
| infer_fn = getattr(model_obj, "infer", None) |
| if not callable(infer_fn): |
| raise RuntimeError("DeepSeek-OCR-2 did not expose a callable `infer` helper.") |
| if runtime_device != "cuda": |
| raise RuntimeError( |
| f"DeepSeek-OCR-2 remote-code infer helper currently requires CUDA, but runtime device is {runtime_device}." |
| ) |
|
|
| signature = inspect.signature(infer_fn) |
| param_names = set(signature.parameters) |
|
|
| with TemporaryDirectory(prefix="deepseek_ocr_") as tmpdir: |
| image_path = Path(tmpdir) / "page.png" |
| result_mmd_path = Path(tmpdir) / "result.mmd" |
| page_image.convert("RGB").save(image_path) |
|
|
| infer_kwargs_base: Dict[str, Any] = { |
| "tokenizer": processor, |
| "image_file": str(image_path), |
| "output_path": tmpdir, |
| "eval_mode": True, |
| } |
| if "save_results" in param_names: |
| infer_kwargs_base["save_results"] = False |
| if "test_compress" in param_names: |
| infer_kwargs_base["test_compress"] = False |
| if "crop_mode" in param_names: |
| infer_kwargs_base["crop_mode"] = True |
| if "base_size" in param_names: |
| infer_kwargs_base["base_size"] = 1024 |
| if "image_size" in param_names: |
| infer_kwargs_base["image_size"] = 768 |
|
|
| errors: List[str] = [] |
| emit_progress("Trying DeepSeek remote-code infer(...) fallback") |
| for attempt_label, infer_prompt in _build_deepseek_infer_prompt_candidates(prompt_text): |
| emit_progress(f"Trying DeepSeek infer prompt variant: {attempt_label}") |
|
|
| try: |
| infer_kwargs = dict(infer_kwargs_base) |
| infer_kwargs["prompt"] = infer_prompt |
| infer_kwargs["eval_mode"] = True |
| result = infer_fn(**{key: value for key, value in infer_kwargs.items() if key in param_names}) |
| normalized_result = "" |
| if result is not None: |
| normalized_result = strip_code_fences(_normalize_deepseek_chat_output(result)) |
| if normalized_result: |
| return normalized_result |
| except Exception as exc: |
| errors.append(f"{attempt_label} eval_mode=True: {exc}") |
| continue |
|
|
| if "save_results" not in param_names: |
| errors.append(f"{attempt_label} eval_mode=True: empty text output") |
| continue |
|
|
| try: |
| if result_mmd_path.exists(): |
| result_mmd_path.unlink() |
| infer_kwargs = dict(infer_kwargs_base) |
| infer_kwargs["prompt"] = infer_prompt |
| infer_kwargs["eval_mode"] = False |
| infer_kwargs["save_results"] = True |
| with contextlib.redirect_stdout(io.StringIO()): |
| result = infer_fn(**{key: value for key, value in infer_kwargs.items() if key in param_names}) |
| normalized_result = "" |
| if result is not None: |
| normalized_result = strip_code_fences(_normalize_deepseek_chat_output(result)) |
| if not normalized_result and result_mmd_path.exists(): |
| normalized_result = strip_code_fences(result_mmd_path.read_text(encoding="utf-8")) |
| if normalized_result: |
| return normalized_result |
| errors.append(f"{attempt_label} save_results=True: empty text output") |
| except Exception as exc: |
| errors.append(f"{attempt_label} save_results=True: {exc}") |
|
|
| raise RuntimeError(" ; ".join(errors)) |
|
|
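| # The infer(...) helper is tried in two modes per prompt variant: with |
| # eval_mode=True the text comes back directly; with save_results=True the |
| # remote code writes result.mmd into the temporary directory, which is read |
| # back whenever the call itself returns nothing. |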
|
|
| def _deepseek_ocr_version_guidance_message() -> str: |
| return ( |
| "DeepSeek-OCR-2 remote code expects the Hugging Face stack from the model card, " |
| "especially `transformers==4.46.3` and `tokenizers==0.20.3`. " |
| "In Colab, reinstall those versions, restart the runtime, and reload the model." |
| ) |
|
|
|
|
| def _run_deepseek_chat_fallback(model_obj, processor, page_image, prompt_text: str) -> str: |
| chat_fn = getattr(model_obj, "chat", None) |
| if not callable(chat_fn): |
| raise RuntimeError( |
| "DeepSeek-OCR-2 loaded a tokenizer-like processor instead of a multimodal processor, " |
| "and the model does not expose a callable `chat` fallback." |
| ) |
|
|
| signature = inspect.signature(chat_fn) |
| param_names = set(signature.parameters) |
| max_new_tokens = _deepseek_ocr_max_new_tokens() |
|
|
| call_attempts: List[tuple[str, Dict[str, Any]]] = [] |
| if {"image", "msgs", "tokenizer"}.issubset(param_names): |
| kwargs: Dict[str, Any] = { |
| "image": page_image, |
| "msgs": [{"role": "user", "content": prompt_text}], |
| "tokenizer": processor, |
| } |
| if "sampling" in param_names: |
| kwargs["sampling"] = False |
| if "do_sample" in param_names: |
| kwargs["do_sample"] = False |
| if "temperature" in param_names: |
| kwargs["temperature"] = 0.0 |
| if "max_new_tokens" in param_names: |
| kwargs["max_new_tokens"] = max_new_tokens |
| if "ocr_type" in param_names: |
| kwargs["ocr_type"] = "format" |
| call_attempts.append(("chat(image=..., msgs=..., tokenizer=...)", kwargs)) |
|
|
| if {"tokenizer", "query", "image"}.issubset(param_names): |
| kwargs = { |
| "tokenizer": processor, |
| "query": prompt_text, |
| "image": page_image, |
| } |
| if "max_new_tokens" in param_names: |
| kwargs["max_new_tokens"] = max_new_tokens |
| if "temperature" in param_names: |
| kwargs["temperature"] = 0.0 |
| call_attempts.append(("chat(tokenizer=..., query=..., image=...)", kwargs)) |
|
|
| if {"tokenizer", "prompt", "image"}.issubset(param_names): |
| kwargs = { |
| "tokenizer": processor, |
| "prompt": prompt_text, |
| "image": page_image, |
| } |
| if "max_new_tokens" in param_names: |
| kwargs["max_new_tokens"] = max_new_tokens |
| if "temperature" in param_names: |
| kwargs["temperature"] = 0.0 |
| call_attempts.append(("chat(tokenizer=..., prompt=..., image=...)", kwargs)) |
|
|
| if {"tokenizer", "text", "image"}.issubset(param_names): |
| kwargs = { |
| "tokenizer": processor, |
| "text": prompt_text, |
| "image": page_image, |
| } |
| if "max_new_tokens" in param_names: |
| kwargs["max_new_tokens"] = max_new_tokens |
| if "temperature" in param_names: |
| kwargs["temperature"] = 0.0 |
| call_attempts.append(("chat(tokenizer=..., text=..., image=...)", kwargs)) |
|
|
| if not call_attempts: |
| raise RuntimeError( |
| "DeepSeek-OCR-2 exposed a `chat` method, but this backend does not yet recognize its signature: " |
| f"{signature}" |
| ) |
|
|
| errors: List[str] = [] |
| for label, kwargs in call_attempts: |
| try: |
| emit_progress(f"Trying DeepSeek chat fallback via {label}") |
| return _normalize_deepseek_chat_output(chat_fn(**kwargs)) |
| except Exception as exc: |
| errors.append(f"{label}: {exc}") |
|
|
| raise RuntimeError(" ; ".join(errors)) |
|
|
|
|
| def _patch_missing_llama_flash_attention2() -> None: |
| try: |
| import transformers.models.llama.modeling_llama as llama_modeling |
| except Exception: |
| return |
|
|
| if hasattr(llama_modeling, "LlamaFlashAttention2"): |
| return |
|
|
| replacement = getattr(llama_modeling, "LlamaSdpaAttention", None) |
| if replacement is None: |
| replacement = getattr(llama_modeling, "LlamaAttention", None) |
| if replacement is None: |
| return |
|
|
| setattr(llama_modeling, "LlamaFlashAttention2", replacement) |
| emit_progress( |
| "Applied compatibility shim: aliased transformers.models.llama.modeling_llama." |
| "LlamaFlashAttention2 to an available Llama attention implementation." |
| ) |
|
|
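| # Context for the shim above: DeepSeek's remote code imports |
| # LlamaFlashAttention2 by name, a class that newer Transformers releases no |
| # longer define after the attention-class refactor, so aliasing it to an |
| # available Llama attention class lets the remote code import cleanly. |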
|
|
| def _get_deepseek_table_backend(model_id: str): |
| global _DEEPSEEK_OCR_MODEL, _DEEPSEEK_OCR_PROCESSOR, _DEEPSEEK_OCR_MODEL_ID, _DEEPSEEK_OCR_LOAD_ERROR |
|
|
| if ( |
| _DEEPSEEK_OCR_MODEL is not None |
| and _DEEPSEEK_OCR_PROCESSOR is not None |
| and _DEEPSEEK_OCR_MODEL_ID == model_id |
| ): |
| return _DEEPSEEK_OCR_MODEL, _DEEPSEEK_OCR_PROCESSOR |
|
|
| try: |
| import torch |
| except ImportError as exc: |
| raise RuntimeError("PyTorch is required for DeepSeek OCR transcription. Install `torch` first.") from exc |
| try: |
| import torchvision |
| except ImportError as exc: |
| raise RuntimeError( |
| "DeepSeek OCR transcription requires `torchvision`. Install `torchvision` and restart Python." |
| ) from exc |
|
|
| try: |
| from transformers import AutoConfig, AutoModelForCausalLM, AutoProcessor |
| except ImportError as exc: |
| raise RuntimeError( |
| "DeepSeek OCR transcription requires a recent Hugging Face stack. " |
| "Install or upgrade `accelerate` and `safetensors`, then install the latest " |
| "`transformers` build (for example `pip install -U git+https://github.com/huggingface/transformers`)." |
| ) from exc |
|
|
| _patch_missing_llama_flash_attention2() |
|
|
| try: |
| from transformers import AutoModel |
| except ImportError: |
| AutoModel = None |
|
|
| try: |
| from transformers import AutoModelForImageTextToText |
| except ImportError: |
| AutoModelForImageTextToText = None |
|
|
| try: |
| from transformers import AutoModelForVision2Seq |
| except ImportError: |
| AutoModelForVision2Seq = None |
|
|
| try: |
| from transformers.models.deepseek_vl import DeepseekVLForConditionalGeneration |
| except ImportError: |
| DeepseekVLForConditionalGeneration = None |
|
|
| try: |
| from transformers.models.deepseek_vl_hybrid import DeepseekVLHybridForConditionalGeneration |
| except ImportError: |
| DeepseekVLHybridForConditionalGeneration = None |
|
|
| runtime_device, dtype, use_device_map = _select_deepseek_ocr_runtime(torch) |
| try: |
| model_source = _resolve_deepseek_ocr_model_source(model_id) |
| common_kwargs: Dict[str, Any] = { |
| "trust_remote_code": _deepseek_ocr_trust_remote_code(), |
| } |
| cache_dir = _deepseek_ocr_cache_dir() |
| revision = _deepseek_ocr_revision() |
| if cache_dir: |
| common_kwargs["cache_dir"] = cache_dir |
| if revision and not _deepseek_ocr_local_dir(): |
| common_kwargs["revision"] = revision |
| if _deepseek_ocr_local_files_only(): |
| common_kwargs["local_files_only"] = True |
|
|
| with _known_model_load_warnings_context(env_prefix="DEEPSEEK_OCR"): |
| config = AutoConfig.from_pretrained(model_source, **common_kwargs) |
| processor = AutoProcessor.from_pretrained(model_source, **common_kwargs) |
| processor_is_tokenizer_like = _processor_is_tokenizer_like(processor) |
| model_type = normalize_text(getattr(config, "model_type", "")) |
|
|
| load_kwargs = dict(common_kwargs) |
| load_kwargs["dtype"] = dtype |
| if use_device_map: |
| load_kwargs["device_map"] = _deepseek_ocr_device_map() |
| else: |
| load_kwargs["low_cpu_mem_usage"] = True |
|
|
| loader_candidates = [] |
| if model_type == "deepseek_vl_hybrid" and DeepseekVLHybridForConditionalGeneration is not None: |
| loader_candidates.append( |
| ("DeepseekVLHybridForConditionalGeneration", DeepseekVLHybridForConditionalGeneration.from_pretrained) |
| ) |
| elif model_type == "deepseek_vl" and DeepseekVLForConditionalGeneration is not None: |
| loader_candidates.append( |
| ("DeepseekVLForConditionalGeneration", DeepseekVLForConditionalGeneration.from_pretrained) |
| ) |
| if AutoModelForImageTextToText is not None: |
| loader_candidates.append(("AutoModelForImageTextToText", AutoModelForImageTextToText.from_pretrained)) |
| if AutoModelForVision2Seq is not None: |
| loader_candidates.append(("AutoModelForVision2Seq", AutoModelForVision2Seq.from_pretrained)) |
| if AutoModel is not None: |
| loader_candidates.append(("AutoModel", AutoModel.from_pretrained)) |
| loader_candidates.append(("AutoModelForCausalLM", AutoModelForCausalLM.from_pretrained)) |
|
|
| if _deepseek_ocr_local_dir(): |
| emit_progress(f"Loading DeepSeek OCR model from local dir '{model_source}' on {runtime_device}...") |
| elif cache_dir: |
| emit_progress( |
| f"Loading DeepSeek OCR model '{model_source}' using cache '{cache_dir}' on {runtime_device}..." |
| ) |
| else: |
| emit_progress(f"Loading DeepSeek OCR model '{model_source}' on {runtime_device}...") |
|
|
| model_errors: List[str] = [] |
| model_obj = None |
| for loader_name, loader in loader_candidates: |
| try: |
| with _known_model_load_warnings_context(env_prefix="DEEPSEEK_OCR"): |
| candidate_model = _call_from_pretrained_with_dtype_fallback( |
| loader, |
| model_source, |
| **load_kwargs, |
| ) |
| if processor_is_tokenizer_like and not _model_supports_image_aware_fallback(candidate_model): |
| model_errors.append( |
| f"{loader_name}: loaded successfully but does not expose an image-aware fallback " |
| "(`chat`, `generate(images=...)`, or `forward(images=...)`) while the processor is tokenizer-like" |
| ) |
| continue |
| model_obj = candidate_model |
| break |
| except Exception as exc: |
| model_errors.append(f"{loader_name}: {exc}") |
|
|
| if model_obj is None: |
| raise RuntimeError(" ; ".join(model_errors)) |
|
|
| if not use_device_map: |
| model_obj = model_obj.to(runtime_device) |
|
|
| # Greedy decoding is used throughout, so clear the sampling knobs to avoid |
| # sampling-parameter warnings from generate(). |
| if getattr(model_obj, "generation_config", None) is not None: |
| model_obj.generation_config.temperature = None |
| model_obj.generation_config.top_p = None |
| model_obj.generation_config.top_k = None |
| model_obj.eval() |
|
|
| _DEEPSEEK_OCR_MODEL = model_obj |
| _DEEPSEEK_OCR_PROCESSOR = processor |
| _DEEPSEEK_OCR_MODEL_ID = model_id |
| _DEEPSEEK_OCR_LOAD_ERROR = None |
| emit_progress("Loaded DeepSeek OCR model.") |
| return _DEEPSEEK_OCR_MODEL, _DEEPSEEK_OCR_PROCESSOR |
| except Exception as exc: |
| if "LlamaFlashAttention2" in str(exc): |
| exc = RuntimeError( |
| "DeepSeek-OCR-2 remote code is not compatible with the currently installed Transformers stack. " |
| f"{_deepseek_ocr_version_guidance_message()}" |
| ) |
| _DEEPSEEK_OCR_LOAD_ERROR = (model_id, str(exc)) |
| raise |
|
|
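| # Usage sketch (model id illustrative; assumes torch, torchvision, and a |
| # compatible Transformers stack are installed): |
| #   model, processor = _get_deepseek_table_backend("deepseek-ai/DeepSeek-OCR-2") |
| # Repeated calls with the same id reuse the module-level cache instead of |
| # reloading weights. |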
|
|
| def transcribe_table_image_with_openrouter( |
| image_data_uri: str, |
| *, |
| model_id: str, |
| prompt: Optional[str] = None, |
| reasoning_level: Optional[str] = None, |
| max_tokens: int = 32000, |
| max_attempts: Optional[int] = None, |
| progress_handler: ProgressHandler = None, |
| ) -> str: |
| attempt_plan = build_openrouter_attempt_plan( |
| model_id, |
| max_attempts=max_attempts or default_openrouter_ocr_max_attempts(), |
| ) |
| prompt_text = prompt or default_table_image_transcription_prompt() |
| last_response_error: Optional[RuntimeError] = None |
|
|
| for attempt_index, candidate_model in enumerate(attempt_plan, start=1): |
| progress_label = f"[ocr {candidate_model}]" |
| client = TableOCROpenRouterClient( |
| model_id=candidate_model, |
| reasoning_level=reasoning_level or os.getenv("OPENROUTER_REASONING_LEVEL", "low"), |
| ) |
| try: |
| request_started_at = time.perf_counter() |
| response = client.chat_completion( |
| messages=[ |
| { |
| "role": "user", |
| "content": [ |
| {"type": "text", "text": prompt_text}, |
| {"type": "image_url", "image_url": {"url": image_data_uri}}, |
| ], |
| } |
| ], |
| max_tokens=max_tokens, |
| temperature=0.0, |
| progress_label=f"{progress_label} | request {attempt_index}/{len(attempt_plan)}", |
| progress_handler=progress_handler, |
| ) |
| request_latency_s = time.perf_counter() - request_started_at |
| except TableOCROpenRouterResponseError as exc: |
| last_response_error = exc |
| if attempt_index < len(attempt_plan): |
| next_model = attempt_plan[attempt_index] |
| if next_model != candidate_model: |
| emit_progress( |
| f"[ocr {model_id}] | retrying with fallback model {next_model} after error: {exc}", |
| progress_handler=progress_handler, |
| ) |
| else: |
| emit_progress( |
| f"[ocr {model_id}] | retrying attempt {attempt_index + 1}/{len(attempt_plan)} " |
| f"with model {candidate_model} after error: {exc}", |
| progress_handler=progress_handler, |
| ) |
| continue |
| raise |
|
|
| summary = summarize_openrouter_chat_response(response) |
| summary["total_latency_s"] = max(0.0, float(request_latency_s)) |
| text_content = extract_text_from_openrouter_content(summary["content"]) |
| if text_content is None: |
| summary["thinking_latency_s"] = None |
| summary["first_content_latency_s"] = None |
| else: |
| summary["thinking_latency_s"] = estimate_openrouter_thinking_latency_s( |
| request_latency_s, |
| reasoning_tokens=summary["reasoning_tokens"], |
| completion_tokens=summary["completion_tokens"], |
| ) |
| emit_progress( |
| f"{progress_label} | finish_reason={summary['finish_reason'] or 'unknown'} | " |
| f"provider={summary['provider'] or 'unknown'} | " |
| f"tool_calls={summary['tool_call_count']} | " |
| f"content_type={summary['content_type']}", |
| progress_handler=progress_handler, |
| ) |
| if summary["thinking_latency_s"] is not None: |
| reasoning_tokens_label = ( |
| str(summary["reasoning_tokens"]) |
| if isinstance(summary["reasoning_tokens"], int) |
| else "unknown" |
| ) |
| completion_tokens_label = ( |
| str(summary["completion_tokens"]) |
| if isinstance(summary["completion_tokens"], int) |
| else "unknown" |
| ) |
| emit_progress( |
| f"{progress_label} | total latency={summary['total_latency_s']:.2f}s" |
| f" | estimated thinking latency={summary['thinking_latency_s']:.2f}s" |
| f" | reasoning_tokens={reasoning_tokens_label}" |
| f" | completion_tokens={completion_tokens_label}", |
| progress_handler=progress_handler, |
| ) |
|
|
| if text_content is None: |
| detail_bits = [ |
| f"finish_reason={summary['finish_reason'] or 'unknown'}", |
| f"provider={summary['provider'] or 'unknown'}", |
| f"tool_calls={summary['tool_call_count']}", |
| f"content_type={summary['content_type']}", |
| ] |
| if summary["refusal"]: |
| detail_bits.append(f"refusal={normalize_text(summary['refusal'])[:200]}") |
| exc = TableOCRContentError( |
| f"OCR response did not contain text content ({', '.join(detail_bits)})", |
| response_payload=response, |
| details=summary, |
| ) |
| last_response_error = exc |
| if attempt_index < len(attempt_plan): |
| next_model = attempt_plan[attempt_index] |
| if next_model != candidate_model: |
| emit_progress( |
| f"[ocr {model_id}] | retrying with fallback model {next_model} after content error: {exc}", |
| progress_handler=progress_handler, |
| ) |
| else: |
| emit_progress( |
| f"[ocr {model_id}] | retrying attempt {attempt_index + 1}/{len(attempt_plan)} " |
| f"with model {candidate_model} after content error: {exc}", |
| progress_handler=progress_handler, |
| ) |
| continue |
| raise exc |
| return TableOCRTranscriptionText( |
| strip_code_fences(text_content), |
| effective_model_id=candidate_model, |
| thinking_latency_s=summary["thinking_latency_s"], |
| total_latency_s=summary["total_latency_s"], |
| first_content_latency_s=summary["first_content_latency_s"], |
| reasoning_tokens=summary["reasoning_tokens"], |
| completion_tokens=summary["completion_tokens"], |
| ) |
|
|
| if last_response_error is not None: |
| raise last_response_error |
| raise RuntimeError("OpenRouter OCR transcription failed without producing a result.") |
|
|
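| # Usage sketch (requires OpenRouter credentials; model id illustrative): |
| #   text = transcribe_table_image_with_openrouter( |
| #       image_data_uri, model_id="vendor/some-vision-model") |
| #   text.effective_model_id  # model that actually answered after fallbacks |
| #   text.total_latency_s     # wall-clock latency of the winning attempt |
| # The return value is a TableOCRTranscriptionText, so it still behaves as str. |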
|
|
| def transcribe_table_image_with_qwen_ocr( |
| image_data_uri: str, |
| *, |
| model_id: Optional[str] = None, |
| prompt: Optional[str] = None, |
| max_tokens: int = 32000, |
| progress_handler: ProgressHandler = None, |
| ) -> str: |
| resolved_model = resolve_qwen_ocr_model_id(model_id) |
| prompt_text = prompt or default_table_image_transcription_prompt() |
| progress_label = f"[qwen_ocr {resolved_model}]" |
| client = TableOCRQwenClient(model_id=resolved_model) |
| configured_max_tokens = normalize_text(os.getenv("QWEN_OCR_MAX_TOKENS", "")) |
| if configured_max_tokens: |
| try: |
| max_tokens = max(1, int(configured_max_tokens)) |
| except ValueError: |
| pass |
| request_started_at = time.perf_counter() |
| response = client.chat_completion_streaming( |
| messages=[ |
| { |
| "role": "user", |
| "content": [ |
| {"type": "text", "text": prompt_text}, |
| {"type": "image_url", "image_url": {"url": image_data_uri}}, |
| ], |
| } |
| ], |
| max_tokens=max_tokens, |
| temperature=0.0, |
| progress_label=progress_label, |
| progress_handler=progress_handler, |
| include_reasoning=True, |
| ) |
| request_latency_s = time.perf_counter() - request_started_at |
|
|
| summary = summarize_openrouter_chat_response(response) |
| if summary["total_latency_s"] is None: |
| summary["total_latency_s"] = max(0.0, float(request_latency_s)) |
| text_content = extract_text_from_openrouter_content(summary["content"]) |
| if text_content is None: |
| raise TableOCRContentError( |
| "Qwen OCR response did not contain text content.", |
| response_payload=response, |
| details=summary, |
| ) |
|
|
| emit_progress( |
| f"{progress_label} | finish_reason={summary['finish_reason'] or 'unknown'} | " |
| f"provider={summary['provider'] or 'unknown'} | " |
| f"total latency={summary['total_latency_s']:.2f}s" |
| + ( |
| f" | thinking latency={summary['thinking_latency_s']:.2f}s" |
| if isinstance(summary["thinking_latency_s"], (int, float)) |
| else "" |
| ), |
| progress_handler=progress_handler, |
| ) |
| return TableOCRTranscriptionText( |
| strip_code_fences(text_content), |
| effective_model_id=resolved_model, |
| thinking_latency_s=summary["thinking_latency_s"], |
| total_latency_s=summary["total_latency_s"], |
| first_content_latency_s=summary["first_content_latency_s"], |
| reasoning_tokens=summary["reasoning_tokens"], |
| completion_tokens=summary["completion_tokens"], |
| ) |
|
|
|
|
| def gemma4_vllm_launch_hint() -> str: |
| return ( |
| "Start local vLLM for Gemma 4 with thinking and the full OCR vision budget, for example: " |
| "vllm serve google/gemma-4-26B-A4B-it --host 0.0.0.0 --port 8000 " |
| "--dtype bfloat16 --max-model-len 32768 --gpu-memory-utilization 0.90 " |
| "--limit-mm-per-prompt '{\"image\":1}' " |
| "--mm-processor-kwargs '{\"max_soft_tokens\":1120}' " |
| "--hf-overrides '{\"vision_config\":{\"default_output_length\":1120},\"vision_soft_tokens_per_image\":1120}' " |
| "--reasoning-parser gemma4 --default-chat-template-kwargs '{\"enable_thinking\":true}'" |
| ) |
|
|
|
|
| def gemma4_hf_load_hint() -> str: |
| return ( |
| "Use Gemma 4 locally through plain Hugging Face Transformers, for example after installing " |
| "`transformers torch torchvision accelerate` with model `google/gemma-4-26B-A4B-it`." |
| ) |
|
|
|
|
| def _gemma4_hf_max_new_tokens() -> int: |
| return _env_int("GEMMA4_HF_MAX_NEW_TOKENS", 8192) |
|
|
|
|
| def _gemma4_hf_device_map() -> str: |
| return normalize_text(os.getenv("GEMMA4_HF_DEVICE_MAP", "auto")) or "auto" |
|
|
|
|
| def _decode_image_data_uri_for_gemma4_hf(image_data_uri: str) -> tuple[bytes, str]: |
| normalized = normalize_text(image_data_uri) |
| if not normalized.startswith("data:") or "," not in normalized: |
| raise RuntimeError("Gemma 4 HF backend expected a data URI image payload.") |
| header, payload = normalized.split(",", 1) |
| media_type = header[5:].split(";", 1)[0].strip().lower() |
| suffix = { |
| "image/png": ".png", |
| "image/jpeg": ".jpg", |
| "image/jpg": ".jpg", |
| "image/webp": ".webp", |
| "image/gif": ".gif", |
| "image/bmp": ".bmp", |
| }.get(media_type, ".png") |
| try: |
| return base64.b64decode(payload), suffix |
| except Exception as exc: |
| raise RuntimeError("Gemma 4 HF backend could not decode the image data URI.") from exc |
|
|
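| # Parsing example: "data:image/jpeg;base64,AAAA..." splits into the header |
| # "data:image/jpeg;base64" and the payload; the media type "image/jpeg" maps |
| # to the ".jpg" suffix and the payload is base64-decoded into raw bytes. |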
|
|
| def _load_gemma4_hf_backend(model_id: str) -> tuple[Any, Any]: |
| cache = getattr(_load_gemma4_hf_backend, "_cache", None) |
| if isinstance(cache, dict) and cache.get("model_id") == model_id: |
| return cache["processor"], cache["model"] |
|
|
| try: |
| import transformers |
| from transformers import AutoProcessor |
| except Exception as exc: |
| raise RuntimeError(f"Gemma 4 HF backend could not import Transformers. {gemma4_hf_load_hint()}") from exc |
|
|
| auto_model_cls = getattr(transformers, "AutoModelForMultimodalLM", None) |
| if auto_model_cls is None: |
| auto_model_cls = getattr(transformers, "AutoModelForImageTextToText", None) |
| if auto_model_cls is None: |
| raise RuntimeError( |
| "Installed Transformers does not expose `AutoModelForMultimodalLM` or `AutoModelForImageTextToText`. " |
| f"{gemma4_hf_load_hint()}" |
| ) |
|
|
| processor = AutoProcessor.from_pretrained(model_id) |
| try: |
| model = auto_model_cls.from_pretrained( |
| model_id, |
| device_map=_gemma4_hf_device_map(), |
| torch_dtype="auto", |
| ) |
| except TypeError: |
| model = auto_model_cls.from_pretrained( |
| model_id, |
| device_map=_gemma4_hf_device_map(), |
| dtype="auto", |
| ) |
| model.eval() |
| _load_gemma4_hf_backend._cache = { |
| "model_id": model_id, |
| "processor": processor, |
| "model": model, |
| } |
| return processor, model |
|
|
|
|
| def _gemma4_hf_input_device(model: Any) -> Any: |
| import torch |
|
|
| model_device = getattr(model, "device", None) |
| if isinstance(model_device, torch.device) and model_device.type != "meta": |
| return model_device |
| if torch.cuda.is_available(): |
| return torch.device("cuda") |
| return torch.device("cpu") |
|
|
|
|
| def _extract_gemma4_hf_text_value(value: Any) -> Optional[str]: |
| if isinstance(value, str): |
| return value if normalize_text(value) else None |
| if isinstance(value, dict): |
| for key in ("content", "text", "response", "output", "answer", "final", "generated_text"): |
| extracted = _extract_gemma4_hf_text_value(value.get(key)) |
| if extracted: |
| return extracted |
| for nested_value in value.values(): |
| extracted = _extract_gemma4_hf_text_value(nested_value) |
| if extracted: |
| return extracted |
| return None |
| if isinstance(value, list): |
| for item in value: |
| extracted = _extract_gemma4_hf_text_value(item) |
| if extracted: |
| return extracted |
| return None |
|
|
|
|
| def _extract_gemma4_hf_response_text(processor: Any, generated_text: str) -> str: |
| parse_response = getattr(processor, "parse_response", None) |
| if callable(parse_response): |
| try: |
| parsed = parse_response(generated_text) |
| except Exception: |
| parsed = None |
| parsed_text = _extract_gemma4_hf_text_value(parsed) |
| if parsed_text: |
| cleaned_text, _ = _strip_gemma4_unparsed_thinking(parsed_text) |
| if normalize_text(cleaned_text): |
| return cleaned_text |
| cleaned_text, _ = _strip_gemma4_unparsed_thinking(generated_text) |
| return cleaned_text |
|
|
|
|
| def transcribe_table_image_with_gemma4_hf( |
| image_data_uri: str, |
| *, |
| model_id: Optional[str] = None, |
| prompt: Optional[str] = None, |
| progress_handler: ProgressHandler = None, |
| ) -> str: |
| import tempfile |
| import torch |
|
|
| resolved_model = resolve_gemma4_hf_model_id(model_id) |
| prompt_text = prompt or default_table_image_transcription_prompt() |
| progress_label = f"[gemma4_hf {resolved_model}]" |
|
|
| emit_progress( |
| f"{progress_label} | local transformers multimodal | thinking=off", |
| progress_handler=progress_handler, |
| ) |
|
|
| try: |
| processor, model = _load_gemma4_hf_backend(resolved_model) |
| except Exception as exc: |
| raise RuntimeError(f"{exc}\n{gemma4_hf_load_hint()}") from exc |
|
|
| started_at = time.perf_counter() |
| try: |
| image_bytes, image_suffix = _decode_image_data_uri_for_gemma4_hf(image_data_uri) |
| with tempfile.TemporaryDirectory(prefix="gemma4_hf_ocr_") as tmp_dir: |
| image_path = Path(tmp_dir) / f"table{image_suffix}" |
| image_path.write_bytes(image_bytes) |
| messages = [ |
| { |
| "role": "user", |
| "content": [ |
| {"type": "image", "path": str(image_path)}, |
| {"type": "text", "text": prompt_text}, |
| ], |
| } |
| ] |
| inputs = processor.apply_chat_template( |
| messages, |
| tokenize=True, |
| return_dict=True, |
| return_tensors="pt", |
| add_generation_prompt=True, |
| ).to(_gemma4_hf_input_device(model)) |
| input_len = inputs["input_ids"].shape[-1] |
| with torch.inference_mode(): |
| outputs = model.generate( |
| **inputs, |
| max_new_tokens=_gemma4_hf_max_new_tokens(), |
| do_sample=False, |
| ) |
| generated_text = processor.decode(outputs[0][input_len:], skip_special_tokens=False) |
| except Exception as exc: |
| raise RuntimeError(f"Gemma 4 HF OCR call failed: {exc}\n{gemma4_hf_load_hint()}") from exc |
|
|
| total_latency_s = time.perf_counter() - started_at |
| text_content = _extract_gemma4_hf_response_text(processor, generated_text) |
| if not normalize_text(text_content): |
| raise TableOCRContentError( |
| "Gemma 4 Hugging Face response did not contain text content.", |
| details={ |
| "model_id": resolved_model, |
| "generated_text_preview": generated_text[-4000:], |
| }, |
| ) |
|
|
| emit_progress( |
| f"{progress_label} | local transformers response | total latency={total_latency_s:.2f}s | content_chars={len(text_content)}", |
| progress_handler=progress_handler, |
| ) |
| return TableOCRTranscriptionText( |
| strip_code_fences(text_content), |
| effective_model_id=resolved_model, |
| total_latency_s=total_latency_s, |
| ) |
|
|
|
|
| def _gemma4_vllm_max_tokens() -> int: |
| return _env_int("GEMMA4_VLLM_MAX_TOKENS", 32000) |
|
|
|
|
| def _gemma4_vllm_temperature() -> float: |
| return _env_float("GEMMA4_VLLM_TEMPERATURE", 0.0, min_value=0.0, max_value=2.0) |
|
|
|
|
| def _gemma4_vllm_enable_thinking() -> bool: |
| return _env_flag("GEMMA4_VLLM_ENABLE_THINKING", "1") |
|
|
|
|
| def _gemma4_vllm_require_thinking_latency() -> bool: |
| return _env_flag("GEMMA4_VLLM_REQUIRE_THINKING_LATENCY", "1") |
|
|
|
|
| def _strip_gemma4_unparsed_thinking(text: str) -> tuple[str, bool]: |
| rendered = str(text or "") |
| had_thought_block = bool(_GEMMA4_THOUGHT_BLOCK_RE.search(rendered) or _GEMMA4_THINK_BLOCK_RE.search(rendered)) |
| rendered = _GEMMA4_THOUGHT_BLOCK_RE.sub("", rendered) |
| rendered = _GEMMA4_THINK_BLOCK_RE.sub("", rendered) |
| if had_thought_block: |
| rendered = _GEMMA4_CHANNEL_MARKER_RE.sub("", rendered) |
| return rendered.strip(), had_thought_block |
|
|
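| # The regexes used above (defined earlier in the module) target Gemma-style |
| # thought/think blocks; channel markers are scrubbed only when a thought |
| # block was actually present, so ordinary OCR output passes through intact. |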
|
|
| def transcribe_table_image_with_gemma4_vllm( |
| image_data_uri: str, |
| *, |
| model_id: Optional[str] = None, |
| prompt: Optional[str] = None, |
| progress_handler: ProgressHandler = None, |
| ) -> str: |
| resolved_model = resolve_gemma4_vllm_model_id(model_id) |
| prompt_text = prompt or default_table_image_transcription_prompt() |
| progress_label = f"[gemma4_vllm {resolved_model}]" |
| client = TableOCRGemma4VLLMClient(model_id=resolved_model) |
|
|
| emit_progress( |
| f"{progress_label} | local vLLM streaming | thinking=on | expects --reasoning-parser gemma4", |
| progress_handler=progress_handler, |
| ) |
| try: |
| response = client.chat_completion_streaming( |
| messages=[ |
| { |
| "role": "user", |
| "content": [ |
| {"type": "image_url", "image_url": {"url": image_data_uri}}, |
| {"type": "text", "text": prompt_text}, |
| ], |
| } |
| ], |
| max_tokens=_gemma4_vllm_max_tokens(), |
| temperature=_gemma4_vllm_temperature(), |
| progress_label=progress_label, |
| progress_handler=progress_handler, |
| include_reasoning=True, |
| ) |
| except TableOCRRemoteResponseError as exc: |
| raise RuntimeError(f"{exc}\n{gemma4_vllm_launch_hint()}") from exc |
|
|
| summary = summarize_openrouter_chat_response(response) |
| text_content = extract_text_from_openrouter_content(summary["content"]) |
| if text_content is None: |
| raise TableOCRContentError( |
| "Gemma 4 vLLM response did not contain text content.", |
| response_payload=response, |
| details=summary, |
| ) |
| text_content, stripped_unparsed_thinking = _strip_gemma4_unparsed_thinking(text_content) |
| if not normalize_text(text_content): |
| raise TableOCRContentError( |
| "Gemma 4 vLLM response contained only unparsed thinking content.", |
| response_payload=response, |
| details=summary, |
| ) |
| if ( |
| _gemma4_vllm_enable_thinking() |
| and _gemma4_vllm_require_thinking_latency() |
| and not isinstance(summary["thinking_latency_s"], (int, float)) |
| ): |
| raise TableOCRContentError( |
| "Gemma 4 vLLM thinking latency was unavailable. Restart vLLM with " |
| "`--reasoning-parser gemma4 --default-chat-template-kwargs '{\"enable_thinking\":true}'` " |
| "so reasoning is streamed separately from OCR content. " |
| "Set GEMMA4_VLLM_REQUIRE_THINKING_LATENCY=0 only if you intentionally want to score without it.", |
| response_payload=response, |
| details={ |
| **summary, |
| "stripped_unparsed_thinking": bool(stripped_unparsed_thinking), |
| "launch_hint": gemma4_vllm_launch_hint(), |
| }, |
| ) |
| if normalize_text(summary["finish_reason"]).lower() == "length": |
| raise TableOCRContentError( |
| "Gemma 4 vLLM hit finish_reason=length before producing a complete OCR answer. " |
| "This usually means thinking was not parsed out or GEMMA4_VLLM_MAX_TOKENS is too low.", |
| response_payload=response, |
| details={ |
| **summary, |
| "stripped_unparsed_thinking": bool(stripped_unparsed_thinking), |
| }, |
| ) |
|
|
| emit_progress( |
| f"{progress_label} | finish_reason={summary['finish_reason'] or 'unknown'} | " |
| f"total latency={summary['total_latency_s']:.2f}s | content_chars={len(text_content)}" |
| + ( |
| f" | thinking latency={summary['thinking_latency_s']:.2f}s" |
| if isinstance(summary["thinking_latency_s"], (int, float)) |
| else " | thinking latency=unavailable; confirm vLLM was started with --reasoning-parser gemma4" |
| ), |
| progress_handler=progress_handler, |
| ) |
| return TableOCRTranscriptionText( |
| strip_code_fences(text_content), |
| effective_model_id=resolved_model, |
| thinking_latency_s=summary["thinking_latency_s"], |
| total_latency_s=summary["total_latency_s"], |
| first_content_latency_s=summary["first_content_latency_s"], |
| reasoning_tokens=summary["reasoning_tokens"], |
| completion_tokens=summary["completion_tokens"], |
| ) |
|
|
|
|
| def gemma4_llamacpp_launch_hint() -> str: |
| return ( |
| "Start llama.cpp with Gemma 4 vision budget enabled, for example: " |
| "llama-server --model /models/Gemma4/gemma-4-26B-it-Q8_0.gguf " |
| "--mmproj /models/Gemma4/mmproj-F32.gguf --jinja " |
| "--chat-template-file /models/Gemma4/google-gemma-4-26B-it-interleaved.jinja " |
| "--image-min-tokens 560 --image-max-tokens 2240 " |
| "--batch-size 4096 --ubatch-size 4096" |
| ) |
|
|
|
|
| def _gemma4_llamacpp_max_tokens() -> int: |
| return _env_int("GEMMA4_LLAMA_CPP_MAX_TOKENS", 32000) |
|
|
|
|
| def _gemma4_llamacpp_temperature() -> float: |
| return _env_float("GEMMA4_LLAMA_CPP_TEMPERATURE", 0.0, min_value=0.0, max_value=2.0) |
|
|
|
|
| def transcribe_table_image_with_gemma4_llamacpp( |
| image_data_uri: str, |
| *, |
| model_id: Optional[str] = None, |
| prompt: Optional[str] = None, |
| progress_handler: ProgressHandler = None, |
| ) -> str: |
| resolved_model = resolve_gemma4_llamacpp_model_id(model_id) |
| prompt_text = prompt or default_table_image_transcription_prompt() |
| progress_label = f"[gemma4_llamacpp {resolved_model}]" |
| client = TableOCRGemma4LlamaCppClient(model_id=resolved_model) |
|
|
| emit_progress( |
| f"{progress_label} | expects llama-server with --image-min-tokens 560 " |
| "--image-max-tokens 2240 --batch-size 4096 --ubatch-size 4096", |
| progress_handler=progress_handler, |
| ) |
| normalized_max_pixels = _table_ocr_normalized_max_image_pixels() |
| if 0 < normalized_max_pixels < 2_600_000: |
| emit_progress( |
| f"{progress_label} | shared image cap is {normalized_max_pixels:,} pixels; " |
| "raise TABLE_OCR_NORMALIZED_MAX_IMAGE_PIXELS for full high-budget Gemma 4 vision tests", |
| progress_handler=progress_handler, |
| ) |
| request_started_at = time.perf_counter() |
| try: |
| response = client.chat_completion( |
| messages=[ |
| { |
| "role": "user", |
| "content": [ |
| {"type": "text", "text": prompt_text}, |
| {"type": "image_url", "image_url": {"url": image_data_uri}}, |
| ], |
| } |
| ], |
| max_tokens=_gemma4_llamacpp_max_tokens(), |
| temperature=_gemma4_llamacpp_temperature(), |
| progress_label=progress_label, |
| progress_handler=progress_handler, |
| ) |
| except TableOCRRemoteResponseError as exc: |
| raise RuntimeError(f"{exc}\n{gemma4_llamacpp_launch_hint()}") from exc |
| request_latency_s = time.perf_counter() - request_started_at |
|
|
| summary = summarize_openrouter_chat_response(response) |
| summary["total_latency_s"] = max(0.0, float(request_latency_s)) |
| text_content = extract_text_from_openrouter_content(summary["content"]) |
| if text_content is None: |
| raise TableOCRContentError( |
| "Gemma 4 llama.cpp response did not contain text content.", |
| response_payload=response, |
| details=summary, |
| ) |
|
|
| emit_progress( |
| f"{progress_label} | finish_reason={summary['finish_reason'] or 'unknown'} | " |
| f"total latency={summary['total_latency_s']:.2f}s | content_chars={len(text_content)}", |
| progress_handler=progress_handler, |
| ) |
| return TableOCRTranscriptionText( |
| strip_code_fences(text_content), |
| effective_model_id=resolved_model, |
| total_latency_s=summary["total_latency_s"], |
| first_content_latency_s=summary["first_content_latency_s"], |
| reasoning_tokens=summary["reasoning_tokens"], |
| completion_tokens=summary["completion_tokens"], |
| ) |
|
|
|
|
| def transcribe_table_image_with_deepseek( |
| image_data_uri: str, |
| *, |
| model_id: Optional[str] = None, |
| prompt: Optional[str] = None, |
| progress_handler: ProgressHandler = None, |
| ) -> str: |
| del progress_handler  # unused: local DeepSeek progress prints via emit_progress |
| sec_parser = load_sec_parser_module() |
| resolved_model = normalize_text(model_id or default_deepseek_ocr_model_id()) |
| if not resolved_model: |
| raise RuntimeError( |
| "Missing DeepSeek OCR model id. Pass a Hugging Face model id or set DEEPSEEK_OCR_MODEL_ID." |
| ) |
| try: |
| import torch |
| except ImportError as exc: |
| raise RuntimeError("PyTorch is required for DeepSeek OCR transcription.") from exc |
|
|
| model_obj, processor = _get_deepseek_table_backend(resolved_model) |
| model_device = sec_parser._model_input_device(model_obj) |
| runtime_device = model_device.type |
| page_image = _prepare_deepseek_ocr_image( |
| sec_parser._decode_data_uri_to_pil_image(image_data_uri), |
| runtime_device, |
| ) |
| prompt_text = prompt or default_table_image_transcription_prompt() |
| infer_exc: Optional[Exception] = None |
| if callable(getattr(model_obj, "infer", None)): |
| try: |
| return _require_nonempty_ocr_text_output( |
| _run_deepseek_infer_fallback( |
| model_obj, |
| processor, |
| page_image, |
| prompt_text, |
| runtime_device, |
| ), |
| backend_label="DeepSeek infer(...) fallback", |
| ) |
| except Exception as exc: |
| infer_exc = exc |
| infer_message = str(infer_exc) |
| if "position_embeddings" in infer_message or "LlamaAttention.forward()" in infer_message: |
| raise RuntimeError(_deepseek_ocr_version_guidance_message()) from infer_exc |
| emit_progress(f"DeepSeek infer fallback failed: {infer_exc}") |
| try: |
| inputs = _build_deepseek_inputs( |
| processor, |
| page_image, |
| prompt_text, |
| ) |
| except TypeError as exc: |
| if "unexpected keyword argument 'images'" not in str(exc): |
| raise |
| generate_exc: Optional[Exception] = None |
| try: |
| return _require_nonempty_ocr_text_output( |
| _run_deepseek_generate_fallback(model_obj, processor, page_image, prompt_text, model_device), |
| backend_label="DeepSeek generate() fallback", |
| details={"infer_failure": str(infer_exc) if infer_exc else None}, |
| ) |
| except Exception as exc2: |
| generate_exc = exc2 |
| emit_progress(f"DeepSeek generate fallback failed: {generate_exc}") |
| try: |
| return _require_nonempty_ocr_text_output( |
| _run_deepseek_chat_fallback(model_obj, processor, page_image, prompt_text), |
| backend_label="DeepSeek chat() fallback", |
| details={ |
| "infer_failure": str(infer_exc) if infer_exc else None, |
| "generate_failure": str(generate_exc) if generate_exc else None, |
| }, |
| ) |
| except Exception as chat_exc: |
| raise RuntimeError( |
| "DeepSeek OCR fallback exhausted. " |
| f"model={type(model_obj).__name__}, processor={type(processor).__name__}; " |
| f"infer fallback failed: {infer_exc}; " |
| f"generate fallback failed: {generate_exc}; " |
| f"chat fallback failed: {chat_exc}" |
| ) from chat_exc |
| inputs = inputs.to(model_device) |
|
|
| with torch.inference_mode(): |
| generated_ids = model_obj.generate( |
| **inputs, |
| max_new_tokens=_deepseek_ocr_max_new_tokens(), |
| do_sample=False, |
| ) |
|
|
| input_ids = getattr(inputs, "input_ids", None) |
| if input_ids is None and isinstance(inputs, dict): |
| input_ids = inputs.get("input_ids") |
| raw_output = _decode_deepseek_generated_output(processor, generated_ids, input_ids=input_ids) |
| return _require_nonempty_ocr_text_output( |
| raw_output, |
| backend_label="DeepSeek direct generate()", |
| details={"infer_failure": str(infer_exc) if infer_exc else None}, |
| ) |
|
|
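| # Fallback chain for the local DeepSeek backend: the remote-code infer(...) |
| # helper runs first (CUDA only); if the processor cannot build multimodal |
| # inputs, the generate() and chat() fallbacks are tried in turn; otherwise |
| # the processor-built inputs feed a direct generate() call. |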
|
|
| def _decode_image_data_uri_bytes(image_data_uri: str) -> bytes: |
| try: |
| header, encoded = image_data_uri.split(",", 1) |
| except ValueError as exc: |
| raise RuntimeError("Expected a valid data URI for Mistral OCR image transcription.") from exc |
| if ";base64" not in header: |
| raise RuntimeError("Expected a base64-encoded data URI for Mistral OCR image transcription.") |
| try: |
| return base64.b64decode(encoded) |
| except Exception as exc: |
| raise RuntimeError("Could not decode the Mistral OCR image data URI.") from exc |
|
|
|
|
| def _decode_table_ocr_image_data_uri_bytes(image_data_uri: str) -> bytes: |
| try: |
| header, encoded = image_data_uri.split(",", 1) |
| except ValueError as exc: |
| raise RuntimeError("Expected a valid data URI for table OCR image transcription.") from exc |
| if ";base64" not in header: |
| raise RuntimeError("Expected a base64-encoded data URI for table OCR image transcription.") |
| try: |
| return base64.b64decode(encoded) |
| except Exception as exc: |
| raise RuntimeError("Could not decode the table OCR image data URI.") from exc |
|
|
|
|
| def _normalize_table_ocr_image_data_uri( |
| image_data_uri: str, |
| *, |
| progress_handler: ProgressHandler = None, |
| ) -> str: |
| max_pixels = _table_ocr_normalized_max_image_pixels() |
| if max_pixels <= 0: |
| return image_data_uri |
|
|
| image_bytes = _decode_table_ocr_image_data_uri_bytes(image_data_uri) |
| try: |
| from PIL import Image |
|
|
| with Image.open(io.BytesIO(image_bytes)) as opened_image: |
| opened_image.load() |
| original_image = opened_image.convert("RGB") |
| except ImportError as exc: |
| raise RuntimeError("Pillow is required to normalize table OCR image resolution.") from exc |
| except Exception as exc: |
| raise RuntimeError("Could not read the table OCR image for resolution normalization.") from exc |
|
|
| original_width, original_height = original_image.size |
| normalized_image = _resize_image_to_max_pixels(original_image, max_pixels) |
| normalized_width, normalized_height = normalized_image.size |
| if (normalized_width, normalized_height) == (original_width, original_height): |
| return image_data_uri |
|
|
| output = io.BytesIO() |
| normalized_image.save(output, format="PNG") |
| emit_progress( |
| f"Normalized table OCR image from {original_width}x{original_height} " |
| f"to {normalized_width}x{normalized_height} with max {max_pixels:,} pixels.", |
| progress_handler=progress_handler, |
| ) |
| encoded = base64.b64encode(output.getvalue()).decode("ascii") |
| return f"data:image/png;base64,{encoded}" |
|
|
|
|
| def _data_uri_file_suffix(image_data_uri: str) -> str: |
| header = image_data_uri.split(",", 1)[0].lower() |
| if "image/jpeg" in header or "image/jpg" in header: |
| return ".jpg" |
| if "image/webp" in header: |
| return ".webp" |
| return ".png" |
|
|
|
|
| def _inline_mistral_table_placeholders(page_obj: Dict[str, Any], text_content: str) -> str: |
| rendered = str(text_content or "") |
| tables = page_obj.get("tables") |
| if not isinstance(tables, list) or not tables: |
| return rendered |
|
|
| inlined_count = 0 |
| fallback_contents: List[str] = [] |
| for table in tables: |
| if not isinstance(table, dict): |
| continue |
| table_id = normalize_text(table.get("id")) |
| table_content = normalize_text(table.get("content")) |
| if not table_content: |
| continue |
| if table_id: |
| placeholder = f"[{table_id}]({table_id})" |
| if placeholder in rendered: |
| rendered = rendered.replace(placeholder, table_content) |
| inlined_count += 1 |
| continue |
| fallback_contents.append(table_content) |
|
|
| # Nothing was inlined and some tables carried no id: return their raw |
| # content so the transcription still includes the table data. |
| if inlined_count == 0 and fallback_contents: |
| return "\n\n".join(fallback_contents) |
|
|
| return rendered |
|
|
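| # Placeholder example: when the page markdown contains "[tbl_0](tbl_0)" and |
| # the tables list holds {"id": "tbl_0", "content": "<table>...</table>"}, the |
| # link-style placeholder is replaced inline with the table content. Tables |
| # without an id are collected separately and returned on their own only when |
| # nothing was inlined. |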
|
|
| def _build_mistral_ocr_payload( |
| *, |
| model: str, |
| signed_url: str, |
| table_format: str, |
| prompt_text: str, |
| ) -> Dict[str, Any]: |
| payload: Dict[str, Any] = { |
| "model": model, |
| "document": { |
| "type": "document_url", |
| "document_url": signed_url, |
| }, |
| "table_format": table_format, |
| } |
| normalized_prompt = normalize_text(prompt_text) |
| if normalized_prompt: |
| # Forward the caller prompt through Mistral's document-annotation mechanism: |
| # a strict single-field JSON schema whose "transcription" string is |
| # unwrapped again by _extract_mistral_prompted_text on the response side. |
| payload["document_annotation_format"] = { |
| "type": "json_schema", |
| "json_schema": { |
| "name": "ocr_table_transcription", |
| "strict": True, |
| "schema": { |
| "type": "object", |
| "properties": { |
| "transcription": {"type": "string"}, |
| }, |
| "required": ["transcription"], |
| "additionalProperties": False, |
| }, |
| }, |
| } |
| payload["document_annotation_prompt"] = normalized_prompt |
| return payload |
|
|
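| # Payload sketch for a prompted request (values illustrative): |
| #   { |
| #     "model": "mistral-ocr-latest", |
| #     "document": {"type": "document_url", "document_url": "<signed url>"}, |
| #     "table_format": "html", |
| #     "document_annotation_format": {"type": "json_schema", ...}, |
| #     "document_annotation_prompt": "<caller prompt>", |
| #   } |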
|
|
| def _extract_mistral_prompted_text(response_payload: Dict[str, Any]) -> str: |
| def unwrap_annotation_text(value: Any) -> str: |
| if not isinstance(value, str): |
| return "" |
| normalized = normalize_text(value) |
| if not normalized: |
| return "" |
| stripped = normalized.strip() |
| if stripped.startswith("{") and stripped.endswith("}"): |
| try: |
| payload = json.loads(stripped) |
| except Exception: |
| return normalized |
| if isinstance(payload, dict): |
| for key in ("transcription", "text", "content", "value", "html"): |
| nested = payload.get(key) |
| if isinstance(nested, str) and normalize_text(nested): |
| return normalize_text(nested) |
| return normalized |
|
|
| annotation = response_payload.get("document_annotation") |
| if isinstance(annotation, str): |
| return unwrap_annotation_text(annotation) |
| if isinstance(annotation, dict): |
| transcription = annotation.get("transcription") |
| if isinstance(transcription, str) and normalize_text(transcription): |
| return unwrap_annotation_text(transcription) |
| for key in ("text", "content", "value"): |
| value = annotation.get(key) |
| if isinstance(value, str) and normalize_text(value): |
| return unwrap_annotation_text(value) |
| return "" |
|
|
|
|
| def transcribe_table_image_with_mistral( |
| image_data_uri: str, |
| *, |
| model_id: Optional[str] = None, |
| prompt: Optional[str] = None, |
| progress_handler: ProgressHandler = None, |
| ) -> str: |
| resolved_model = normalize_text(model_id or default_mistral_ocr_model_id()) |
| if not resolved_model: |
| raise RuntimeError( |
| "Missing Mistral OCR model id. Pass a model id or set MISTRAL_OCR_MODEL_ID." |
| ) |
|
|
| api_key = normalize_text(os.getenv("MISTRAL_API_KEY", "")) |
| if not api_key: |
| raise RuntimeError("Missing MISTRAL_API_KEY for Mistral OCR transcription.") |
|
|
| sec_parser = load_sec_parser_module() |
| progress_label = f"[ocr {resolved_model}]" |
| image_bytes = _decode_image_data_uri_bytes(image_data_uri) |
| table_format = _mistral_ocr_table_format() |
| prompt_text = normalize_text(prompt or default_table_image_transcription_prompt()) |
|
|
| emit_progress( |
| f"{progress_label} | upload image to Mistral OCR", |
| progress_handler=progress_handler, |
| ) |
| client = sec_parser.Mistral(api_key=api_key) |
| upload = client.files.upload( |
| file={ |
| "file_name": f"ocr_table{_data_uri_file_suffix(image_data_uri)}", |
| "content": image_bytes, |
| }, |
| purpose="ocr", |
| ) |
| if not upload or not getattr(upload, "id", None): |
| raise RuntimeError("Mistral OCR image upload failed to return a valid file id.") |
|
|
| signed_url = sec_parser.get_signed_url_with_retry(client, file_id=upload.id) |
| payload = _build_mistral_ocr_payload( |
| model=resolved_model, |
| signed_url=signed_url, |
| table_format=table_format, |
| prompt_text=prompt_text, |
| ) |
|
|
| emit_progress( |
| f"{progress_label} | request 1/1 | table_format={table_format}", |
| progress_handler=progress_handler, |
| ) |
| response = requests.post( |
| sec_parser.OCR_API_URL, |
| headers={ |
| "Authorization": f"Bearer {api_key}", |
| "Content-Type": "application/json", |
| }, |
| json=payload, |
| timeout=600, |
| ) |
| if response.status_code >= 400: |
| raise RuntimeError(f"Mistral OCR HTTP {response.status_code}: {response.text[:1500]}") |
| response_payload = response.json() |
| emit_progress( |
| f"{progress_label} | response received", |
| progress_handler=progress_handler, |
| ) |
|
|
| prompted_text_content = _extract_mistral_prompted_text(response_payload) |
| if prompted_text_content: |
| emit_progress( |
| f"{progress_label} | using prompted document_annotation | content_chars={len(prompted_text_content)}", |
| progress_handler=progress_handler, |
| ) |
| return strip_code_fences(prompted_text_content) |
|
|
| pages = response_payload.get("pages") |
| if not isinstance(pages, list) or not pages: |
| raise TableOCRContentError( |
| "Mistral OCR response did not include any pages.", |
| response_payload=response_payload, |
| details={ |
| "top_level_keys": sorted(str(key) for key in response_payload.keys()), |
| }, |
| ) |
|
|
| first_page = pages[0] |
| if not isinstance(first_page, dict): |
| raise TableOCRContentError( |
| f"Mistral OCR first page was {type(first_page).__name__}, expected an object.", |
| response_payload=response_payload, |
| details={"page_type": type(first_page).__name__}, |
| ) |
|
|
| raw_text_content = normalize_text(sec_parser._pick_text(first_page)) |
| text_content = normalize_text(_inline_mistral_table_placeholders(first_page, raw_text_content)) |
| if not text_content: |
| raise TableOCRContentError( |
| "Mistral OCR response did not contain markdown/text content for the first page.", |
| response_payload=response_payload, |
| details={ |
| "page_keys": sorted(str(key) for key in first_page.keys()), |
| "page_count": len(pages), |
| "table_format": table_format, |
| }, |
| ) |
|
|
| emit_progress( |
| f"{progress_label} | pages={len(pages)} | content_chars={len(text_content)}", |
| progress_handler=progress_handler, |
| ) |
| return strip_code_fences(text_content) |
|
|
|
|
| def transcribe_table_image_with_firered( |
| image_data_uri: str, |
| *, |
| model_id: Optional[str] = None, |
| prompt: Optional[str] = None, |
| progress_handler: ProgressHandler = None, |
| ) -> str: |
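| """Transcribe a table image with the local FireRed backend. |
|
| On MPS, buffer-size failures trigger retries at progressively smaller |
| pixel limits (see firered_mps_retry_pixel_limits) before re-raising. |
| """ |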
| sec_parser = load_sec_parser_module() |
| model_name = resolve_firered_model_id(model_id) |
| if not model_name: |
| raise RuntimeError("Missing OCR model id for FireRed transcription.") |
| progress_label = f"[ocr {model_name}]" |
|
|
| try: |
| import torch |
| except ImportError as exc: |
| raise RuntimeError("PyTorch is required for OCR transcription.") from exc |
|
|
| emit_progress( |
| f"{progress_label} | local FireRed load", |
| progress_handler=progress_handler, |
| ) |
| model_obj, processor = sec_parser._get_firered_table_backend(model_name) |
| runtime_device = sec_parser._model_input_device(model_obj).type |
| original_image = sec_parser._decode_data_uri_to_pil_image(image_data_uri) |
| if _table_ocr_allow_backend_image_resize(): |
| base_page_image = sec_parser._prepare_firered_image(original_image, runtime_device) |
| else: |
| base_page_image = original_image.convert("RGB") |
|
|
| prompt_text = prompt or default_table_image_transcription_prompt() |
| candidate_images = [("initial", base_page_image)] |
| if runtime_device == "mps" and _table_ocr_allow_backend_image_resize(): |
| base_pixels = base_page_image.size[0] * base_page_image.size[1] |
| seen_sizes = {base_page_image.size} |
| for retry_limit in firered_mps_retry_pixel_limits(): |
| if retry_limit >= base_pixels: |
| continue |
| retry_image = _resize_image_to_max_pixels(original_image, retry_limit) |
| if retry_image.size in seen_sizes: |
| continue |
| candidate_images.append((f"retry_max_pixels={retry_limit}", retry_image)) |
| seen_sizes.add(retry_image.size) |
|
|
| last_exc: Optional[Exception] = None |
| for attempt_index, (attempt_label, page_image) in enumerate(candidate_images, start=1): |
| if attempt_index > 1: |
| emit_progress( |
| f"{progress_label} | retry local FireRed with {attempt_label} after buffer error: {last_exc}", |
| progress_handler=progress_handler, |
| ) |
|
|
| messages = [ |
| { |
| "role": "user", |
| "content": [ |
| {"type": "image", "image": page_image}, |
| {"type": "text", "text": prompt_text}, |
| ], |
| } |
| ] |
|
|
| chat_text = processor.apply_chat_template( |
| messages, |
| tokenize=False, |
| add_generation_prompt=True, |
| ) |
|
|
| inputs = processor( |
| text=[chat_text], |
| images=[page_image], |
| return_tensors="pt", |
| ) |
| inputs = inputs.to(sec_parser._model_input_device(model_obj)) |
|
|
| emit_progress( |
| f"{progress_label} | local FireRed generate | device={runtime_device} | attempt={attempt_index}/{len(candidate_images)} | image={page_image.size[0]}x{page_image.size[1]}", |
| progress_handler=progress_handler, |
| ) |
| try: |
| with torch.inference_mode(): |
| generated_ids = model_obj.generate( |
| **inputs, |
| max_new_tokens=sec_parser.Config.FIRERED_MAX_NEW_TOKENS, |
| do_sample=False, |
| ) |
| except Exception as exc: |
| last_exc = exc |
| if runtime_device == "mps" and attempt_index < len(candidate_images) and _is_firered_buffer_error(exc): |
| continue |
| raise |
|
|
| trimmed = generated_ids[:, inputs.input_ids.shape[1] :] |
| raw_output = processor.batch_decode( |
| trimmed, |
| skip_special_tokens=True, |
| clean_up_tokenization_spaces=False, |
| )[0] |
| cleaned_output = _require_nonempty_ocr_text_output( |
| raw_output, |
| backend_label="FireRed local generate()", |
| details={"model_id": model_name, "device": runtime_device}, |
| ) |
| emit_progress( |
| f"{progress_label} | local FireRed response | content_chars={len(cleaned_output)}", |
| progress_handler=progress_handler, |
| ) |
| return TableOCRTranscriptionText( |
| cleaned_output, |
| effective_model_id=model_name, |
| ) |
|
|
| if last_exc is not None: |
| raise last_exc |
| raise RuntimeError("FireRed OCR exhausted local retries without producing output.") |
|
|
|
|
| def transcribe_table_image_with_qianfan( |
| image_data_uri: str, |
| *, |
| model_id: Optional[str] = None, |
| prompt: Optional[str] = None, |
| progress_handler: ProgressHandler = None, |
| ) -> str: |
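| """Transcribe a table image with the local Qianfan OCR chat() backend.""" |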
| del progress_handler |
| resolved_model = resolve_qianfan_model_id(model_id) |
| sec_parser = load_sec_parser_module() |
| model_obj, processor, runtime_device = _get_generic_ocr_backend( |
| resolved_model, |
| env_prefix="QIANFAN_OCR", |
| ) |
| model_device = sec_parser._model_input_device(model_obj) |
| page_image = _prepare_backend_image( |
| sec_parser._decode_data_uri_to_pil_image(image_data_uri), |
| env_prefix="QIANFAN_OCR", |
| runtime_device=runtime_device, |
| ) |
| prompt_text = prompt or default_table_image_transcription_prompt() |
| tokenizer_like = processor |
| pixel_values = _build_qianfan_pixel_values(page_image) |
| try: |
| pixel_values = pixel_values.to(device=model_device, dtype=getattr(model_obj, "dtype", None)) |
| except Exception: |
| try: |
| pixel_values = pixel_values.to(model_device) |
| except Exception: |
| pass |
| try: |
| import torch |
| except ImportError as exc: |
| raise RuntimeError("PyTorch is required for OCR transcription.") from exc |
| with torch.inference_mode(): |
| raw_output = model_obj.chat( |
| tokenizer_like, |
| pixel_values=pixel_values, |
| question=prompt_text, |
| generation_config={"max_new_tokens": _backend_max_new_tokens("QIANFAN_OCR", 4096)}, |
| ) |
| return _require_nonempty_ocr_text_output( |
| raw_output, |
| backend_label="Qianfan OCR local chat()", |
| details={"model_id": resolved_model, "device": runtime_device}, |
| ) |
|
|
|
|
| def transcribe_table_image_with_glm_ocr( |
| image_data_uri: str, |
| *, |
| model_id: Optional[str] = None, |
| prompt: Optional[str] = None, |
| progress_handler: ProgressHandler = None, |
| ) -> str: |
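| """Transcribe a table image with the local GLM-OCR generate() backend.""" |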
| del progress_handler |
| resolved_model = resolve_glm_ocr_model_id(model_id) |
| sec_parser = load_sec_parser_module() |
| model_obj, processor, runtime_device = _get_generic_ocr_backend( |
| resolved_model, |
| env_prefix="GLM_OCR", |
| ) |
| model_device = sec_parser._model_input_device(model_obj) |
| page_image = _prepare_backend_image( |
| sec_parser._decode_data_uri_to_pil_image(image_data_uri), |
| env_prefix="GLM_OCR", |
| runtime_device=runtime_device, |
| ) |
| prompt_text = prompt or "Table Recognition:" |
| raw_output = _run_generate_style_backend( |
| model_obj, |
| processor, |
| page_image, |
| prompt_text, |
| env_prefix="GLM_OCR", |
| model_device=model_device, |
| ) |
| return _require_nonempty_ocr_text_output( |
| raw_output, |
| backend_label="GLM-OCR local generate()", |
| details={"model_id": resolved_model, "device": runtime_device}, |
| ) |
|
|
|
|
| def transcribe_table_image_with_paddleocr_vl( |
| image_data_uri: str, |
| *, |
| model_id: Optional[str] = None, |
| prompt: Optional[str] = None, |
| progress_handler: ProgressHandler = None, |
| ) -> str: |
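| """Transcribe a table image with the local PaddleOCR-VL generate() backend.""" |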
| del progress_handler |
| resolved_model = resolve_paddleocr_vl_model_id(model_id) |
| sec_parser = load_sec_parser_module() |
| model_obj, processor, runtime_device = _get_generic_ocr_backend( |
| resolved_model, |
| env_prefix="PADDLEOCR_VL", |
| ) |
| model_device = sec_parser._model_input_device(model_obj) |
| page_image = _prepare_backend_image( |
| sec_parser._decode_data_uri_to_pil_image(image_data_uri), |
| env_prefix="PADDLEOCR_VL", |
| runtime_device=runtime_device, |
| ) |
| prompt_text = prompt or default_table_image_transcription_prompt() |
| raw_output = _run_generate_style_backend( |
| model_obj, |
| processor, |
| page_image, |
| prompt_text, |
| env_prefix="PADDLEOCR_VL", |
| model_device=model_device, |
| ) |
| return _require_nonempty_ocr_text_output( |
| raw_output, |
| backend_label="PaddleOCR-VL local generate()", |
| details={"model_id": resolved_model, "device": runtime_device}, |
| ) |
|
|
|
|
| def transcribe_table_image_with_got_ocr( |
| image_data_uri: str, |
| *, |
| model_id: Optional[str] = None, |
| prompt: Optional[str] = None, |
| progress_handler: ProgressHandler = None, |
| ) -> str: |
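| """Transcribe a table image with the local GOT-OCR backend.""" |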
| del progress_handler |
| del prompt  # _run_got_ocr_backend takes no prompt, so the argument is intentionally unused |
| _ensure_transformers_dynamic_cache_compatibility() |
| resolved_model = resolve_got_ocr_model_id(model_id) |
| sec_parser = load_sec_parser_module() |
| model_obj, processor, runtime_device = _get_generic_ocr_backend( |
| resolved_model, |
| env_prefix="GOT_OCR", |
| prefer_tokenizer=True, |
| ) |
| model_device = sec_parser._model_input_device(model_obj) |
| page_image = _prepare_backend_image( |
| sec_parser._decode_data_uri_to_pil_image(image_data_uri), |
| env_prefix="GOT_OCR", |
| runtime_device=runtime_device, |
| ) |
| raw_output = _run_got_ocr_backend( |
| model_obj, |
| processor, |
| page_image, |
| model_device=model_device, |
| ) |
| return _require_nonempty_ocr_text_output( |
| raw_output, |
| backend_label="GOT-OCR local backend", |
| details={"model_id": resolved_model, "device": runtime_device}, |
| ) |
|
|
|
|
| def transcribe_table_image_with_monkeyocr( |
| image_data_uri: str, |
| *, |
| model_id: Optional[str] = None, |
| prompt: Optional[str] = None, |
| progress_handler: ProgressHandler = None, |
| ) -> str: |
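| """Transcribe a table image through the local MonkeyOCR command wrapper.""" |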
| resolved_model = resolve_monkeyocr_model_id(model_id) |
| sec_parser = load_sec_parser_module() |
| page_image = sec_parser._decode_data_uri_to_pil_image(image_data_uri) |
| prompt_text = prompt or default_table_image_transcription_prompt() |
| raw_output = _run_monkeyocr_command( |
| page_image, |
| prompt_text, |
| model_id=resolved_model, |
| progress_handler=progress_handler, |
| ) |
| return _require_nonempty_ocr_text_output( |
| raw_output, |
| backend_label="MonkeyOCR local wrapper", |
| details={"model_id": resolved_model}, |
| ) |
|
|
|
|
| def _pdf_fastpath_raster_fallback_model_id(explicit_model_id: Optional[str] = None) -> str: |
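| """Pick the OCR model used when the PDF fast path falls back to raster OCR. |
|
| Resolution order: explicit argument, then PDF_FASTPATH_RASTER_OCR_MODEL_ID, |
| OPENROUTER_MODEL_ID, DEEPSEEK_OCR_MODEL_ID, FIRERED_MODEL_ID, and |
| GEMMA4_LLAMA_CPP_MODEL_ID; "pdf_native_fastpath" itself is never returned. |
| """ |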
| candidates = [ |
| normalize_text(explicit_model_id or ""), |
| normalize_text(os.getenv("PDF_FASTPATH_RASTER_OCR_MODEL_ID", "")), |
| normalize_text(os.getenv("OPENROUTER_MODEL_ID", "")), |
| normalize_text(os.getenv("DEEPSEEK_OCR_MODEL_ID", "")), |
| normalize_text(os.getenv("FIRERED_MODEL_ID", "")), |
| normalize_text(os.getenv("GEMMA4_LLAMA_CPP_MODEL_ID", "")), |
| ] |
| for candidate in candidates: |
| if candidate and candidate.lower() != "pdf_native_fastpath": |
| return candidate |
| raise RuntimeError( |
| "Raster PDF fast-path fallback needs an OCR model id. Pass ocr_model_id=... " |
| "or set PDF_FASTPATH_RASTER_OCR_MODEL_ID / OPENROUTER_MODEL_ID." |
| ) |
|
|
|
|
| def _pdf_fastpath_raster_render_zoom() -> float: |
| raw_value = normalize_text(os.getenv("PDF_FASTPATH_RASTER_RENDER_ZOOM", "2.0")) |
| try: |
| return max(1.0, float(raw_value)) |
| except ValueError: |
| return 2.0 |
|
|
|
|
| def _pdf_fastpath_raster_padding_pt() -> float: |
| raw_value = normalize_text(os.getenv("PDF_FASTPATH_RASTER_PADDING_PT", "6.0")) |
| try: |
| return max(0.0, float(raw_value)) |
| except ValueError: |
| return 6.0 |
|
|
|
|
| def _pdf_fastpath_raster_max_tables() -> int: |
| return _env_int("PDF_FASTPATH_RASTER_MAX_TABLES", 3) |
|
|
|
|
| def _pdf_fastpath_page_render_zoom() -> float: |
| raw_value = normalize_text(os.getenv("PDF_FASTPATH_PAGE_RENDER_ZOOM", "2.0")) |
| try: |
| return max(1.0, float(raw_value)) |
| except ValueError: |
| return 2.0 |
|
|
|
|
| def _render_pdf_page_data_uri( |
| page: Any, |
| *, |
| zoom: float, |
| ) -> str: |
| try: |
| import fitz |
| except ImportError as exc: |
| raise RuntimeError("PyMuPDF is required for PDF page rendering.") from exc |
|
|
| pix = page.get_pixmap(matrix=fitz.Matrix(float(zoom), float(zoom)), alpha=False) |
| return f"data:image/png;base64,{base64.b64encode(pix.tobytes('png')).decode('utf-8')}" |
|
|
|
|
| def _render_pdf_table_region_data_uri( |
| page: Any, |
| *, |
| bbox: List[float] | Tuple[float, float, float, float], |
| zoom: float, |
| padding_pt: float, |
| ) -> str: |
| try: |
| import fitz |
| except ImportError as exc: |
| raise RuntimeError("PyMuPDF is required for PDF fast-path raster fallback rendering.") from exc |
|
|
| page_rect = page.rect |
| left = max(float(page_rect.x0), float(bbox[0]) - padding_pt) |
| top = max(float(page_rect.y0), float(bbox[1]) - padding_pt) |
| right = min(float(page_rect.x1), float(bbox[2]) + padding_pt) |
| bottom = min(float(page_rect.y1), float(bbox[3]) + padding_pt) |
| clip_rect = fitz.Rect(left, top, right, bottom) |
| pix = page.get_pixmap(matrix=fitz.Matrix(float(zoom), float(zoom)), clip=clip_rect, alpha=False) |
| return f"data:image/png;base64,{base64.b64encode(pix.tobytes('png')).decode('utf-8')}" |
|
|
|
|
| def _normalize_table_match_text(text: str) -> str: |
| normalized = html.unescape(normalize_text(text)) |
| normalized = re.sub(r"\s+", " ", normalized) |
| return normalized.strip().lower() |
|
|
|
|
| def _bbox_to_attr_value(bbox: Any) -> str: |
| if not isinstance(bbox, (list, tuple)) or len(bbox) != 4: |
| return "" |
| try: |
| return ",".join(f"{float(value):.2f}" for value in bbox) |
| except (TypeError, ValueError): |
| return "" |
|
|
|
|
| def _sequence_similarity(left_text: str, right_text: str) -> float: |
| left_normalized = _normalize_table_match_text(left_text) |
| right_normalized = _normalize_table_match_text(right_text) |
| if not left_normalized or not right_normalized: |
| return 0.0 |
| return float(difflib.SequenceMatcher(None, left_normalized, right_normalized).ratio()) |
|
|
|
|
| def _table_text_overlap_score(left_texts: List[str], right_texts: List[str]) -> float: |
| left_set = {_normalize_table_match_text(text) for text in left_texts if _normalize_table_match_text(text)} |
| right_set = {_normalize_table_match_text(text) for text in right_texts if _normalize_table_match_text(text)} |
| if not left_set or not right_set: |
| return 0.0 |
| intersection = len(left_set & right_set) |
| union = max(1, len(left_set | right_set)) |
| return float(intersection) / float(union) |
|
|
|
|
| def _parse_html_table_cells(table_tag: Any, *, table_index: int) -> Dict[str, Any]: |
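| """Expand a <table> tag into grid-aware cell descriptors. |
|
| An occupancy map records slots claimed by earlier rowspan/colspan cells, |
| so each cell is anchored at the (row, col) it would occupy in the rendered |
| grid; e.g. a rowspan=2 cell claims the slot beneath it, shifting the next |
| row's first cell one column to the right. |
| """ |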
| occupied: Dict[Tuple[int, int], bool] = {} |
| cells: List[Dict[str, Any]] = [] |
| row_tags = list(table_tag.find_all("tr")) |
| max_column = 0 |
| for row_index, row_tag in enumerate(row_tags): |
| column_index = 0 |
| for cell_tag in row_tag.find_all(["th", "td"], recursive=False): |
| while occupied.get((row_index, column_index)): |
| column_index += 1 |
| try: |
| rowspan = max(1, int(cell_tag.get("rowspan", 1))) |
| except (TypeError, ValueError): |
| rowspan = 1 |
| try: |
| colspan = max(1, int(cell_tag.get("colspan", 1))) |
| except (TypeError, ValueError): |
| colspan = 1 |
| for row_offset in range(rowspan): |
| for column_offset in range(colspan): |
| occupied[(row_index + row_offset, column_index + column_offset)] = True |
| cell_text = normalize_text(cell_tag.get_text(" ", strip=True)) |
| cells.append( |
| { |
| "row": row_index, |
| "col": column_index, |
| "rowspan": rowspan, |
| "colspan": colspan, |
| "text": cell_text, |
| "match_text": _normalize_table_match_text(cell_text), |
| "tag": cell_tag, |
| } |
| ) |
| max_column = max(max_column, column_index + colspan) |
| column_index += colspan |
| return { |
| "index": table_index, |
| "tag": table_tag, |
| "cells": cells, |
| "texts": [cell["text"] for cell in cells if normalize_text(cell["text"])], |
| "row_count": len(row_tags), |
| "column_count": max_column, |
| "match_text": " | ".join( |
| _normalize_table_match_text(cell["text"]) |
| for cell in cells |
| if _normalize_table_match_text(cell["text"]) |
| ), |
| } |
|
|
|
|
| def _build_native_table_descriptor(table_payload: Dict[str, Any], *, table_index: int) -> Dict[str, Any]: |
| cells = [ |
| cell |
| for cell in list(table_payload.get("cells") or []) |
| if isinstance(cell, dict) |
| ] |
| row_count = max( |
| ( |
| int(cell.get("row", 0)) + max(1, int(cell.get("rowspan", 1))) |
| for cell in cells |
| ), |
| default=0, |
| ) |
| column_count = max( |
| ( |
| int(cell.get("col", 0)) + max(1, int(cell.get("colspan", 1))) |
| for cell in cells |
| ), |
| default=0, |
| ) |
| texts = [normalize_text(str(cell.get("text", ""))) for cell in cells if normalize_text(str(cell.get("text", "")))] |
| for cell in cells: |
| cell["match_text"] = _normalize_table_match_text(str(cell.get("text", ""))) |
| return { |
| "index": table_index, |
| "bbox": list(table_payload.get("bbox") or []), |
| "source": normalize_text(str(table_payload.get("source", ""))), |
| "cells": cells, |
| "texts": texts, |
| "row_count": row_count, |
| "column_count": column_count, |
| "match_text": " | ".join(cell["match_text"] for cell in cells if cell["match_text"]), |
| } |
|
|
|
|
| def _score_html_table_match(html_table: Dict[str, Any], native_table: Dict[str, Any]) -> float: |
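| """Score how well an OCR HTML table matches a native table. |
|
| Weighted blend: 0.55 * full-text similarity + 0.20 * cell-text overlap |
| (Jaccard) + 0.15 * row-count proximity + 0.10 * column-count proximity. |
| """ |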
| text_score = _sequence_similarity(str(html_table.get("match_text", "")), str(native_table.get("match_text", ""))) |
| overlap_score = _table_text_overlap_score( |
| list(html_table.get("texts") or []), |
| list(native_table.get("texts") or []), |
| ) |
| row_score = 1.0 / (1.0 + abs(int(html_table.get("row_count", 0)) - int(native_table.get("row_count", 0)))) |
| column_score = 1.0 / (1.0 + abs(int(html_table.get("column_count", 0)) - int(native_table.get("column_count", 0)))) |
| return (0.55 * text_score) + (0.20 * overlap_score) + (0.15 * row_score) + (0.10 * column_score) |
|
|
|
|
| def _match_html_tables_to_native_tables( |
| html_tables: List[Dict[str, Any]], |
| native_tables: List[Dict[str, Any]], |
| ) -> Dict[int, Dict[str, Any]]: |
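| """Greedily pair OCR tables with native tables by descending match score. |
|
| Each table is used at most once; pairs scoring at or below 0.10 are dropped. |
| """ |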
| scored_pairs: List[Tuple[float, int, int]] = [] |
| for html_table in html_tables: |
| for native_table in native_tables: |
| scored_pairs.append( |
| ( |
| _score_html_table_match(html_table, native_table), |
| int(html_table["index"]), |
| int(native_table["index"]), |
| ) |
| ) |
| scored_pairs.sort(reverse=True) |
| assignments: Dict[int, Dict[str, Any]] = {} |
| used_html: set[int] = set() |
| used_native: set[int] = set() |
| for score, html_index, native_index in scored_pairs: |
| if score <= 0.10 or html_index in used_html or native_index in used_native: |
| continue |
| assignments[html_index] = { |
| "native_index": native_index, |
| "score": float(score), |
| } |
| used_html.add(html_index) |
| used_native.add(native_index) |
| return assignments |
|
|
|
|
| def _grid_iou(left_cell: Dict[str, Any], right_cell: Dict[str, Any]) -> float: |
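| """Intersection-over-union of two cells' rowspan/colspan grid footprints.""" |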
| left_row_start = int(left_cell.get("row", 0)) |
| left_row_end = left_row_start + max(1, int(left_cell.get("rowspan", 1))) |
| left_col_start = int(left_cell.get("col", 0)) |
| left_col_end = left_col_start + max(1, int(left_cell.get("colspan", 1))) |
| right_row_start = int(right_cell.get("row", 0)) |
| right_row_end = right_row_start + max(1, int(right_cell.get("rowspan", 1))) |
| right_col_start = int(right_cell.get("col", 0)) |
| right_col_end = right_col_start + max(1, int(right_cell.get("colspan", 1))) |
|
|
| row_overlap = max(0, min(left_row_end, right_row_end) - max(left_row_start, right_row_start)) |
| col_overlap = max(0, min(left_col_end, right_col_end) - max(left_col_start, right_col_start)) |
| intersection = row_overlap * col_overlap |
| left_area = max(1, left_row_end - left_row_start) * max(1, left_col_end - left_col_start) |
| right_area = max(1, right_row_end - right_row_start) * max(1, right_col_end - right_col_start) |
| union = max(1, left_area + right_area - intersection) |
| return float(intersection) / float(union) |
|
|
|
|
| def _score_html_cell_match(html_cell: Dict[str, Any], native_cell: Dict[str, Any]) -> float: |
| text_score = _sequence_similarity(str(html_cell.get("text", "")), str(native_cell.get("text", ""))) |
| grid_score = _grid_iou(html_cell, native_cell) |
| same_anchor_score = ( |
| 1.0 |
| if ( |
| int(html_cell.get("row", -1)) == int(native_cell.get("row", -2)) |
| and int(html_cell.get("col", -1)) == int(native_cell.get("col", -2)) |
| ) |
| else 0.0 |
| ) |
| span_score = 1.0 / ( |
| 1.0 |
| + abs(int(html_cell.get("rowspan", 1)) - int(native_cell.get("rowspan", 1))) |
| + abs(int(html_cell.get("colspan", 1)) - int(native_cell.get("colspan", 1))) |
| ) |
| if not _normalize_table_match_text(str(html_cell.get("text", ""))) or not _normalize_table_match_text(str(native_cell.get("text", ""))): |
| return (0.65 * grid_score) + (0.20 * same_anchor_score) + (0.15 * span_score) |
| return (0.50 * text_score) + (0.25 * grid_score) + (0.15 * same_anchor_score) + (0.10 * span_score) |
|
|
|
|
| def _match_html_cells_to_native_cells( |
| html_table: Dict[str, Any], |
| native_table: Dict[str, Any], |
| ) -> Dict[int, Dict[str, Any]]: |
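| """Match OCR cells to native cells in two passes. |
|
| Pass 1 consumes exact (row, col, rowspan, colspan) key matches at score |
| 1.0; pass 2 greedily assigns each remaining OCR cell the best-scoring |
| unused native cell, skipping matches at or below 0.15. |
| """ |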
| matches: Dict[int, Dict[str, Any]] = {} |
| native_cells = list(native_table.get("cells") or []) |
| unused_native_indexes = set(range(len(native_cells))) |
| exact_key_to_indexes: Dict[Tuple[int, int, int, int], List[int]] = {} |
| for index, native_cell in enumerate(native_cells): |
| key = ( |
| int(native_cell.get("row", 0)), |
| int(native_cell.get("col", 0)), |
| max(1, int(native_cell.get("rowspan", 1))), |
| max(1, int(native_cell.get("colspan", 1))), |
| ) |
| exact_key_to_indexes.setdefault(key, []).append(index) |
|
|
| for html_cell_index, html_cell in enumerate(list(html_table.get("cells") or [])): |
| key = ( |
| int(html_cell.get("row", 0)), |
| int(html_cell.get("col", 0)), |
| max(1, int(html_cell.get("rowspan", 1))), |
| max(1, int(html_cell.get("colspan", 1))), |
| ) |
| exact_indexes = [index for index in exact_key_to_indexes.get(key, []) if index in unused_native_indexes] |
| if exact_indexes: |
| native_index = exact_indexes[0] |
| unused_native_indexes.remove(native_index) |
| matches[html_cell_index] = { |
| "native_cell": native_cells[native_index], |
| "score": 1.0, |
| } |
|
|
| for html_cell_index, html_cell in enumerate(list(html_table.get("cells") or [])): |
| if html_cell_index in matches: |
| continue |
| best_native_index: Optional[int] = None |
| best_score = 0.0 |
| for native_index in unused_native_indexes: |
| candidate_score = _score_html_cell_match(html_cell, native_cells[native_index]) |
| if candidate_score <= best_score: |
| continue |
| best_score = candidate_score |
| best_native_index = native_index |
| if best_native_index is None or best_score <= 0.15: |
| continue |
| unused_native_indexes.remove(best_native_index) |
| matches[html_cell_index] = { |
| "native_cell": native_cells[best_native_index], |
| "score": float(best_score), |
| } |
| return matches |
|
|
|
|
| def _soup_fragment_contents(soup: Any) -> str: |
| body = getattr(soup, "body", None) |
| if body is not None: |
| return body.decode_contents() |
| return str(soup) |
|
|
|
|
| def _cell_markup_has_tag(markup: str, *tag_names: str) -> bool: |
| if not markup: |
| return False |
| pattern = r"<(?:%s)\b" % "|".join(re.escape(tag_name) for tag_name in tag_names) |
| return bool(re.search(pattern, markup, flags=re.IGNORECASE)) |
|
|
|
|
| def _style_attr_text(value: Any) -> str: |
| return re.sub(r"\s+", " ", str(value or "")).strip().lower() |
|
|
|
|
| def _tag_and_descendants(tag: Any) -> List[Any]: |
| tags = [tag] |
| if hasattr(tag, "find_all"): |
| try: |
| tags.extend(list(tag.find_all(True))) |
| except Exception: |
| pass |
| return tags |
|
|
|
|
| def _tag_descendants_have_css_style(tag: Any, pattern: str) -> bool: |
| compiled = re.compile(pattern, re.IGNORECASE) |
| for candidate in _tag_and_descendants(tag): |
| style_text = "" |
| if hasattr(candidate, "get"): |
| style_text = _style_attr_text(candidate.get("style", "")) |
| if style_text and compiled.search(style_text): |
| return True |
| return False |
|
|
|
|
| def _tag_has_bold_style(tag: Any) -> bool: |
| return _cell_markup_has_tag(str(tag), "strong", "b") or _tag_descendants_have_css_style( |
| tag, |
| r"font-weight\s*:\s*(?:bold|bolder|[6-9]00)\b", |
| ) |
|
|
|
|
| def _tag_has_italic_style(tag: Any) -> bool: |
| return _cell_markup_has_tag(str(tag), "em", "i") or _tag_descendants_have_css_style( |
| tag, |
| r"font-style\s*:\s*(?:italic|oblique)\b", |
| ) |
|
|
|
|
| def _tag_has_underline_style(tag: Any) -> bool: |
| return _cell_markup_has_tag(str(tag), "u") or _tag_descendants_have_css_style( |
| tag, |
| r"text-decoration(?:-line)?\s*:[^;]*underline\b", |
| ) |
|
|
|
|
| def _replace_tag_contents_with_html(tag: Any, html_fragment: str) -> None: |
| try: |
| from bs4 import BeautifulSoup |
| except ImportError as exc: |
| raise RuntimeError("beautifulsoup4 is required for HTML fragment replacement.") from exc |
|
|
| fragment_soup = BeautifulSoup(f"<body>{html_fragment}</body>", "html.parser") |
| body = getattr(fragment_soup, "body", None) |
| replacement_nodes = list(body.contents if body is not None else fragment_soup.contents) |
| tag.clear() |
| for node in replacement_nodes: |
| tag.append(node) |
|
|
|
|
| def _wrap_cell_markup_with_style_tags(markup: str, *, bold: bool, italic: bool, underline: bool) -> str: |
| rendered = str(markup or "") |
| if bold and not _cell_markup_has_tag(rendered, "strong", "b"): |
| rendered = f"<strong>{rendered}</strong>" |
| if italic and not _cell_markup_has_tag(rendered, "em", "i"): |
| rendered = f"<em>{rendered}</em>" |
| if underline and not _cell_markup_has_tag(rendered, "u"): |
| rendered = f"<u>{rendered}</u>" |
| return rendered |
|
|
|
|
| def _style_overlay_min_match_score() -> float: |
| return _env_float("PDF_STYLE_OVERLAY_MIN_MATCH_SCORE", 0.75, min_value=0.0, max_value=1.0) |
|
|
|
|
| def _style_overlay_min_text_similarity() -> float: |
| return _env_float("PDF_STYLE_OVERLAY_MIN_TEXT_SIMILARITY", 0.90, min_value=0.0, max_value=1.0) |
|
|
|
|
| def _style_overlay_bold_min_match_score() -> float: |
| return _env_float("PDF_STYLE_OVERLAY_BOLD_MIN_MATCH_SCORE", 0.90, min_value=0.0, max_value=1.0) |
|
|
|
|
| def _style_overlay_bold_min_text_similarity() -> float: |
| return _env_float("PDF_STYLE_OVERLAY_BOLD_MIN_TEXT_SIMILARITY", 0.97, min_value=0.0, max_value=1.0) |
|
|
|
|
| def _resolved_style_overlay_flags( |
| *, |
| current_text: str, |
| native_text: str, |
| match_score: Optional[float], |
| bold: bool, |
| italic: bool, |
| underline: bool, |
| ) -> Dict[str, bool]: |
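| """Gate native bold/italic/underline flags behind match confidence. |
|
| Italic/underline require the general PDF_STYLE_OVERLAY_MIN_* thresholds; |
| bold requires the stricter PDF_STYLE_OVERLAY_BOLD_* thresholds; if either |
| cell text normalizes to empty, every flag is dropped. |
| """ |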
| normalized_current = _normalize_table_match_text(current_text) |
| normalized_native = _normalize_table_match_text(native_text) |
| if not normalized_current or not normalized_native: |
| return { |
| "bold": False, |
| "italic": False, |
| "underline": False, |
| } |
|
|
| score = max(0.0, min(1.0, float(match_score))) if isinstance(match_score, (int, float)) else 0.0 |
| text_similarity = _sequence_similarity(current_text, native_text) |
| texts_match = normalized_current == normalized_native |
|
|
| general_confident = score >= _style_overlay_min_match_score() and ( |
| texts_match or text_similarity >= _style_overlay_min_text_similarity() |
| ) |
| bold_confident = score >= _style_overlay_bold_min_match_score() and ( |
| texts_match or text_similarity >= _style_overlay_bold_min_text_similarity() |
| ) |
|
|
| return { |
| "bold": bool(bold) and bold_confident, |
| "italic": bool(italic) and general_confident, |
| "underline": bool(underline) and general_confident, |
| } |
|
|
|
|
| def _resolve_style_overlay_mode(style_overlay_mode: Optional[str]) -> str: |
| normalized = normalize_text(style_overlay_mode or "auto").lower() or "auto" |
| if normalized in {"none", "attrs_only", "formatting_only", "auto", "aggressive"}: |
| return normalized |
| return "auto" |
|
|
|
|
| def _apply_native_style_overlay_to_cell( |
| cell_tag: Any, |
| *, |
| html_cell: Dict[str, Any], |
| native_cell: Dict[str, Any], |
| style_overlay_mode: str, |
| match_score: Optional[float] = None, |
| ) -> str: |
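| """Rewrite one matched cell's markup according to style_overlay_mode. |
|
| "none" and "attrs_only" leave the markup untouched; "formatting_only" |
| wraps it in <strong>/<em>/<u> when the native flags pass the confidence |
| gates; "aggressive" swaps in the native cell HTML whenever it exists; |
| "auto" swaps in native HTML only when the texts agree closely, otherwise |
| falls back to confident tag wrapping. |
| """ |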
| current_markup = str(cell_tag.decode_contents() or "") |
| native_html = normalize_text(str(native_cell.get("html", ""))) |
| current_text = str(html_cell.get("text", "")) |
| native_text = str(native_cell.get("text", "")) |
| text_similarity = _sequence_similarity(current_text, native_text) |
| texts_match = _normalize_table_match_text(current_text) == _normalize_table_match_text(native_text) |
| style_flags = _resolved_style_overlay_flags( |
| current_text=current_text, |
| native_text=native_text, |
| match_score=match_score, |
| bold=bool(native_cell.get("bold")), |
| italic=bool(native_cell.get("italic")), |
| underline=bool(native_cell.get("underline")), |
| ) |
| uniform_style_markup = _wrap_cell_markup_with_style_tags( |
| current_markup, |
| bold=style_flags["bold"], |
| italic=style_flags["italic"], |
| underline=style_flags["underline"], |
| ) |
|
|
| if style_overlay_mode in {"none", "attrs_only"}: |
| # Neither mode rewrites cell markup; "attrs_only" keeps only the data-* attributes applied by the caller. |
| return current_markup |
| if style_overlay_mode == "formatting_only": |
| if uniform_style_markup != current_markup: |
| _replace_tag_contents_with_html(cell_tag, uniform_style_markup) |
| return str(cell_tag.decode_contents() or "") |
| if style_overlay_mode == "aggressive" and native_html: |
| _replace_tag_contents_with_html(cell_tag, native_html) |
| return str(cell_tag.decode_contents() or "") |
| if style_overlay_mode == "auto" and native_html and (texts_match or text_similarity >= 0.92): |
| _replace_tag_contents_with_html(cell_tag, native_html) |
| return str(cell_tag.decode_contents() or "") |
| if uniform_style_markup != current_markup: |
| _replace_tag_contents_with_html(cell_tag, uniform_style_markup) |
| return str(cell_tag.decode_contents() or "") |
|
|
|
|
| def annotate_table_html_with_pdf_cells( |
| table_html: str, |
| *, |
| pdf_path: str | Path, |
| page_number: int, |
| native_payload: Optional[Dict[str, Any]] = None, |
| style_overlay_mode: Optional[str] = None, |
| progress_handler: ProgressHandler = None, |
| ) -> Dict[str, Any]: |
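| """Annotate OCR table HTML with cell metadata from the source PDF page. |
|
| Native cells are extracted via pdf_table_fastpath when no payload is |
| supplied; matching and data-pdf-* attribute stamping only run when the |
| native extraction mode is "pdf_native". Returns the rewritten HTML plus |
| per-table match reports and timings. |
| """ |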
| try: |
| import pdf_table_fastpath |
| except ImportError as exc: |
| raise RuntimeError( |
| "The pdf_table_fastpath module is required for PDF cell/style annotation." |
| ) from exc |
| try: |
| from bs4 import BeautifulSoup |
| except ImportError as exc: |
| raise RuntimeError( |
| "beautifulsoup4 is required for annotating Mistral HTML with PDF cell metadata." |
| ) from exc |
|
|
| resolved_pdf_path = Path(pdf_path).resolve() |
| resolved_page_number = max(1, int(page_number)) |
| resolved_style_overlay_mode = _resolve_style_overlay_mode(style_overlay_mode) |
| started_at = time.perf_counter() |
| if native_payload is None: |
| emit_progress( |
| f"[pdf-cell-overlay] {resolved_pdf_path.name} | page {resolved_page_number} | extract native cells", |
| progress_handler=progress_handler, |
| ) |
| native_payload = pdf_table_fastpath.extract_tables_from_pdf_page( |
| resolved_pdf_path, |
| page_number=resolved_page_number, |
| ) |
|
|
| soup = BeautifulSoup(str(table_html or ""), "html.parser") |
| html_tables = [ |
| _parse_html_table_cells(table_tag, table_index=index) |
| for index, table_tag in enumerate(soup.find_all("table")) |
| ] |
| native_tables = [ |
| _build_native_table_descriptor(table_payload, table_index=index) |
| for index, table_payload in enumerate(list((native_payload or {}).get("tables") or [])) |
| if isinstance(table_payload, dict) |
| ] |
|
|
| annotated_tables: List[Dict[str, Any]] = [] |
| if html_tables and native_tables and normalize_text(str((native_payload or {}).get("mode", ""))) == "pdf_native": |
| table_matches = _match_html_tables_to_native_tables(html_tables, native_tables) |
| for html_table in html_tables: |
| match = table_matches.get(int(html_table["index"])) |
| if match is None: |
| annotated_tables.append( |
| { |
| "table_index": int(html_table["index"]), |
| "matched": False, |
| "cells": [], |
| } |
| ) |
| continue |
|
|
| native_table = native_tables[int(match["native_index"])] |
| table_tag = html_table["tag"] |
| table_bbox_attr = _bbox_to_attr_value(native_table.get("bbox")) |
| table_tag["data-pdf-native-index"] = str(native_table["index"]) |
| table_tag["data-pdf-match-score"] = f"{float(match['score']):.3f}" |
| if table_bbox_attr: |
| table_tag["data-pdf-bbox"] = table_bbox_attr |
| if normalize_text(str(native_table.get("source", ""))): |
| table_tag["data-pdf-source"] = str(native_table["source"]) |
|
|
| cell_matches = _match_html_cells_to_native_cells(html_table, native_table) |
| annotated_cells: List[Dict[str, Any]] = [] |
| for html_cell_index, html_cell in enumerate(list(html_table.get("cells") or [])): |
| cell_tag = html_cell["tag"] |
| matched_payload = cell_matches.get(html_cell_index) |
| if matched_payload is None: |
| cell_tag["data-pdf-matched"] = "0" |
| annotated_cells.append( |
| { |
| "row": int(html_cell["row"]), |
| "col": int(html_cell["col"]), |
| "rowspan": int(html_cell["rowspan"]), |
| "colspan": int(html_cell["colspan"]), |
| "text": str(html_cell["text"]), |
| "matched": False, |
| } |
| ) |
| continue |
|
|
| native_cell = matched_payload["native_cell"] |
| cell_tag["data-pdf-matched"] = "1" |
| cell_tag["data-pdf-match-score"] = f"{float(matched_payload['score']):.3f}" |
| bbox_attr = _bbox_to_attr_value(native_cell.get("bbox")) |
| if bbox_attr: |
| cell_tag["data-pdf-bbox"] = bbox_attr |
| cell_tag["data-pdf-native-row"] = str(int(native_cell.get("row", 0))) |
| cell_tag["data-pdf-native-col"] = str(int(native_cell.get("col", 0))) |
| cell_tag["data-pdf-bold"] = "1" if bool(native_cell.get("bold")) else "0" |
| cell_tag["data-pdf-italic"] = "1" if bool(native_cell.get("italic")) else "0" |
| cell_tag["data-pdf-underline"] = "1" if bool(native_cell.get("underline")) else "0" |
| cell_tag["data-pdf-header"] = "1" if bool(native_cell.get("header")) else "0" |
| styled_html = _apply_native_style_overlay_to_cell( |
| cell_tag, |
| html_cell=html_cell, |
| native_cell=native_cell, |
| style_overlay_mode=resolved_style_overlay_mode, |
| match_score=float(matched_payload["score"]), |
| ) |
| annotated_cells.append( |
| { |
| "row": int(html_cell["row"]), |
| "col": int(html_cell["col"]), |
| "rowspan": int(html_cell["rowspan"]), |
| "colspan": int(html_cell["colspan"]), |
| "text": str(html_cell["text"]), |
| "matched": True, |
| "match_score": round(float(matched_payload["score"]), 4), |
| "bbox": list(native_cell.get("bbox") or []), |
| "native_row": int(native_cell.get("row", 0)), |
| "native_col": int(native_cell.get("col", 0)), |
| "bold": bool(native_cell.get("bold")), |
| "italic": bool(native_cell.get("italic")), |
| "underline": bool(native_cell.get("underline")), |
| "header": bool(native_cell.get("header")), |
| "styled_html": styled_html, |
| } |
| ) |
| annotated_tables.append( |
| { |
| "table_index": int(html_table["index"]), |
| "matched": True, |
| "match_score": round(float(match["score"]), 4), |
| "native_table_index": int(native_table["index"]), |
| "bbox": list(native_table.get("bbox") or []), |
| "source": str(native_table.get("source", "")), |
| "cells": annotated_cells, |
| } |
| ) |
|
|
| elapsed_ms = (time.perf_counter() - started_at) * 1000.0 |
| return { |
| "html": _soup_fragment_contents(soup), |
| "mode": normalize_text(str((native_payload or {}).get("mode", ""))) or None, |
| "tables": annotated_tables, |
| "native_tables": native_tables, |
| "style_overlay_mode": resolved_style_overlay_mode, |
| "timings_ms": { |
| "annotation_ms": round(float(elapsed_ms), 2), |
| }, |
| } |
|
|
|
|
| def _build_native_html_table_descriptor(table_tag: Any, *, table_index: int) -> Dict[str, Any]: |
| parsed = _parse_html_table_cells(table_tag, table_index=table_index) |
| native_cells: List[Dict[str, Any]] = [] |
| texts: List[str] = [] |
| for cell in list(parsed.get("cells") or []): |
| source_tag = cell["tag"] |
| bold = _tag_has_bold_style(source_tag) |
| italic = _tag_has_italic_style(source_tag) |
| underline = _tag_has_underline_style(source_tag) |
| rendered_html = _wrap_cell_markup_with_style_tags( |
| str(source_tag.decode_contents() or ""), |
| bold=bold, |
| italic=italic, |
| underline=underline, |
| ) |
| text = normalize_text(str(cell.get("text", ""))) |
| if text: |
| texts.append(text) |
| native_cells.append( |
| { |
| "row": int(cell["row"]), |
| "col": int(cell["col"]), |
| "rowspan": int(cell["rowspan"]), |
| "colspan": int(cell["colspan"]), |
| "text": text, |
| "html": rendered_html, |
| "bold": bold, |
| "italic": italic, |
| "underline": underline, |
| "header": normalize_text(getattr(source_tag, "name", "")).lower() == "th", |
| "match_text": _normalize_table_match_text(text), |
| } |
| ) |
| return { |
| "index": table_index, |
| "source": "html_native", |
| "cells": native_cells, |
| "texts": texts, |
| "row_count": int(parsed.get("row_count", 0)), |
| "column_count": int(parsed.get("column_count", 0)), |
| "match_text": str(parsed.get("match_text", "")), |
| } |
|
|
|
|
| def annotate_table_html_with_native_html( |
| table_html: str, |
| *, |
| native_html: str, |
| style_overlay_mode: Optional[str] = None, |
| ) -> Dict[str, Any]: |
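| """Annotate OCR table HTML against a native HTML rendition of the same tables. |
|
| Mirrors annotate_table_html_with_pdf_cells, but the ground truth comes |
| from parsing native_html, and annotations use data-native-* attributes. |
| """ |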
| try: |
| from bs4 import BeautifulSoup |
| except ImportError as exc: |
| raise RuntimeError( |
| "beautifulsoup4 is required for annotating OCR HTML with native HTML style metadata." |
| ) from exc |
|
|
| resolved_style_overlay_mode = _resolve_style_overlay_mode(style_overlay_mode) |
| started_at = time.perf_counter() |
|
|
| soup = BeautifulSoup(str(table_html or ""), "html.parser") |
| native_soup = BeautifulSoup(str(native_html or ""), "html.parser") |
| html_tables = [ |
| _parse_html_table_cells(table_tag, table_index=index) |
| for index, table_tag in enumerate(soup.find_all("table")) |
| ] |
| native_tables = [ |
| _build_native_html_table_descriptor(table_tag, table_index=index) |
| for index, table_tag in enumerate(native_soup.find_all("table")) |
| ] |
|
|
| annotated_tables: List[Dict[str, Any]] = [] |
| if html_tables and native_tables: |
| table_matches = _match_html_tables_to_native_tables(html_tables, native_tables) |
| for html_table in html_tables: |
| match = table_matches.get(int(html_table["index"])) |
| if match is None: |
| annotated_tables.append( |
| { |
| "table_index": int(html_table["index"]), |
| "matched": False, |
| "cells": [], |
| } |
| ) |
| continue |
|
|
| native_table = native_tables[int(match["native_index"])] |
| table_tag = html_table["tag"] |
| table_tag["data-native-index"] = str(native_table["index"]) |
| table_tag["data-native-match-score"] = f"{float(match['score']):.3f}" |
| if normalize_text(str(native_table.get("source", ""))): |
| table_tag["data-native-source"] = str(native_table["source"]) |
|
|
| cell_matches = _match_html_cells_to_native_cells(html_table, native_table) |
| annotated_cells: List[Dict[str, Any]] = [] |
| for html_cell_index, html_cell in enumerate(list(html_table.get("cells") or [])): |
| cell_tag = html_cell["tag"] |
| matched_payload = cell_matches.get(html_cell_index) |
| if matched_payload is None: |
| cell_tag["data-native-matched"] = "0" |
| annotated_cells.append( |
| { |
| "row": int(html_cell["row"]), |
| "col": int(html_cell["col"]), |
| "rowspan": int(html_cell["rowspan"]), |
| "colspan": int(html_cell["colspan"]), |
| "text": str(html_cell["text"]), |
| "matched": False, |
| } |
| ) |
| continue |
|
|
| native_cell = matched_payload["native_cell"] |
| cell_tag["data-native-matched"] = "1" |
| cell_tag["data-native-match-score"] = f"{float(matched_payload['score']):.3f}" |
| cell_tag["data-native-row"] = str(int(native_cell.get("row", 0))) |
| cell_tag["data-native-col"] = str(int(native_cell.get("col", 0))) |
| cell_tag["data-native-bold"] = "1" if bool(native_cell.get("bold")) else "0" |
| cell_tag["data-native-italic"] = "1" if bool(native_cell.get("italic")) else "0" |
| cell_tag["data-native-underline"] = "1" if bool(native_cell.get("underline")) else "0" |
| cell_tag["data-native-header"] = "1" if bool(native_cell.get("header")) else "0" |
| styled_html = _apply_native_style_overlay_to_cell( |
| cell_tag, |
| html_cell=html_cell, |
| native_cell=native_cell, |
| style_overlay_mode=resolved_style_overlay_mode, |
| match_score=float(matched_payload["score"]), |
| ) |
| annotated_cells.append( |
| { |
| "row": int(html_cell["row"]), |
| "col": int(html_cell["col"]), |
| "rowspan": int(html_cell["rowspan"]), |
| "colspan": int(html_cell["colspan"]), |
| "text": str(html_cell["text"]), |
| "matched": True, |
| "match_score": round(float(matched_payload["score"]), 4), |
| "native_row": int(native_cell.get("row", 0)), |
| "native_col": int(native_cell.get("col", 0)), |
| "bold": bool(native_cell.get("bold")), |
| "italic": bool(native_cell.get("italic")), |
| "underline": bool(native_cell.get("underline")), |
| "header": bool(native_cell.get("header")), |
| "styled_html": styled_html, |
| } |
| ) |
| annotated_tables.append( |
| { |
| "table_index": int(html_table["index"]), |
| "matched": True, |
| "match_score": round(float(match["score"]), 4), |
| "native_table_index": int(native_table["index"]), |
| "source": str(native_table.get("source", "")), |
| "cells": annotated_cells, |
| } |
| ) |
|
|
| elapsed_ms = (time.perf_counter() - started_at) * 1000.0 |
| return { |
| "html": _soup_fragment_contents(soup), |
| "mode": "html_native", |
| "tables": annotated_tables, |
| "native_tables": native_tables, |
| "style_overlay_mode": resolved_style_overlay_mode, |
| "timings_ms": { |
| "annotation_ms": round(float(elapsed_ms), 2), |
| }, |
| } |
|
|
|
|
| def overlay_table_html_with_native_html( |
| table_html: str, |
| *, |
| native_html: str, |
| effective_model_id: Optional[str] = None, |
| style_overlay_mode: Optional[str] = None, |
| timings_ms: Optional[Dict[str, Any]] = None, |
| ) -> Dict[str, Any]: |
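| """Wrap annotate_table_html_with_native_html into an overlay payload. |
|
| Adds raw_html (fence-stripped input), overlay_applied (true when any |
| table matched), the effective model id, and merged timings. |
| """ |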
| annotation = annotate_table_html_with_native_html( |
| table_html, |
| native_html=native_html, |
| style_overlay_mode=style_overlay_mode, |
| ) |
| matched_table_count = sum(1 for table in annotation["tables"] if bool(table.get("matched"))) |
| resolved_timings_ms: Dict[str, Any] = {} |
| for key, value in dict(timings_ms or {}).items(): |
| if isinstance(value, (int, float)): |
| resolved_timings_ms[str(key)] = round(float(value), 2) |
| annotation_ms = annotation.get("timings_ms", {}).get("annotation_ms") |
| if isinstance(annotation_ms, (int, float)): |
| resolved_timings_ms["annotation_ms"] = round(float(annotation_ms), 2) |
| return { |
| "html": str(annotation["html"]), |
| "raw_html": strip_code_fences(str(table_html)), |
| "tables": list(annotation["tables"]), |
| "overlay_mode": annotation.get("mode"), |
| "overlay_applied": bool(matched_table_count), |
| "style_overlay_mode": annotation.get("style_overlay_mode"), |
| "effective_model_id": normalize_text(effective_model_id or ""), |
| "timings_ms": resolved_timings_ms, |
| "native_payload": { |
| "mode": annotation.get("mode"), |
| "tables": annotation.get("native_tables"), |
| }, |
| } |
|
|
|
|
| def overlay_pdf_page_html_with_native_cells( |
| table_html: str, |
| *, |
| pdf_path: str | Path, |
| page_number: int, |
| effective_model_id: Optional[str] = None, |
| native_payload: Optional[Dict[str, Any]] = None, |
| style_overlay_mode: Optional[str] = None, |
| timings_ms: Optional[Dict[str, Any]] = None, |
| progress_handler: ProgressHandler = None, |
| ) -> Dict[str, Any]: |
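| """Wrap annotate_table_html_with_pdf_cells into an overlay payload. |
|
| Same shape as overlay_table_html_with_native_html; when no native payload |
| was supplied, the annotation's own native tables are echoed back. |
| """ |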
| annotation = annotate_table_html_with_pdf_cells( |
| table_html, |
| pdf_path=pdf_path, |
| page_number=page_number, |
| native_payload=native_payload, |
| style_overlay_mode=style_overlay_mode, |
| progress_handler=progress_handler, |
| ) |
| matched_table_count = sum(1 for table in annotation["tables"] if bool(table.get("matched"))) |
| resolved_timings_ms: Dict[str, Any] = {} |
| for key, value in dict(timings_ms or {}).items(): |
| if isinstance(value, (int, float)): |
| resolved_timings_ms[str(key)] = round(float(value), 2) |
| annotation_ms = annotation.get("timings_ms", {}).get("annotation_ms") |
| if isinstance(annotation_ms, (int, float)): |
| resolved_timings_ms["annotation_ms"] = round(float(annotation_ms), 2) |
| resolved_native_payload = native_payload |
| if resolved_native_payload is None: |
| resolved_native_payload = { |
| "mode": annotation.get("mode"), |
| "tables": annotation.get("native_tables"), |
| } |
| return { |
| "html": str(annotation["html"]), |
| "raw_html": strip_code_fences(str(table_html)), |
| "tables": list(annotation["tables"]), |
| "overlay_mode": annotation.get("mode"), |
| "overlay_applied": bool(matched_table_count), |
| "style_overlay_mode": annotation.get("style_overlay_mode"), |
| "effective_model_id": normalize_text(effective_model_id or ""), |
| "timings_ms": resolved_timings_ms, |
| "native_payload": resolved_native_payload, |
| } |
|
|
|
|
| def _table_ocr_transcription_text_from_pdf_payload(payload: Dict[str, Any]) -> TableOCRTranscriptionText: |
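| """Pack an overlay payload into a TableOCRTranscriptionText, attaching the overlay fields as attributes.""" |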
| transcription = TableOCRTranscriptionText( |
| str(payload.get("html") or ""), |
| effective_model_id=normalize_text(str(payload.get("effective_model_id") or "")), |
| ) |
| transcription.raw_html = str(payload.get("raw_html") or "") |
| transcription.tables = list(payload.get("tables") or []) |
| transcription.overlay_mode = normalize_text(str(payload.get("overlay_mode") or "")) |
| transcription.overlay_applied = bool(payload.get("overlay_applied")) |
| transcription.style_overlay_mode = normalize_text(str(payload.get("style_overlay_mode") or "")) |
| transcription.native_payload = payload.get("native_payload") |
| transcription.timings_ms = dict(payload.get("timings_ms") or {}) |
| transcription.payload = payload |
| return transcription |
|
|
|
|
| def transcribe_pdf_page_with_mistral_and_pdf_cells( |
| pdf_path: str | Path, |
| *, |
| page_number: int, |
| model_id: Optional[str] = None, |
| prompt: Optional[str] = None, |
| page_render_zoom: Optional[float] = None, |
| style_overlay_mode: Optional[str] = None, |
| progress_handler: ProgressHandler = None, |
| ) -> Dict[str, Any]: |
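| """Render a PDF page, OCR it with Mistral, and overlay native PDF cell data. |
|
| Records page render, native extraction, Mistral OCR, and total timings in |
| the returned payload's timings_ms. |
| """ |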
| try: |
| import fitz |
| except ImportError as exc: |
| raise RuntimeError("PyMuPDF is required for PDF page rendering.") from exc |
| try: |
| import pdf_table_fastpath |
| except ImportError as exc: |
| raise RuntimeError( |
| "The pdf_table_fastpath module is required for Mistral PDF cell/style overlay." |
| ) from exc |
|
|
| resolved_pdf_path = Path(pdf_path).resolve() |
| resolved_page_number = max(1, int(page_number)) |
| resolved_zoom = max(1.0, float(page_render_zoom or _pdf_fastpath_page_render_zoom())) |
| total_started_at = time.perf_counter() |
|
|
| emit_progress( |
| f"[mistral+pdf-cells] {resolved_pdf_path.name} | page {resolved_page_number} | render page", |
| progress_handler=progress_handler, |
| ) |
| render_started_at = time.perf_counter() |
| with fitz.open(str(resolved_pdf_path)) as document: |
| page = document.load_page(resolved_page_number - 1) |
| image_data_uri = _render_pdf_page_data_uri( |
| page, |
| zoom=resolved_zoom, |
| ) |
| page_render_ms = (time.perf_counter() - render_started_at) * 1000.0 |
|
|
| emit_progress( |
| f"[mistral+pdf-cells] {resolved_pdf_path.name} | page {resolved_page_number} | extract native cells", |
| progress_handler=progress_handler, |
| ) |
| native_started_at = time.perf_counter() |
| native_payload = pdf_table_fastpath.extract_tables_from_pdf_page( |
| resolved_pdf_path, |
| page_number=resolved_page_number, |
| ) |
| native_overlay_ms = (time.perf_counter() - native_started_at) * 1000.0 |
|
|
| emit_progress( |
| f"[mistral+pdf-cells] {resolved_pdf_path.name} | page {resolved_page_number} | mistral OCR", |
| progress_handler=progress_handler, |
| ) |
| mistral_started_at = time.perf_counter() |
| mistral_html = transcribe_table_image_with_mistral( |
| image_data_uri, |
| model_id=model_id, |
| prompt=prompt, |
| progress_handler=progress_handler, |
| ) |
| mistral_ocr_ms = (time.perf_counter() - mistral_started_at) * 1000.0 |
|
|
| payload = overlay_pdf_page_html_with_native_cells( |
| str(mistral_html), |
| pdf_path=resolved_pdf_path, |
| page_number=resolved_page_number, |
| effective_model_id=( |
| normalize_text(getattr(mistral_html, "effective_model_id", "") or model_id or default_mistral_ocr_model_id()) |
| ), |
| native_payload=native_payload, |
| style_overlay_mode=style_overlay_mode, |
| timings_ms={ |
| "page_render_ms": page_render_ms, |
| "native_overlay_ms": native_overlay_ms, |
| "mistral_ocr_ms": mistral_ocr_ms, |
| }, |
| progress_handler=progress_handler, |
| ) |
| total_ms = (time.perf_counter() - total_started_at) * 1000.0 |
| payload["timings_ms"]["total_ms"] = round(float(total_ms), 2) |
| emit_progress( |
| f"[mistral+pdf-cells] {resolved_pdf_path.name} | page {resolved_page_number} | " |
| f"matched_tables={sum(1 for table in payload['tables'] if bool(table.get('matched')))}/{len(payload['tables'])} | " |
| f"total_ms={total_ms:.2f}", |
| progress_handler=progress_handler, |
| ) |
| return payload |
|
|
|
|
| def transcribe_pdf_page_to_payload( |
| pdf_path: str | Path, |
| *, |
| page_number: int, |
| model_id: Optional[str] = None, |
| prompt: Optional[str] = None, |
| page_render_zoom: Optional[float] = None, |
| overlay_pdf_cells: Optional[bool] = None, |
| style_overlay_mode: Optional[str] = None, |
| max_tables: Optional[int] = None, |
| progress_handler: ProgressHandler = None, |
| ) -> Dict[str, Any]: |
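| """Transcribe one PDF page into an overlay payload, dispatching on model id. |
|
| "pdf_native_fastpath"/"pdf_fastpath" take the native fast path; Mistral |
| backends default to the PDF-cell overlay pipeline; every other backend |
| renders the page, runs transcribe_table_image, and overlays native cells |
| only when overlay_pdf_cells is set. |
| """ |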
| resolved_pdf_path = Path(pdf_path).resolve() |
| resolved_page_number = max(1, int(page_number)) |
| resolved_model = normalize_text(model_id or default_pdf_page_ocr_model_id()) |
| if not resolved_model: |
| raise RuntimeError( |
| "Missing PDF page OCR model id. Pass model_id=... or set PDF_PAGE_OCR_MODEL_ID / MISTRAL_OCR_MODEL_ID." |
| ) |
|
|
| if resolved_model.lower() in {"pdf_native_fastpath", "pdf_fastpath"}: |
| started_at = time.perf_counter() |
| rendered = transcribe_pdf_page_with_fastpath( |
| resolved_pdf_path, |
| page_number=resolved_page_number, |
| ocr_model_id=None, |
| prompt=prompt, |
| max_tables=max_tables, |
| progress_handler=progress_handler, |
| ) |
| return { |
| "html": str(rendered), |
| "raw_html": str(rendered), |
| "tables": [], |
| "overlay_mode": None, |
| "overlay_applied": False, |
| "style_overlay_mode": _resolve_style_overlay_mode(style_overlay_mode), |
| "effective_model_id": normalize_text(getattr(rendered, "effective_model_id", "") or resolved_model), |
| "timings_ms": { |
| "total_ms": round((time.perf_counter() - started_at) * 1000.0, 2), |
| }, |
| "native_payload": None, |
| } |
|
|
| backend = resolve_table_ocr_backend(resolved_model) |
| if overlay_pdf_cells is None: |
| overlay_pdf_cells = backend == "mistral" |
| if backend == "mistral" and overlay_pdf_cells: |
| return transcribe_pdf_page_with_mistral_and_pdf_cells( |
| resolved_pdf_path, |
| page_number=resolved_page_number, |
| model_id=resolved_model, |
| prompt=prompt, |
| page_render_zoom=page_render_zoom, |
| style_overlay_mode=style_overlay_mode, |
| progress_handler=progress_handler, |
| ) |
|
|
| try: |
| import fitz |
| except ImportError as exc: |
| raise RuntimeError("PyMuPDF is required for PDF page rendering.") from exc |
|
|
| total_started_at = time.perf_counter() |
| resolved_zoom = max(1.0, float(page_render_zoom or _pdf_fastpath_page_render_zoom())) |
| emit_progress( |
| f"[pdf-page-ocr] {resolved_pdf_path.name} | page {resolved_page_number} | render page", |
| progress_handler=progress_handler, |
| ) |
| render_started_at = time.perf_counter() |
| with fitz.open(str(resolved_pdf_path)) as document: |
| page = document.load_page(resolved_page_number - 1) |
| image_data_uri = _render_pdf_page_data_uri( |
| page, |
| zoom=resolved_zoom, |
| ) |
| page_render_ms = (time.perf_counter() - render_started_at) * 1000.0 |
|
|
| emit_progress( |
| f"[pdf-page-ocr] {resolved_pdf_path.name} | page {resolved_page_number} | {resolved_model}", |
| progress_handler=progress_handler, |
| ) |
| ocr_started_at = time.perf_counter() |
| rendered_html = transcribe_table_image( |
| image_data_uri, |
| model_id=resolved_model, |
| prompt=prompt, |
| progress_handler=progress_handler, |
| ) |
| ocr_ms = (time.perf_counter() - ocr_started_at) * 1000.0 |
| effective_model_id = normalize_text(getattr(rendered_html, "effective_model_id", "") or resolved_model) |
|
|
| if overlay_pdf_cells: |
| try: |
| import pdf_table_fastpath |
| except ImportError as exc: |
| raise RuntimeError( |
| "The pdf_table_fastpath module is required for PDF cell/style overlay." |
| ) from exc |
| emit_progress( |
| f"[pdf-page-ocr] {resolved_pdf_path.name} | page {resolved_page_number} | extract native cells", |
| progress_handler=progress_handler, |
| ) |
| native_started_at = time.perf_counter() |
| native_payload = pdf_table_fastpath.extract_tables_from_pdf_page( |
| resolved_pdf_path, |
| page_number=resolved_page_number, |
| ) |
| native_overlay_ms = (time.perf_counter() - native_started_at) * 1000.0 |
| payload = overlay_pdf_page_html_with_native_cells( |
| str(rendered_html), |
| pdf_path=resolved_pdf_path, |
| page_number=resolved_page_number, |
| effective_model_id=effective_model_id, |
| native_payload=native_payload, |
| style_overlay_mode=style_overlay_mode, |
| timings_ms={ |
| "page_render_ms": page_render_ms, |
| "ocr_ms": ocr_ms, |
| "native_overlay_ms": native_overlay_ms, |
| }, |
| progress_handler=progress_handler, |
| ) |
| else: |
| payload = { |
| "html": strip_code_fences(str(rendered_html)), |
| "raw_html": strip_code_fences(str(rendered_html)), |
| "tables": [], |
| "overlay_mode": None, |
| "overlay_applied": False, |
| "style_overlay_mode": _resolve_style_overlay_mode(style_overlay_mode), |
| "effective_model_id": effective_model_id, |
| "timings_ms": { |
| "page_render_ms": round(float(page_render_ms), 2), |
| "ocr_ms": round(float(ocr_ms), 2), |
| }, |
| "native_payload": None, |
| } |
|
|
| total_ms = (time.perf_counter() - total_started_at) * 1000.0 |
| payload["timings_ms"]["total_ms"] = round(float(total_ms), 2) |
| emit_progress( |
| f"[pdf-page-ocr] {resolved_pdf_path.name} | page {resolved_page_number} | " |
| f"overlay={'on' if payload.get('overlay_applied') else 'off'} | total_ms={total_ms:.2f}", |
| progress_handler=progress_handler, |
| ) |
| return payload |
|
|
|
|
| def transcribe_pdf_page( |
| pdf_path: str | Path, |
| *, |
| page_number: int, |
| model_id: Optional[str] = None, |
| prompt: Optional[str] = None, |
| page_render_zoom: Optional[float] = None, |
| overlay_pdf_cells: Optional[bool] = None, |
| style_overlay_mode: Optional[str] = None, |
| max_tables: Optional[int] = None, |
| progress_handler: ProgressHandler = None, |
| ) -> TableOCRTranscriptionText: |
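| """Transcribe one PDF page and return a TableOCRTranscriptionText. |
|
| Thin wrapper over transcribe_pdf_page_to_payload that packs the payload |
| into a str subclass carrying the overlay fields as attributes. |
|
| Example (a minimal sketch; the path and model id below are illustrative, |
| not shipped defaults): |
|
| text = transcribe_pdf_page( |
| "filings/example.pdf", |
| page_number=3, |
| model_id="mistral-ocr-latest", |
| ) |
| html_fragment = str(text)  # annotated HTML for the page |
| overlay_on = text.overlay_applied  # True when PDF cells matched |
| """ |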
| payload = transcribe_pdf_page_to_payload( |
| pdf_path, |
| page_number=page_number, |
| model_id=model_id, |
| prompt=prompt, |
| page_render_zoom=page_render_zoom, |
| overlay_pdf_cells=overlay_pdf_cells, |
| style_overlay_mode=style_overlay_mode, |
| max_tables=max_tables, |
| progress_handler=progress_handler, |
| ) |
| return _table_ocr_transcription_text_from_pdf_payload(payload) |
|
|
|
|
| def transcribe_pdf_page_with_fastpath( |
| pdf_path: str | Path, |
| *, |
| page_number: int, |
| ocr_model_id: Optional[str] = None, |
| prompt: Optional[str] = None, |
| max_tables: Optional[int] = None, |
| progress_handler: ProgressHandler = None, |
| ) -> str: |
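| """Extract table HTML from a PDF page without a vision model when possible. |
|
| Uses pdf_table_fastpath's native extraction; when the page only yields |
| image-morphology table regions, each region is rendered and sent to the |
| configured raster-fallback OCR model instead. |
| """ |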
| try: |
| import pdf_table_fastpath |
| except ImportError as exc: |
| raise RuntimeError( |
| "The pdf_table_fastpath module is required for native PDF table extraction." |
| ) from exc |
|
|
| resolved_pdf_path = Path(pdf_path).resolve() |
| resolved_page_number = max(1, int(page_number)) |
| emit_progress( |
| f"[pdf-fastpath] {resolved_pdf_path.name} | page {resolved_page_number}", |
| progress_handler=progress_handler, |
| ) |
| payload = pdf_table_fastpath.extract_tables_from_pdf_page( |
| resolved_pdf_path, |
| page_number=resolved_page_number, |
| ) |
| html_fragments = [ |
| str(fragment) |
| for fragment in payload.get("html_fragments") or [] |
| if normalize_text(fragment) |
| ] |
| if payload.get("mode") == "image_morphology" and payload.get("tables"): |
| try: |
| import fitz |
| except ImportError as exc: |
| raise RuntimeError("PyMuPDF is required for PDF fast-path raster fallback OCR.") from exc |
|
|
| resolved_fallback_model = _pdf_fastpath_raster_fallback_model_id(ocr_model_id) |
| zoom = _pdf_fastpath_raster_render_zoom() |
| padding_pt = _pdf_fastpath_raster_padding_pt() |
| table_limit = max(1, int(max_tables or _pdf_fastpath_raster_max_tables())) |
| raster_tables = list(payload.get("tables") or [])[:table_limit] |
| emitted_fragments: List[str] = [] |
| with fitz.open(str(resolved_pdf_path)) as document: |
| page = document.load_page(resolved_page_number - 1) |
| for table_index, table_payload in enumerate(raster_tables, start=1): |
| bbox = table_payload.get("bbox") if isinstance(table_payload, dict) else None |
| if not isinstance(bbox, list) or len(bbox) != 4: |
| continue |
| emit_progress( |
| f"[pdf-fastpath] {resolved_pdf_path.name} | page {resolved_page_number} | raster table {table_index}/{len(raster_tables)} -> {resolved_fallback_model}", |
| progress_handler=progress_handler, |
| ) |
| image_data_uri = _render_pdf_table_region_data_uri( |
| page, |
| bbox=bbox, |
| zoom=zoom, |
| padding_pt=padding_pt, |
| ) |
| rendered_fragment = transcribe_table_image( |
| image_data_uri, |
| model_id=resolved_fallback_model, |
| prompt=prompt, |
| progress_handler=progress_handler, |
| ) |
| if normalize_text(rendered_fragment): |
| emitted_fragments.append(strip_code_fences(str(rendered_fragment))) |
| if not emitted_fragments: |
| raise TableOCRContentError( |
| "PDF fast-path raster fallback found candidate regions but OCR did not return table HTML.", |
| details={ |
| "pdf_path": str(resolved_pdf_path), |
| "page_number": resolved_page_number, |
| "mode": payload.get("mode"), |
| "table_count": len(payload.get("tables") or []), |
| "ocr_model_id": resolved_fallback_model, |
| }, |
| ) |
| emit_progress( |
| f"[pdf-fastpath] {resolved_pdf_path.name} | page {resolved_page_number} | raster tables={len(emitted_fragments)}", |
| progress_handler=progress_handler, |
| ) |
| return TableOCRTranscriptionText( |
| "\n".join(emitted_fragments), |
| effective_model_id=f"pdf_fastpath_raster+{resolved_fallback_model}", |
| ) |
| if not html_fragments: |
| raise TableOCRContentError( |
| "Native PDF fast path did not find any renderable table HTML fragments.", |
| details={ |
| "pdf_path": str(resolved_pdf_path), |
| "page_number": resolved_page_number, |
| "mode": payload.get("mode"), |
| "table_count": len(payload.get("tables") or []), |
| }, |
| ) |
| emit_progress( |
| f"[pdf-fastpath] {resolved_pdf_path.name} | page {resolved_page_number} | tables={len(html_fragments)}", |
| progress_handler=progress_handler, |
| ) |
| return TableOCRTranscriptionText( |
| "\n".join(html_fragments), |
| effective_model_id="pdf_native_fastpath", |
| ) |
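
# Usage sketch (illustrative; the path and model id are placeholders). At
# runtime the returned value is a TableOCRTranscriptionText, so the metadata
# attribute is available even though the annotation says str:
#
#     fragment = transcribe_pdf_page_with_fastpath(
#         "reports/example.pdf",
#         page_number=1,
#         ocr_model_id="stepfun-ai/GOT-OCR2_0",
#     )
#     print(fragment.effective_model_id)  # "pdf_native_fastpath" or
#                                         # "pdf_fastpath_raster+<model>"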


def transcribe_table_image(
    image_data_uri: str,
    *,
    model_id: str,
    prompt: Optional[str] = None,
    progress_handler: ProgressHandler = None,
) -> str:
    """Route a table image to the OCR backend implied by the model id.

    Model ids that do not resolve to a dedicated local backend fall through
    to the OpenRouter path at the bottom of the chain.
    """
    resolved_model = normalize_text(model_id)
    if not resolved_model:
        raise RuntimeError(
            "Missing OCR model id. Pass an explicit model id or set DEEPSEEK_OCR_MODEL_ID / OPENROUTER_MODEL_ID."
        )

    backend = resolve_table_ocr_backend(resolved_model)
    image_data_uri = _normalize_table_ocr_image_data_uri(
        image_data_uri,
        progress_handler=progress_handler,
    )
    if backend == "firered":
        return transcribe_table_image_with_firered(
            image_data_uri,
            model_id=resolved_model,
            prompt=prompt,
            progress_handler=progress_handler,
        )
    if backend == "deepseek":
        return transcribe_table_image_with_deepseek(
            image_data_uri,
            model_id=resolved_model,
            prompt=prompt,
            progress_handler=progress_handler,
        )
    if backend == "mistral":
        return transcribe_table_image_with_mistral(
            image_data_uri,
            model_id=resolved_model,
            prompt=prompt,
            progress_handler=progress_handler,
        )
    if backend == "qianfan":
        return transcribe_table_image_with_qianfan(
            image_data_uri,
            model_id=resolved_model,
            prompt=prompt,
            progress_handler=progress_handler,
        )
    if backend == "glm_ocr":
        return transcribe_table_image_with_glm_ocr(
            image_data_uri,
            model_id=resolved_model,
            prompt=prompt,
            progress_handler=progress_handler,
        )
    if backend == "paddleocr_vl":
        return transcribe_table_image_with_paddleocr_vl(
            image_data_uri,
            model_id=resolved_model,
            prompt=prompt,
            progress_handler=progress_handler,
        )
    if backend == "got_ocr":
        return transcribe_table_image_with_got_ocr(
            image_data_uri,
            model_id=resolved_model,
            prompt=prompt,
            progress_handler=progress_handler,
        )
    if backend == "monkeyocr":
        return transcribe_table_image_with_monkeyocr(
            image_data_uri,
            model_id=resolved_model,
            prompt=prompt,
            progress_handler=progress_handler,
        )
    if backend == "qwen_ocr":
        return transcribe_table_image_with_qwen_ocr(
            image_data_uri,
            model_id=resolved_model,
            prompt=prompt,
            progress_handler=progress_handler,
        )
    if backend == "gemma4_hf":
        return transcribe_table_image_with_gemma4_hf(
            image_data_uri,
            model_id=resolved_model,
            prompt=prompt,
            progress_handler=progress_handler,
        )
    if backend == "gemma4_llamacpp":
        return transcribe_table_image_with_gemma4_llamacpp(
            image_data_uri,
            model_id=resolved_model,
            prompt=prompt,
            progress_handler=progress_handler,
        )
    if backend == "gemma4_vllm":
        return transcribe_table_image_with_gemma4_vllm(
            image_data_uri,
            model_id=resolved_model,
            prompt=prompt,
            progress_handler=progress_handler,
        )
    return transcribe_table_image_with_openrouter(
        image_data_uri,
        model_id=resolved_model,
        prompt=prompt,
        progress_handler=progress_handler,
    )
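
# Usage sketch (illustrative): building a data URI from a local PNG and routing
# it through the dispatcher above. The file path is a hypothetical placeholder;
# base64 and Path are already imported at module top.
#
#     raw = Path("table.png").read_bytes()
#     uri = "data:image/png;base64," + base64.b64encode(raw).decode("ascii")
#     html_fragment = transcribe_table_image(uri, model_id=_GOT_OCR_DEFAULT_MODEL_ID)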

