repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/tesseract_ocr_model.py | docling/models/tesseract_ocr_model.py | from __future__ import annotations
import logging
from pathlib import Path
from typing import Iterable, Optional, Type
from docling_core.types.doc import BoundingBox, CoordOrigin
from docling_core.types.doc.page import TextCell
from docling.datamodel.accelerator_options import AcceleratorOptions
from docling.datamodel.base_models import Page
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import (
OcrOptions,
TesseractOcrOptions,
)
from docling.datamodel.settings import settings
from docling.models.base_ocr_model import BaseOcrModel
from docling.utils.ocr_utils import (
map_tesseract_script,
parse_tesseract_orientation,
tesseract_box_to_bounding_rectangle,
)
from docling.utils.profiling import TimeRecorder
_log = logging.getLogger(__name__)
class TesseractOcrModel(BaseOcrModel):
    """OCR model built on the `tesserocr` Python bindings for Tesseract.

    Supports a fixed language list or "auto" mode, in which orientation and
    script detection (OSD) selects a script-specific model per OCR rectangle.
    """

    def __init__(
        self,
        enabled: bool,
        artifacts_path: Optional[Path],
        options: TesseractOcrOptions,
        accelerator_options: AcceleratorOptions,
    ):
        """Set up the main OCR reader and the OSD reader.

        Raises:
            ImportError: if tesserocr is missing or broken, or if no
                Tesseract language models can be found.
        """
        super().__init__(
            enabled=enabled,
            artifacts_path=artifacts_path,
            options=options,
            accelerator_options=accelerator_options,
        )
        self.options: TesseractOcrOptions
        # "auto" in the language list enables per-rectangle script detection.
        self._is_auto: bool = "auto" in self.options.lang
        self.scale = 3  # multiplier for 72 dpi == 216 dpi.
        self.reader = None
        # Lazily-populated cache of per-script PyTessBaseAPI readers,
        # filled in __call__ when running in auto mode.
        self.script_readers: dict[str, tesserocr.PyTessBaseAPI] = {}
        if self.enabled:
            install_errmsg = (
                "tesserocr is not correctly installed. "
                "Please install it via `pip install tesserocr` to use this OCR engine. "
                "Note that tesserocr might have to be manually compiled for working with "
                "your Tesseract installation. The Docling documentation provides examples for it. "
                "Alternatively, Docling has support for other OCR engines. See the documentation: "
                "https://docling-project.github.io/docling/installation/"
            )
            missing_langs_errmsg = (
                "tesserocr is not correctly configured. No language models have been detected. "
                "Please ensure that the TESSDATA_PREFIX envvar points to tesseract languages dir. "
                "You can find more information how to setup other OCR engines in Docling "
                "documentation: "
                "https://docling-project.github.io/docling/installation/"
            )
            try:
                import tesserocr
            except ImportError:
                raise ImportError(install_errmsg)
            try:
                tesseract_version = tesserocr.tesseract_version()
            except Exception:
                # A broken native installation can fail even after a
                # successful import.
                raise ImportError(install_errmsg)
            _, self._tesserocr_languages = tesserocr.get_languages()
            if not self._tesserocr_languages:
                raise ImportError(missing_langs_errmsg)
            # Initialize the tesseractAPI
            _log.debug("Initializing TesserOCR: %s", tesseract_version)
            lang = "+".join(self.options.lang)
            # Some installs expose script models as "script/Latin", others as
            # plain names; remember which prefix this install uses.
            if any(lang.startswith("script/") for lang in self._tesserocr_languages):
                self.script_prefix = "script/"
            else:
                self.script_prefix = ""
            tesserocr_kwargs = {
                "init": True,
                "oem": tesserocr.OEM.DEFAULT,
            }
            self.osd_reader = None
            if self.options.path is not None:
                tesserocr_kwargs["path"] = self.options.path
            # Set main OCR reader with configurable PSM
            main_psm = (
                self.options.psm if self.options.psm is not None else tesserocr.PSM.AUTO
            )
            if lang == "auto":
                self.reader = tesserocr.PyTessBaseAPI(psm=main_psm, **tesserocr_kwargs)
            else:
                self.reader = tesserocr.PyTessBaseAPI(
                    lang=lang,
                    psm=main_psm,
                    **tesserocr_kwargs,
                )
            # OSD reader must use PSM.OSD_ONLY for orientation detection
            self.osd_reader = tesserocr.PyTessBaseAPI(
                lang="osd", psm=tesserocr.PSM.OSD_ONLY, **tesserocr_kwargs
            )
            self.reader_RIL = tesserocr.RIL
def __del__(self):
    """Release all Tesseract API handles held by this model.

    Fixes two defects of the original finalizer:
    - ``osd_reader`` was never ``End()``-ed, leaking its native handle;
    - attributes may not exist if ``__init__`` raised before assigning
      them, which made ``__del__`` itself raise ``AttributeError``.
    """
    # Finalize the tesseractAPI instances; use getattr defaults so a
    # partially-constructed object can still be finalized safely.
    reader = getattr(self, "reader", None)
    if reader is not None:
        reader.End()
    osd_reader = getattr(self, "osd_reader", None)
    if osd_reader is not None:
        osd_reader.End()
    for script_reader in getattr(self, "script_readers", {}).values():
        script_reader.End()
def __call__(
    self, conv_res: ConversionResult, page_batch: Iterable[Page]
) -> Iterable[Page]:
    """Run Tesseract OCR over each page's bitmap regions.

    For every OCR rectangle: detect orientation/script via the OSD reader,
    counter-rotate the image if needed, pick a script-specific reader in
    auto mode, extract per-textline cells, and attach the merged cells to
    the page. Pages with invalid backends are yielded unchanged.
    """
    if not self.enabled:
        yield from page_batch
        return
    for page_i, page in enumerate(page_batch):
        assert page._backend is not None
        if not page._backend.is_valid():
            yield page
        else:
            with TimeRecorder(conv_res, "ocr"):
                assert self.reader is not None
                assert self.osd_reader is not None
                assert self._tesserocr_languages is not None
                ocr_rects = self.get_ocr_rects(page)
                all_ocr_cells = []
                for ocr_rect_i, ocr_rect in enumerate(ocr_rects):
                    # Skip zero area boxes
                    if ocr_rect.area() == 0:
                        continue
                    high_res_image = page._backend.get_page_image(
                        scale=self.scale, cropbox=ocr_rect
                    )
                    local_reader = self.reader
                    self.osd_reader.SetImage(high_res_image)
                    doc_orientation = 0
                    osd = self.osd_reader.DetectOrientationScript()
                    # No text, or Orientation and Script detection failure
                    if osd is None:
                        _log.error(
                            "OSD failed for doc (doc %s, page: %s, "
                            "OCR rectangle: %s)",
                            conv_res.input.file,
                            page_i,
                            ocr_rect_i,
                        )
                        # Skipping if OSD fail when in auto mode, otherwise proceed
                        # to OCR in the hope OCR will succeed while OSD failed
                        if self._is_auto:
                            continue
                    else:
                        doc_orientation = parse_tesseract_orientation(
                            osd["orient_deg"]
                        )
                        if doc_orientation != 0:
                            # Counter-rotate so Tesseract sees upright text.
                            high_res_image = high_res_image.rotate(
                                -doc_orientation, expand=True
                            )
                        if self._is_auto:
                            script = osd["script_name"]
                            script = map_tesseract_script(script)
                            lang = f"{self.script_prefix}{script}"
                            # Check if the detected language is present in the system
                            if lang not in self._tesserocr_languages:
                                msg = f"Tesseract detected the script '{script}' and language '{lang}'."
                                msg += " However this language is not installed in your system and will be ignored."
                                _log.warning(msg)
                            else:
                                # Create (and cache) a reader for this script
                                # on first encounter.
                                if script not in self.script_readers:
                                    import tesserocr

                                    self.script_readers[script] = (
                                        tesserocr.PyTessBaseAPI(
                                            path=self.reader.GetDatapath(),
                                            lang=lang,
                                            psm=self.options.psm
                                            if self.options.psm is not None
                                            else tesserocr.PSM.AUTO,
                                            init=True,
                                            oem=tesserocr.OEM.DEFAULT,
                                        )
                                    )
                                local_reader = self.script_readers[script]
                    local_reader.SetImage(high_res_image)
                    boxes = local_reader.GetComponentImages(
                        self.reader_RIL.TEXTLINE, True
                    )
                    cells = []
                    for ix, (im, box, _, _) in enumerate(boxes):
                        # Set the area of interest. Tesseract uses Bottom-Left for the origin
                        local_reader.SetRectangle(
                            box["x"], box["y"], box["w"], box["h"]
                        )
                        # Extract text within the bounding box
                        text = local_reader.GetUTF8Text().strip()
                        confidence = local_reader.MeanTextConf()
                        left, top = box["x"], box["y"]
                        right = left + box["w"]
                        bottom = top + box["h"]
                        bbox = BoundingBox(
                            l=left,
                            t=top,
                            r=right,
                            b=bottom,
                            coord_origin=CoordOrigin.TOPLEFT,
                        )
                        # Map back into page coordinates, undoing the scale,
                        # crop offset and any rotation applied above.
                        rect = tesseract_box_to_bounding_rectangle(
                            bbox,
                            original_offset=ocr_rect,
                            scale=self.scale,
                            orientation=doc_orientation,
                            im_size=high_res_image.size,
                        )
                        cells.append(
                            TextCell(
                                index=ix,
                                text=text,
                                orig=text,
                                from_ocr=True,
                                confidence=confidence,
                                rect=rect,
                            )
                        )
                    # del high_res_image
                    all_ocr_cells.extend(cells)
                # Post-process the cells
                self.post_process_cells(all_ocr_cells, page)
                # DEBUG code:
                if settings.debug.visualize_ocr:
                    self.draw_ocr_rects_and_cells(conv_res, page, ocr_rects)
                yield page
@classmethod
def get_options_type(cls) -> Type[OcrOptions]:
    """Return the options class consumed by this OCR model."""
    return TesseractOcrOptions
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/rapid_ocr_model.py | docling/models/rapid_ocr_model.py | import logging
from collections.abc import Iterable
from pathlib import Path
from typing import Literal, Optional, Type, TypedDict
import numpy
from docling_core.types.doc import BoundingBox, CoordOrigin
from docling_core.types.doc.page import BoundingRectangle, TextCell
from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
from docling.datamodel.base_models import Page
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import (
OcrOptions,
RapidOcrOptions,
)
from docling.datamodel.settings import settings
from docling.models.base_ocr_model import BaseOcrModel
from docling.utils.accelerator_utils import decide_device
from docling.utils.profiling import TimeRecorder
from docling.utils.utils import download_url_with_progress
_log = logging.getLogger(__name__)
_ModelPathEngines = Literal["onnxruntime", "torch"]
_ModelPathTypes = Literal[
"det_model_path", "cls_model_path", "rec_model_path", "rec_keys_path", "font_path"
]
class _ModelPathDetail(TypedDict):
    """Download URL and repo-relative local path for one model artifact."""

    url: str
    path: str
class RapidOcrModel(BaseOcrModel):
    """OCR model backed by RapidOCR (PP-OCR detection/cls/recognition)."""

    # Subfolder under the model cache dir where artifacts are stored.
    _model_repo_folder = "RapidOcr"

    # from https://github.com/RapidAI/RapidOCR/blob/main/python/rapidocr/default_models.yaml
    # matching the default config in https://github.com/RapidAI/RapidOCR/blob/main/python/rapidocr/config.yaml
    # and naming f"{file_info.engine_type.value}.{file_info.ocr_version.value}.{file_info.task_type.value}"
    _default_models: dict[
        _ModelPathEngines, dict[_ModelPathTypes, _ModelPathDetail]
    ] = {
        "onnxruntime": {
            "det_model_path": {
                "url": "https://www.modelscope.cn/models/RapidAI/RapidOCR/resolve/v3.4.0/onnx/PP-OCRv4/det/ch_PP-OCRv4_det_infer.onnx",
                "path": "onnx/PP-OCRv4/det/ch_PP-OCRv4_det_infer.onnx",
            },
            "cls_model_path": {
                "url": "https://www.modelscope.cn/models/RapidAI/RapidOCR/resolve/v3.4.0/onnx/PP-OCRv4/cls/ch_ppocr_mobile_v2.0_cls_infer.onnx",
                "path": "onnx/PP-OCRv4/cls/ch_ppocr_mobile_v2.0_cls_infer.onnx",
            },
            "rec_model_path": {
                "url": "https://www.modelscope.cn/models/RapidAI/RapidOCR/resolve/v3.4.0/onnx/PP-OCRv4/rec/ch_PP-OCRv4_rec_infer.onnx",
                "path": "onnx/PP-OCRv4/rec/ch_PP-OCRv4_rec_infer.onnx",
            },
            "rec_keys_path": {
                "url": "https://www.modelscope.cn/models/RapidAI/RapidOCR/resolve/v2.0.7/paddle/PP-OCRv4/rec/ch_PP-OCRv4_rec_infer/ppocr_keys_v1.txt",
                "path": "paddle/PP-OCRv4/rec/ch_PP-OCRv4_rec_infer/ppocr_keys_v1.txt",
            },
            "font_path": {
                "url": "https://www.modelscope.cn/models/RapidAI/RapidOCR/resolve/v3.4.0/resources/fonts/FZYTK.TTF",
                "path": "fonts/FZYTK.TTF",
            },
        },
        "torch": {
            "det_model_path": {
                "url": "https://www.modelscope.cn/models/RapidAI/RapidOCR/resolve/v3.4.0/torch/PP-OCRv4/det/ch_PP-OCRv4_det_infer.pth",
                "path": "torch/PP-OCRv4/det/ch_PP-OCRv4_det_infer.pth",
            },
            "cls_model_path": {
                "url": "https://www.modelscope.cn/models/RapidAI/RapidOCR/resolve/v3.4.0/torch/PP-OCRv4/cls/ch_ptocr_mobile_v2.0_cls_infer.pth",
                "path": "torch/PP-OCRv4/cls/ch_ptocr_mobile_v2.0_cls_infer.pth",
            },
            "rec_model_path": {
                "url": "https://www.modelscope.cn/models/RapidAI/RapidOCR/resolve/v3.4.0/torch/PP-OCRv4/rec/ch_PP-OCRv4_rec_infer.pth",
                "path": "torch/PP-OCRv4/rec/ch_PP-OCRv4_rec_infer.pth",
            },
            "rec_keys_path": {
                "url": "https://www.modelscope.cn/models/RapidAI/RapidOCR/resolve/v3.4.0/paddle/PP-OCRv4/rec/ch_PP-OCRv4_rec_infer/ppocr_keys_v1.txt",
                "path": "paddle/PP-OCRv4/rec/ch_PP-OCRv4_rec_infer/ppocr_keys_v1.txt",
            },
            "font_path": {
                "url": "https://www.modelscope.cn/models/RapidAI/RapidOCR/resolve/v3.4.0/resources/fonts/FZYTK.TTF",
                "path": "fonts/FZYTK.TTF",
            },
        },
    }
def __init__(
    self,
    enabled: bool,
    artifacts_path: Optional[Path],
    options: RapidOcrOptions,
    accelerator_options: AcceleratorOptions,
):
    """Configure and instantiate the RapidOCR engine.

    Resolves explicit model paths from the options, falling back to the
    default artifact layout under ``artifacts_path`` when set.

    Raises:
        ImportError: if the ``rapidocr`` package is not installed.
    """
    super().__init__(
        enabled=enabled,
        artifacts_path=artifacts_path,
        options=options,
        accelerator_options=accelerator_options,
    )
    self.options: RapidOcrOptions
    self.scale = 3  # multiplier for 72 dpi == 216 dpi.
    if self.enabled:
        try:
            from rapidocr import EngineType, RapidOCR  # type: ignore
        except ImportError:
            raise ImportError(
                "RapidOCR is not installed. Please install it via `pip install rapidocr onnxruntime` to use this OCR engine. "
                "Alternatively, Docling has support for other OCR engines. See the documentation."
            )
        # Decide the accelerator devices
        device = decide_device(accelerator_options.device)
        use_cuda = str(AcceleratorDevice.CUDA.value).lower() in device
        use_dml = accelerator_options.device == AcceleratorDevice.AUTO
        intra_op_num_threads = accelerator_options.num_threads
        gpu_id = 0
        if use_cuda and ":" in device:
            gpu_id = int(device.split(":")[1])
        # Map the option string onto the RapidOCR engine enum; unknown
        # values silently fall back to onnxruntime.
        _ALIASES = {
            "onnxruntime": EngineType.ONNXRUNTIME,
            "openvino": EngineType.OPENVINO,
            "paddle": EngineType.PADDLE,
            "torch": EngineType.TORCH,
        }
        backend_enum = _ALIASES.get(self.options.backend, EngineType.ONNXRUNTIME)
        det_model_path = self.options.det_model_path
        cls_model_path = self.options.cls_model_path
        rec_model_path = self.options.rec_model_path
        rec_keys_path = self.options.rec_keys_path
        font_path = self.options.font_path
        if artifacts_path is not None:
            # Fall back to the bundled default artifact locations for any
            # path the user did not set explicitly.
            det_model_path = (
                det_model_path
                or artifacts_path
                / self._model_repo_folder
                / self._default_models[backend_enum.value]["det_model_path"]["path"]
            )
            cls_model_path = (
                cls_model_path
                or artifacts_path
                / self._model_repo_folder
                / self._default_models[backend_enum.value]["cls_model_path"]["path"]
            )
            rec_model_path = (
                rec_model_path
                or artifacts_path
                / self._model_repo_folder
                / self._default_models[backend_enum.value]["rec_model_path"]["path"]
            )
            rec_keys_path = (
                rec_keys_path
                or artifacts_path
                / self._model_repo_folder
                / self._default_models[backend_enum.value]["rec_keys_path"]["path"]
            )
            font_path = (
                font_path
                or artifacts_path
                / self._model_repo_folder
                / self._default_models[backend_enum.value]["font_path"]["path"]
            )
        # Warn about any resolved path that does not exist on disk.
        # BUGFIX: the original tuple listed `rec_keys_path` twice and never
        # checked `det_model_path`.
        for model_path in (
            det_model_path,
            cls_model_path,
            rec_model_path,
            rec_keys_path,
            font_path,
        ):
            if model_path is None:
                continue
            if not Path(model_path).exists():
                _log.warning(f"The provided model path {model_path} is not found.")
        params = {
            # Global settings (these are still correct)
            "Global.text_score": self.options.text_score,
            # NOTE(review): the resolved `font_path` fallback above is not
            # used here — confirm whether Global.font_path should receive it.
            "Global.font_path": self.options.font_path,
            # "Global.verbose": self.options.print_verbose,
            # Detection model settings
            "Det.model_path": det_model_path,
            "Det.use_cuda": use_cuda,
            "Det.use_dml": use_dml,
            "Det.intra_op_num_threads": intra_op_num_threads,
            # Classification model settings
            "Cls.model_path": cls_model_path,
            "Cls.use_cuda": use_cuda,
            "Cls.use_dml": use_dml,
            "Cls.intra_op_num_threads": intra_op_num_threads,
            # Recognition model settings
            "Rec.model_path": rec_model_path,
            "Rec.font_path": self.options.rec_font_path,
            "Rec.rec_keys_path": rec_keys_path,
            "Rec.use_cuda": use_cuda,
            "Rec.use_dml": use_dml,
            "Rec.intra_op_num_threads": intra_op_num_threads,
            "Det.engine_type": backend_enum,
            "Cls.engine_type": backend_enum,
            "Rec.engine_type": backend_enum,
            "EngineConfig.paddle.use_cuda": use_cuda,
            "EngineConfig.paddle.gpu_id": gpu_id,
            "EngineConfig.torch.use_cuda": use_cuda,
            "EngineConfig.torch.gpu_id": gpu_id,
        }
        if self.options.rec_font_path is not None:
            _log.warning(
                "The 'rec_font_path' option for RapidOCR is deprecated. Please use 'font_path' instead."
            )
        # User-supplied params take precedence over everything above.
        user_params = self.options.rapidocr_params
        if user_params:
            _log.debug("Overwriting RapidOCR params with user-provided values.")
            params.update(user_params)
        self.reader = RapidOCR(
            params=params,
        )
@staticmethod
def download_models(
    backend: _ModelPathEngines,
    local_dir: Optional[Path] = None,
    force: bool = False,
    progress: bool = False,
) -> Path:
    """Download the default RapidOCR artifacts for *backend*.

    Args:
        backend: engine flavor whose artifacts to fetch ("onnxruntime" or "torch").
        local_dir: target directory; defaults to the Docling cache dir.
        force: re-download files that already exist locally.
        progress: show a progress bar while downloading.

    Returns:
        The directory the models were written to.
    """
    if local_dir is None:
        local_dir = settings.cache_dir / "models" / RapidOcrModel._model_repo_folder
    local_dir.mkdir(parents=True, exist_ok=True)
    # Download models
    for model_type, model_details in RapidOcrModel._default_models[backend].items():
        output_path = local_dir / model_details["path"]
        if output_path.exists() and not force:
            continue
        output_path.parent.mkdir(exist_ok=True, parents=True)
        buf = download_url_with_progress(model_details["url"], progress=progress)
        with output_path.open("wb") as fw:
            fw.write(buf.read())
    return local_dir
def __call__(
    self, conv_res: ConversionResult, page_batch: Iterable[Page]
) -> Iterable[Page]:
    """Run RapidOCR over each page's bitmap regions and attach text cells.

    Pages with invalid backends are yielded unchanged; rectangles for
    which RapidOCR returns no boxes are skipped with a warning.
    """
    if not self.enabled:
        yield from page_batch
        return
    for page in page_batch:
        assert page._backend is not None
        if not page._backend.is_valid():
            yield page
        else:
            with TimeRecorder(conv_res, "ocr"):
                ocr_rects = self.get_ocr_rects(page)
                all_ocr_cells = []
                for ocr_rect in ocr_rects:
                    # Skip zero area boxes
                    if ocr_rect.area() == 0:
                        continue
                    high_res_image = page._backend.get_page_image(
                        scale=self.scale, cropbox=ocr_rect
                    )
                    im = numpy.array(high_res_image)
                    result = self.reader(
                        im,
                        use_det=self.options.use_det,
                        use_cls=self.options.use_cls,
                        use_rec=self.options.use_rec,
                    )
                    if result is None or result.boxes is None:
                        _log.warning("RapidOCR returned empty result!")
                        continue
                    result = list(
                        zip(result.boxes.tolist(), result.txts, result.scores)
                    )
                    del high_res_image
                    del im
                    if result is not None:
                        # Each line is (quad_points, text, score); points [0]
                        # and [2] of the quadrilateral are used as opposite
                        # box corners, mapped back to page coordinates by
                        # undoing the scale and adding the crop offset.
                        cells = [
                            TextCell(
                                index=ix,
                                text=line[1],
                                orig=line[1],
                                confidence=line[2],
                                from_ocr=True,
                                rect=BoundingRectangle.from_bounding_box(
                                    BoundingBox.from_tuple(
                                        coord=(
                                            (line[0][0][0] / self.scale)
                                            + ocr_rect.l,
                                            (line[0][0][1] / self.scale)
                                            + ocr_rect.t,
                                            (line[0][2][0] / self.scale)
                                            + ocr_rect.l,
                                            (line[0][2][1] / self.scale)
                                            + ocr_rect.t,
                                        ),
                                        origin=CoordOrigin.TOPLEFT,
                                    )
                                ),
                            )
                            for ix, line in enumerate(result)
                        ]
                        all_ocr_cells.extend(cells)
                # Post-process the cells
                self.post_process_cells(all_ocr_cells, page)
                # DEBUG code:
                if settings.debug.visualize_ocr:
                    self.draw_ocr_rects_and_cells(conv_res, page, ocr_rects)
                yield page
@classmethod
def get_options_type(cls) -> Type[OcrOptions]:
    """Return the options class consumed by this OCR model."""
    return RapidOcrOptions
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/api_vlm_model.py | docling/models/api_vlm_model.py | from collections.abc import Iterable
from concurrent.futures import ThreadPoolExecutor
from typing import Union
import numpy as np
from PIL.Image import Image
from docling.datamodel.base_models import Page, VlmPrediction, VlmStopReason
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options_vlm_model import ApiVlmOptions
from docling.exceptions import OperationNotAllowed
from docling.models.base_model import BaseVlmPageModel
from docling.models.utils.generation_utils import GenerationStopper
from docling.utils.api_image_request import (
api_image_request,
api_image_request_streaming,
)
from docling.utils.profiling import TimeRecorder
class ApiVlmModel(BaseVlmPageModel):
    """VLM page model that delegates inference to a remote API endpoint."""

    # Override the vlm_options type annotation from BaseVlmPageModel
    vlm_options: ApiVlmOptions  # type: ignore[assignment]

    def __init__(
        self,
        enabled: bool,
        enable_remote_services: bool,
        vlm_options: ApiVlmOptions,
    ):
        """Store options and request parameters for the remote VLM.

        Raises:
            OperationNotAllowed: if the model is enabled but remote
                services were not explicitly allowed.
        """
        self.enabled = enabled
        self.vlm_options = vlm_options
        if self.enabled:
            if not enable_remote_services:
                raise OperationNotAllowed(
                    "Connections to remote services is only allowed when set explicitly. "
                    "pipeline_options.enable_remote_services=True, or using the CLI "
                    "--enable-remote-services."
                )
            self.timeout = self.vlm_options.timeout
            self.concurrency = self.vlm_options.concurrency
            # Per-request parameters; option-level temperature overrides any
            # value present in vlm_options.params.
            self.params = {
                **self.vlm_options.params,
                "temperature": self.vlm_options.temperature,
            }
def __call__(
    self, conv_res: ConversionResult, page_batch: Iterable[Page]
) -> Iterable[Page]:
    """Run the remote VLM over a batch of pages.

    Pages without a valid backend or without an image are passed through
    with no prediction attached; all pages are yielded in their original
    input order.
    """
    page_list = list(page_batch)
    if not page_list:
        return
    original_order = page_list[:]
    valid_pages = []
    for page in page_list:
        assert page._backend is not None
        if page._backend.is_valid():
            valid_pages.append(page)
    # Process valid pages in batch
    if valid_pages:
        with TimeRecorder(conv_res, "vlm"):
            # Prepare images and prompts for batch processing
            images = []
            prompts = []
            pages_with_images = []
            for page in valid_pages:
                assert page.size is not None
                hi_res_image = page.get_image(
                    scale=self.vlm_options.scale, max_size=self.vlm_options.max_size
                )
                # Only process pages with valid images
                if hi_res_image is not None:
                    images.append(hi_res_image)
                    prompt = self._build_prompt_safe(page)
                    prompts.append(prompt)
                    pages_with_images.append(page)
            # Use process_images for the actual inference
            if images:  # Only if we have valid images
                with TimeRecorder(conv_res, "vlm_inference"):
                    predictions = list(self.process_images(images, prompts))
                # Attach results to pages
                for page, prediction in zip(pages_with_images, predictions):
                    page.predictions.vlm_response = prediction
    # Yield pages preserving original order
    for page in original_order:
        yield page
def process_images(
    self,
    image_batch: Iterable[Union[Image, np.ndarray]],
    prompt: Union[str, list[str]],
) -> Iterable[VlmPrediction]:
    """Process raw images without page metadata.

    Args:
        image_batch: PIL images or numpy arrays (HxW grayscale or HxWx3/4).
        prompt: a single prompt applied to every image, or one prompt per
            image.

    Yields:
        One VlmPrediction per input image, in input order.

    Raises:
        TypeError: if ``prompt`` is neither a string nor a list.
        ValueError: if a prompt list's length does not match the image
            count, or a numpy image has an unsupported shape.
    """
    images = list(image_batch)
    # Handle prompt parameter
    if isinstance(prompt, str):
        prompts = [prompt] * len(images)
    elif isinstance(prompt, list):
        if len(prompt) != len(images):
            raise ValueError(
                f"Prompt list length ({len(prompt)}) must match image count ({len(images)})"
            )
        prompts = prompt
    else:
        # BUGFIX: previously fell through with `prompts` unbound, producing
        # an UnboundLocalError instead of a clear error.
        raise TypeError(
            f"prompt must be a string or a list of strings, got {type(prompt).__name__}"
        )

    def _process_single_image(image_prompt_pair):
        image, prompt_text = image_prompt_pair
        # Convert numpy array to PIL Image if needed
        if isinstance(image, np.ndarray):
            if image.ndim == 3 and image.shape[2] in [3, 4]:
                from PIL import Image as PILImage

                image = PILImage.fromarray(image.astype(np.uint8))
            elif image.ndim == 2:
                from PIL import Image as PILImage

                image = PILImage.fromarray(image.astype(np.uint8), mode="L")
            else:
                raise ValueError(f"Unsupported numpy array shape: {image.shape}")
        # Ensure image is in RGB mode
        if image.mode != "RGB":
            image = image.convert("RGB")
        stop_reason = VlmStopReason.UNSPECIFIED
        if self.vlm_options.custom_stopping_criteria:
            # Instantiate any GenerationStopper classes before passing to streaming
            instantiated_stoppers = []
            for criteria in self.vlm_options.custom_stopping_criteria:
                if isinstance(criteria, GenerationStopper):
                    instantiated_stoppers.append(criteria)
                elif isinstance(criteria, type) and issubclass(
                    criteria, GenerationStopper
                ):
                    instantiated_stoppers.append(criteria())
                # Skip non-GenerationStopper criteria (should have been caught in validation)
            # Streaming path with early abort support
            page_tags, num_tokens = api_image_request_streaming(
                image=image,
                prompt=prompt_text,
                url=self.vlm_options.url,
                timeout=self.timeout,
                headers=self.vlm_options.headers,
                generation_stoppers=instantiated_stoppers,
                **self.params,
            )
        else:
            # Non-streaming fallback (existing behavior)
            page_tags, num_tokens, stop_reason = api_image_request(
                image=image,
                prompt=prompt_text,
                url=self.vlm_options.url,
                timeout=self.timeout,
                headers=self.vlm_options.headers,
                **self.params,
            )
        page_tags = self.vlm_options.decode_response(page_tags)
        input_prompt = prompt_text if self.vlm_options.track_input_prompt else None
        return VlmPrediction(
            text=page_tags,
            num_tokens=num_tokens,
            stop_reason=stop_reason,
            input_prompt=input_prompt,
        )

    # Fan the requests out over a thread pool; executor.map preserves
    # input order in its results.
    with ThreadPoolExecutor(max_workers=self.concurrency) as executor:
        yield from executor.map(_process_single_image, zip(images, prompts))
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/base_ocr_model.py | docling/models/base_ocr_model.py | import copy
import logging
from abc import abstractmethod
from collections.abc import Iterable
from pathlib import Path
from typing import TYPE_CHECKING, List, Optional, Type
import numpy as np
from docling_core.types.doc import BoundingBox, CoordOrigin
from docling_core.types.doc.page import TextCell
from PIL import Image, ImageDraw
from rtree import index
from docling.datamodel.accelerator_options import AcceleratorOptions
from docling.datamodel.base_models import Page
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import OcrOptions
from docling.datamodel.settings import settings
from docling.models.base_model import BaseModelWithOptions, BasePageModel
_log = logging.getLogger(__name__)
class BaseOcrModel(BasePageModel, BaseModelWithOptions):
    """Common machinery for OCR models: OCR-rectangle detection, cell
    filtering/merging, and debug visualization."""

    def __init__(
        self,
        *,
        enabled: bool,
        artifacts_path: Optional[Path],
        options: OcrOptions,
        accelerator_options: AcceleratorOptions,
    ):
        # Make sure any delay/error from import occurs on ocr model init and not first use
        from scipy.ndimage import binary_dilation, find_objects, label

        self.enabled = enabled
        self.options = options
# Computes the optimum amount and coordinates of rectangles to OCR on a given page
def get_ocr_rects(self, page: Page) -> List[BoundingBox]:
    """Compute the page rectangles that should be sent to OCR.

    Returns a single full-page rectangle when OCR is forced or bitmaps
    dominate the page, the merged bitmap regions when coverage exceeds the
    configured threshold, and an empty list when coverage is negligible.
    """
    from scipy.ndimage import binary_dilation, find_objects, label

    BITMAP_COVERAGE_TRESHOLD = 0.75
    assert page.size is not None

    def find_ocr_rects(size, bitmap_rects):
        image = Image.new(
            "1", (round(size.width), round(size.height))
        )  # '1' mode is binary
        # Draw all bitmap rects into a binary image
        draw = ImageDraw.Draw(image)
        for rect in bitmap_rects:
            x0, y0, x1, y1 = rect.as_tuple()
            x0, y0, x1, y1 = round(x0), round(y0), round(x1), round(y1)
            draw.rectangle([(x0, y0), (x1, y1)], fill=1)
        np_image = np.array(image)
        # Dilate the image by 10 pixels to merge nearby bitmap rectangles
        structure = np.ones(
            (20, 20)
        )  # Create a 20x20 structure element (10 pixels in all directions)
        np_image = binary_dilation(np_image > 0, structure=structure)
        # Find the connected components
        labeled_image, num_features = label(
            np_image > 0
        )  # Label the non-zero (bitmap) regions
        # Find enclosing bounding boxes for each connected component.
        slices = find_objects(labeled_image)
        bounding_boxes = [
            BoundingBox(
                l=slc[1].start,
                t=slc[0].start,
                r=slc[1].stop - 1,
                b=slc[0].stop - 1,
                coord_origin=CoordOrigin.TOPLEFT,
            )
            for slc in slices
        ]
        # Compute area fraction on page covered by bitmaps
        area_frac = np.sum(np_image > 0) / (size.width * size.height)
        return (area_frac, bounding_boxes)  # fraction covered # boxes

    if page._backend is not None:
        bitmap_rects = page._backend.get_bitmap_rects()
    else:
        bitmap_rects = []
    coverage, ocr_rects = find_ocr_rects(page.size, bitmap_rects)
    # return full-page rectangle if page is dominantly covered with bitmaps
    if self.options.force_full_page_ocr or coverage > max(
        BITMAP_COVERAGE_TRESHOLD, self.options.bitmap_area_threshold
    ):
        return [
            BoundingBox(
                l=0,
                t=0,
                r=page.size.width,
                b=page.size.height,
                coord_origin=CoordOrigin.TOPLEFT,
            )
        ]
    # return individual rectangles if the bitmap coverage is above the threshold
    elif coverage > self.options.bitmap_area_threshold:
        return ocr_rects
    else:  # overall coverage of bitmaps is too low, drop all bitmap rectangles.
        return []
def _filter_ocr_cells(
    self, ocr_cells: List[TextCell], programmatic_cells: List[TextCell]
) -> List[TextCell]:
    """Drop OCR cells that intersect any programmatic (PDF-native) cell.

    Builds an R-tree over the programmatic cells and keeps only those OCR
    cells whose bounding box hits nothing in the index.
    """
    props = index.Property()
    props.dimension = 2
    rtree_idx = index.Index(properties=props)
    for pos, prog_cell in enumerate(programmatic_cells):
        rtree_idx.insert(pos, prog_cell.rect.to_bounding_box().as_tuple())

    def overlaps_existing(candidate: TextCell) -> bool:
        # Any hit at all counts as an overlap — a weak criterion, but it
        # works well in practice.
        hits = rtree_idx.intersection(candidate.rect.to_bounding_box().as_tuple())
        return any(True for _ in hits)

    return [cell for cell in ocr_cells if not overlaps_existing(cell)]
def post_process_cells(self, ocr_cells: List[TextCell], page: Page) -> None:
    r"""
    Post-process the OCR cells and update the page object.

    Updates parsed_page.textline_cells directly since page.cells is now read-only.
    Merges OCR cells with the page's existing cells and refreshes the
    parsed page's line/word/char bookkeeping.
    """
    # Get existing cells from the read-only property
    existing_cells = page.cells
    # Combine existing and OCR cells with overlap filtering
    final_cells = self._combine_cells(existing_cells, ocr_cells)
    assert page.parsed_page is not None
    # Update parsed_page.textline_cells directly
    page.parsed_page.textline_cells = final_cells
    page.parsed_page.has_lines = len(final_cells) > 0
    # When force_full_page_ocr is used, PDF-extracted word/char cells are
    # unreliable. Filter out cells where from_ocr=False, keeping any OCR-
    # generated cells. This ensures downstream components (e.g., table
    # structure model) fall back to OCR-extracted textline cells.
    if self.options.force_full_page_ocr:
        page.parsed_page.word_cells = [
            c for c in page.parsed_page.word_cells if c.from_ocr
        ]
        page.parsed_page.char_cells = [
            c for c in page.parsed_page.char_cells if c.from_ocr
        ]
        page.parsed_page.has_words = len(page.parsed_page.word_cells) > 0
        page.parsed_page.has_chars = len(page.parsed_page.char_cells) > 0
def _combine_cells(
    self, existing_cells: List[TextCell], ocr_cells: List[TextCell]
) -> List[TextCell]:
    """Merge programmatic and OCR cells, then renumber their indices."""
    if self.options.force_full_page_ocr:
        # Under forced full-page OCR only the OCR output is trusted.
        merged = ocr_cells
    else:
        survivors = self._filter_ocr_cells(ocr_cells, existing_cells)
        merged = [*existing_cells, *survivors]
    # Assign consecutive indices in place.
    for new_index, cell in enumerate(merged):
        cell.index = new_index
    return merged
def draw_ocr_rects_and_cells(self, conv_res, page, ocr_rects, show: bool = False):
    """Render OCR rectangles and text cells onto the page image (debug).

    OCR rectangles are shaded yellow; OCR-derived cells are outlined in
    magenta and programmatic cells in gray. The image is either shown or
    saved under the configured debug output path.
    """
    image = copy.deepcopy(page.image)
    # Conversion factors from page coordinates to image pixels.
    scale_x = image.width / page.size.width
    scale_y = image.height / page.size.height
    draw = ImageDraw.Draw(image, "RGBA")
    # Draw OCR rectangles as yellow filled rect
    for rect in ocr_rects:
        x0, y0, x1, y1 = rect.as_tuple()
        x0 *= scale_x
        x1 *= scale_x
        # BUGFIX: y0 was previously multiplied by scale_x, distorting the
        # overlay whenever the two scale factors differ.
        y0 *= scale_y
        y1 *= scale_y
        shade_color = (255, 255, 0, 40)  # transparent yellow
        draw.rectangle([(x0, y0), (x1, y1)], fill=shade_color, outline=None)
    # Draw OCR and programmatic cells
    for tc in page.cells:
        x0, y0, x1, y1 = tc.rect.to_bounding_box().as_tuple()
        x0 *= scale_x
        x1 *= scale_x
        # BUGFIX: same scale_x/scale_y mix-up as above.
        y0 *= scale_y
        y1 *= scale_y
        if y1 <= y0:
            y1, y0 = y0, y1
        color = "magenta" if tc.from_ocr else "gray"
        draw.rectangle([(x0, y0), (x1, y1)], outline=color)
    if show:
        image.show()
    else:
        out_path: Path = (
            Path(settings.debug.debug_output_path)
            / f"debug_{conv_res.input.file.stem}"
        )
        out_path.mkdir(parents=True, exist_ok=True)
        out_file = out_path / f"ocr_page_{page.page_no:05}.png"
        image.save(str(out_file), format="png")
@abstractmethod
def __call__(
    self, conv_res: ConversionResult, page_batch: Iterable[Page]
) -> Iterable[Page]:
    """Run OCR over the given pages, yielding them with cells attached."""
    pass

@classmethod
@abstractmethod
def get_options_type(cls) -> Type[OcrOptions]:
    """Return the options class accepted by this OCR implementation."""
    pass
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/base_layout_model.py | docling/models/base_layout_model.py | from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Iterable, Sequence
from typing import Type
from docling.datamodel.base_models import LayoutPrediction, Page
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import BaseLayoutOptions
from docling.models.base_model import BaseModelWithOptions, BasePageModel
class BaseLayoutModel(BasePageModel, BaseModelWithOptions, ABC):
    """Shared interface for layout models."""

    @classmethod
    @abstractmethod
    def get_options_type(cls) -> Type[BaseLayoutOptions]:
        """Return the options type supported by this layout model."""

    @abstractmethod
    def predict_layout(
        self,
        conv_res: ConversionResult,
        pages: Sequence[Page],
    ) -> Sequence[LayoutPrediction]:
        """Produce layout predictions for the provided pages."""

    def __call__(
        self,
        conv_res: ConversionResult,
        page_batch: Iterable[Page],
    ) -> Iterable[Page]:
        """Attach one layout prediction to every page in the batch."""
        materialized = list(page_batch)
        layout_results = self.predict_layout(conv_res, materialized)
        for current_page, layout in zip(materialized, layout_results):
            current_page.predictions.layout = layout
            yield current_page
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/page_assemble_model.py | docling/models/page_assemble_model.py | import logging
import re
from collections.abc import Iterable
from typing import List
import numpy as np
from pydantic import BaseModel
from docling.datamodel.base_models import (
AssembledUnit,
ContainerElement,
FigureElement,
Page,
PageElement,
Table,
TextElement,
)
from docling.datamodel.document import ConversionResult
from docling.models.base_model import BasePageModel
from docling.models.layout_model import LayoutModel
from docling.utils.profiling import TimeRecorder
_log = logging.getLogger(__name__)
class PageAssembleOptions(BaseModel):
    """Options for the page-assembly stage (currently none defined)."""
class PageAssembleModel(BasePageModel):
    """Assemble per-page layout clusters into typed page elements.

    For each valid page, every layout cluster is converted into a
    ``TextElement``, ``Table``, ``FigureElement`` or ``ContainerElement`` and
    collected into an ``AssembledUnit`` stored on ``page.assembled``.
    """

    def __init__(self, options: PageAssembleOptions):
        self.options = options

    def sanitize_text(self, lines: List[str]) -> str:
        """Join text lines into a single string, undoing soft hyphenation.

        A line ending in ``-`` is merged with the next line (hyphen removed)
        when the words on both sides of the break are alphanumeric; otherwise
        lines are joined with a single space. Common typographic characters
        are then normalized to plain equivalents.
        """
        if len(lines) <= 1:
            return " ".join(lines)
        # Walk consecutive line pairs; mutate lines[ix] (the earlier line)
        # so that a plain "".join at the end produces the merged text.
        for ix, line in enumerate(lines[1:]):
            prev_line = lines[ix]
            if prev_line.endswith("-"):
                # Only drop the trailing hyphen when it splits a real word pair.
                prev_words = re.findall(r"\b[\w]+\b", prev_line)
                line_words = re.findall(r"\b[\w]+\b", line)
                if (
                    len(prev_words)
                    and len(line_words)
                    and prev_words[-1].isalnum()
                    and line_words[0].isalnum()
                ):
                    lines[ix] = prev_line[:-1]
            else:
                lines[ix] += " "
        sanitized_text = "".join(lines)
        # Text normalization: map typographic variants to simpler characters.
        sanitized_text = sanitized_text.replace("⁄", "/")  # noqa: RUF001
        sanitized_text = sanitized_text.replace("’", "'")  # noqa: RUF001
        sanitized_text = sanitized_text.replace("‘", "'")  # noqa: RUF001
        sanitized_text = sanitized_text.replace("“", '"')
        sanitized_text = sanitized_text.replace("”", '"')
        sanitized_text = sanitized_text.replace("•", "·")
        return sanitized_text.strip()  # Strip any leading or trailing whitespace

    def __call__(
        self, conv_res: ConversionResult, page_batch: Iterable[Page]
    ) -> Iterable[Page]:
        for page in page_batch:
            assert page._backend is not None
            if not page._backend.is_valid():
                # Invalid pages are passed through untouched.
                yield page
            else:
                with TimeRecorder(conv_res, "page_assemble"):
                    assert page.predictions.layout is not None
                    # assembles some JSON output page by page.
                    elements: List[PageElement] = []
                    headers: List[PageElement] = []
                    body: List[PageElement] = []
                    for cluster in page.predictions.layout.clusters:
                        # _log.info("Cluster label seen:", cluster.label)
                        if cluster.label in LayoutModel.TEXT_ELEM_LABELS:
                            # \x02 is used by the PDF backend as a soft-hyphen
                            # placeholder; restore it to "-" before merging.
                            textlines = [
                                cell.text.replace("\x02", "-").strip()
                                for cell in cluster.cells
                                if len(cell.text.strip()) > 0
                            ]
                            text = self.sanitize_text(textlines)
                            text_el = TextElement(
                                label=cluster.label,
                                id=cluster.id,
                                text=text,
                                page_no=page.page_no,
                                cluster=cluster,
                            )
                            elements.append(text_el)
                            # Headers/footers go into the dedicated headers
                            # list instead of the main body reading order.
                            if cluster.label in LayoutModel.PAGE_HEADER_LABELS:
                                headers.append(text_el)
                            else:
                                body.append(text_el)
                        elif cluster.label in LayoutModel.TABLE_LABELS:
                            tbl = None
                            if page.predictions.tablestructure:
                                tbl = page.predictions.tablestructure.table_map.get(
                                    cluster.id, None
                                )
                            if not tbl:  # fallback: add table without structure, if it isn't present
                                tbl = Table(
                                    label=cluster.label,
                                    id=cluster.id,
                                    text="",
                                    otsl_seq=[],
                                    table_cells=[],
                                    cluster=cluster,
                                    page_no=page.page_no,
                                )
                            elements.append(tbl)
                            body.append(tbl)
                        elif cluster.label == LayoutModel.FIGURE_LABEL:
                            fig = None
                            if page.predictions.figures_classification:
                                fig = page.predictions.figures_classification.figure_map.get(
                                    cluster.id, None
                                )
                            if not fig:  # fallback: add figure without classification, if it isn't present
                                fig = FigureElement(
                                    label=cluster.label,
                                    id=cluster.id,
                                    text="",
                                    data=None,
                                    cluster=cluster,
                                    page_no=page.page_no,
                                )
                            elements.append(fig)
                            body.append(fig)
                        elif cluster.label in LayoutModel.CONTAINER_LABELS:
                            container_el = ContainerElement(
                                label=cluster.label,
                                id=cluster.id,
                                page_no=page.page_no,
                                cluster=cluster,
                            )
                            elements.append(container_el)
                            body.append(container_el)
                    page.assembled = AssembledUnit(
                        elements=elements, headers=headers, body=body
                    )
                yield page
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/code_formula_model.py | docling/models/code_formula_model.py | import re
from collections.abc import Iterable
from pathlib import Path
from typing import List, Literal, Optional, Tuple, Union
import numpy as np
from docling_core.types.doc import (
CodeItem,
DocItemLabel,
DoclingDocument,
NodeItem,
TextItem,
)
from docling_core.types.doc.labels import CodeLanguageLabel
from PIL import Image
from pydantic import BaseModel
from transformers import AutoModelForImageTextToText, AutoProcessor
from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
from docling.datamodel.base_models import ItemAndImageEnrichmentElement
from docling.models.base_model import BaseItemAndImageEnrichmentModel
from docling.models.utils.hf_model_download import download_hf_model
from docling.utils.accelerator_utils import decide_device
class CodeFormulaModelOptions(BaseModel):
    """Configuration for :class:`CodeFormulaModel`.

    Attributes
    ----------
    kind : str
        Discriminator for this options type; fixed to ``"code_formula"``.
    do_code_enrichment : bool
        Whether code blocks are enriched by the model.
    do_formula_enrichment : bool
        Whether formulas are enriched by the model.
    """

    kind: Literal["code_formula"] = "code_formula"
    do_code_enrichment: bool = True
    do_formula_enrichment: bool = True
class CodeFormulaModel(BaseItemAndImageEnrichmentModel):
    """
    Model for processing and enriching documents with code and formula predictions.

    Attributes
    ----------
    enabled : bool
        True if the model is enabled, False otherwise.
    options : CodeFormulaModelOptions
        Configuration options for the CodeFormulaModel.
    code_formula_model : CodeFormulaPredictor
        The predictor model for code and formula processing.

    Methods
    -------
    __init__(self, enabled, artifacts_path, accelerator_options, code_formula_options)
        Initializes the CodeFormulaModel with the given configuration options.
    is_processable(self, doc, element)
        Determines if a given element in a document can be processed by the model.
    __call__(self, doc, element_batch)
        Processes the given batch of elements and enriches them with predictions.
    """

    # Subfolder name under artifacts_path where the model weights live.
    _model_repo_folder = "docling-project--CodeFormulaV2"
    # Number of elements processed per batch by the enrichment pipeline.
    elements_batch_size = 5
    images_scale = 1.67  # = 120 dpi, aligned with training data resolution
    # Relative margin added around the element crop before inference.
    expansion_factor = 0.18

    def __init__(
        self,
        enabled: bool,
        artifacts_path: Optional[Path],
        options: CodeFormulaModelOptions,
        accelerator_options: AcceleratorOptions,
    ):
        """
        Initializes the CodeFormulaModel with the given configuration.

        Parameters
        ----------
        enabled : bool
            True if the model is enabled, False otherwise.
        artifacts_path : Path
            Path to the directory containing the model artifacts.
        options : CodeFormulaModelOptions
            Configuration options for the model.
        accelerator_options : AcceleratorOptions
            Options specifying the device and number of threads for acceleration.
        """
        self.enabled = enabled
        self.options = options

        if self.enabled:
            # Only CPU and CUDA are supported by this model.
            self.device = decide_device(
                accelerator_options.device,
                supported_devices=[AcceleratorDevice.CPU, AcceleratorDevice.CUDA],
            )

            if artifacts_path is None:
                artifacts_path = self.download_models()
            else:
                artifacts_path = artifacts_path / self._model_repo_folder

            self._processor = AutoProcessor.from_pretrained(
                artifacts_path,
            )
            # Token budget used later to bound max_new_tokens during generation.
            self._model_max_length = self._processor.tokenizer.model_max_length
            self._model = AutoModelForImageTextToText.from_pretrained(
                artifacts_path, device_map=self.device
            )
            self._model.eval()

    @staticmethod
    def download_models(
        local_dir: Optional[Path] = None,
        force: bool = False,
        progress: bool = False,
    ) -> Path:
        """Download the model weights from the Hugging Face Hub; return the local path."""
        return download_hf_model(
            repo_id="docling-project/CodeFormulaV2",
            revision="main",
            local_dir=local_dir,
            force=force,
            progress=progress,
        )

    def is_processable(self, doc: DoclingDocument, element: NodeItem) -> bool:
        """
        Determines if a given element in a document can be processed by the model.

        Parameters
        ----------
        doc : DoclingDocument
            The document being processed.
        element : NodeItem
            The element within the document to check.

        Returns
        -------
        bool
            True if the element can be processed, False otherwise.
        """
        return self.enabled and (
            (isinstance(element, CodeItem) and self.options.do_code_enrichment)
            or (
                isinstance(element, TextItem)
                and element.label == DocItemLabel.FORMULA
                and self.options.do_formula_enrichment
            )
        )

    def _extract_code_language(self, input_string: str) -> Tuple[str, Optional[str]]:
        """Extracts a programming language from the beginning of a string.

        This function checks if the input string starts with a pattern of the form
        ``<_some_language_>``. If it does, it extracts the language string and returns
        a tuple of (remainder, language). Otherwise, it returns the original string
        and `None`.

        Args:
            input_string (str): The input string, which may start with ``<_language_>``.

        Returns:
            Tuple[str, Optional[str]]:
                A tuple where:
                - The first element is either:
                    - The remainder of the string (everything after ``<_language_>``),
                      if a match is found; or
                    - The original string, if no match is found.
                - The second element is the extracted language if a match is found;
                  otherwise, `None`.
        """
        # DOTALL so the remainder may span multiple lines.
        pattern = r"^<_([^_>]+)_>\s*(.*)"
        match = re.match(pattern, input_string, flags=re.DOTALL)
        if match:
            language = str(match.group(1))  # the captured programming language
            remainder = str(match.group(2))  # everything after the <_language_>
            return remainder, language
        else:
            return input_string, None

    def _get_code_language_enum(self, value: Optional[str]) -> CodeLanguageLabel:
        """
        Converts a string to a corresponding `CodeLanguageLabel` enum member.

        If the provided string does not match any value in `CodeLanguageLabel`,
        it defaults to `CodeLanguageLabel.UNKNOWN`.

        Args:
            value (Optional[str]): The string representation of the code language or None.

        Returns:
            CodeLanguageLabel: The corresponding enum member if the value is valid,
            otherwise `CodeLanguageLabel.UNKNOWN`.
        """
        if not isinstance(value, str):
            return CodeLanguageLabel.UNKNOWN
        try:
            return CodeLanguageLabel(value)
        except ValueError:
            return CodeLanguageLabel.UNKNOWN

    def _get_prompt(self, label: str) -> str:
        """
        Constructs the prompt for the model based on the input label.

        Parameters
        ----------
        label : str
            The type of input, either 'code' or 'formula'.

        Returns
        -------
        str
            The constructed prompt including necessary tokens and query.

        Raises
        ------
        NotImplementedError
            If the label is not 'code' or 'formula'.
        """
        if label == "code":
            query = "<code>"
        elif label == "formula":
            query = "<formula>"
        else:
            raise NotImplementedError("Label must be either code or formula")

        messages = [
            {
                "role": "user",
                "content": [{"type": "image"}, {"type": "text", "text": query}],
            },
        ]
        prompt = self._processor.apply_chat_template(
            messages, add_generation_prompt=True
        )
        return prompt

    def _post_process(self, texts: list[str]) -> list[str]:
        """
        Processes a list of text strings by truncating at '<end_of_utterance>' and
        removing a predefined set of unwanted substrings.

        Parameters
        ----------
        texts : list[str]
            A list of strings to be post-processed.

        Returns
        -------
        list[str]
            A list of cleaned strings with specified substrings removed and truncated at
            '<end_of_utterance>' if present.
        """
        to_remove = ["</code>", "</formula>", "<loc_0><loc_0><loc_500><loc_500>"]

        def clean_text(text: str) -> str:
            # Cut everything after the end-of-utterance marker, then strip
            # model-specific closing tags and the full-page location token.
            idx = text.find("<end_of_utterance>")
            if idx != -1:
                text = text[:idx]
            for token in to_remove:
                if token in text:
                    text = text.replace(token, "")
            return text.lstrip()

        return [clean_text(t) for t in texts]

    def __call__(
        self,
        doc: DoclingDocument,
        element_batch: Iterable[ItemAndImageEnrichmentElement],
    ) -> Iterable[NodeItem]:
        """
        Processes the given batch of elements and enriches them with predictions.

        Parameters
        ----------
        doc : DoclingDocument
            The document being processed.
        element_batch : Iterable[ItemAndImageEnrichmentElement]
            A batch of elements to be processed.

        Returns
        -------
        Iterable[Any]
            An iterable of enriched elements.
        """
        if not self.enabled:
            # Disabled: pass items through unchanged.
            for element in element_batch:
                yield element.item
            return

        labels: List[str] = []
        images: List[Union[Image.Image, np.ndarray]] = []
        elements: List[TextItem] = []
        for el in element_batch:
            elements.append(el.item)  # type: ignore[arg-type]
            labels.append(el.item.label)  # type: ignore[attr-defined]
            images.append(el.image)

        prompts = [self._get_prompt(label) for label in labels]
        inputs = self._processor(
            text=prompts,
            images=images,
            return_tensors="pt",
        )
        inputs = inputs.to(self.device)

        gen_kwargs = dict(
            # Bound the generation budget by what remains of the model context
            # after the prompt tokens.
            max_new_tokens=self._model_max_length - inputs.input_ids.shape[1],
            use_cache=True,
            do_sample=False,
        )
        generated_ids = self._model.generate(**inputs, **gen_kwargs)

        # Decode only the newly generated tokens (drop the prompt prefix).
        outputs = self._processor.batch_decode(
            generated_ids[:, inputs.input_ids.shape[1] :], skip_special_tokens=False
        )
        outputs = self._post_process(outputs)

        for item, output in zip(elements, outputs):
            if isinstance(item, CodeItem):
                output, code_language = self._extract_code_language(output)
                item.code_language = self._get_code_language_enum(code_language)
            item.text = output
            yield item
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/base_table_model.py | docling/models/base_table_model.py | from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Iterable, Sequence
from typing import Type
from docling.datamodel.base_models import Page, TableStructurePrediction
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import BaseTableStructureOptions
from docling.models.base_model import BaseModelWithOptions, BasePageModel
class BaseTableStructureModel(BasePageModel, BaseModelWithOptions, ABC):
    """Common contract implemented by every table-structure model."""

    enabled: bool

    @classmethod
    @abstractmethod
    def get_options_type(cls) -> Type[BaseTableStructureOptions]:
        """Return the options class accepted by this table model."""

    @abstractmethod
    def predict_tables(
        self,
        conv_res: ConversionResult,
        pages: Sequence[Page],
    ) -> Sequence[TableStructurePrediction]:
        """Compute one table-structure prediction per page, in page order."""

    def __call__(
        self,
        conv_res: ConversionResult,
        page_batch: Iterable[Page],
    ) -> Iterable[Page]:
        # A model without an explicit `enabled` attribute is treated as enabled.
        if not getattr(self, "enabled", True):
            yield from page_batch
            return
        # Materialize once: predict_tables needs a sequence, and the batch is
        # iterated again when attaching the results.
        batch = list(page_batch)
        for page, table_pred in zip(batch, self.predict_tables(conv_res, batch)):
            page.predictions.tablestructure = table_pred
            yield page
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/layout_model.py | docling/models/layout_model.py | import copy
import logging
import warnings
from collections.abc import Sequence
from pathlib import Path
from typing import List, Optional, Union
import numpy as np
from docling_core.types.doc import DocItemLabel
from PIL import Image
from docling.datamodel.accelerator_options import AcceleratorOptions
from docling.datamodel.base_models import BoundingBox, Cluster, LayoutPrediction, Page
from docling.datamodel.document import ConversionResult
from docling.datamodel.layout_model_specs import DOCLING_LAYOUT_V2, LayoutModelConfig
from docling.datamodel.pipeline_options import LayoutOptions
from docling.datamodel.settings import settings
from docling.models.base_layout_model import BaseLayoutModel
from docling.models.utils.hf_model_download import download_hf_model
from docling.utils.accelerator_utils import decide_device
from docling.utils.layout_postprocessor import LayoutPostprocessor
from docling.utils.profiling import TimeRecorder
from docling.utils.visualization import draw_clusters
_log = logging.getLogger(__name__)
class LayoutModel(BaseLayoutModel):
    """Default layout model: predicts labeled clusters for each page image
    and postprocesses them into the page's layout prediction."""

    # Labels whose clusters are assembled into text elements downstream.
    TEXT_ELEM_LABELS = [
        DocItemLabel.TEXT,
        DocItemLabel.FOOTNOTE,
        DocItemLabel.CAPTION,
        DocItemLabel.CHECKBOX_UNSELECTED,
        DocItemLabel.CHECKBOX_SELECTED,
        DocItemLabel.SECTION_HEADER,
        DocItemLabel.PAGE_HEADER,
        DocItemLabel.PAGE_FOOTER,
        DocItemLabel.CODE,
        DocItemLabel.LIST_ITEM,
        DocItemLabel.FORMULA,
    ]
    PAGE_HEADER_LABELS = [DocItemLabel.PAGE_HEADER, DocItemLabel.PAGE_FOOTER]
    TABLE_LABELS = [DocItemLabel.TABLE, DocItemLabel.DOCUMENT_INDEX]
    FIGURE_LABEL = DocItemLabel.PICTURE
    FORMULA_LABEL = DocItemLabel.FORMULA
    CONTAINER_LABELS = [DocItemLabel.FORM, DocItemLabel.KEY_VALUE_REGION]

    def __init__(
        self,
        artifacts_path: Optional[Path],
        accelerator_options: AcceleratorOptions,
        options: LayoutOptions,
    ):
        # Imported lazily so the heavy dependency is only loaded when this
        # model is actually instantiated.
        from docling_ibm_models.layoutmodel.layout_predictor import LayoutPredictor

        self.options = options

        device = decide_device(accelerator_options.device)
        layout_model_config = options.model_spec
        model_repo_folder = layout_model_config.model_repo_folder
        model_path = layout_model_config.model_path

        if artifacts_path is None:
            # No explicit artifacts dir: download (or reuse cached) weights.
            artifacts_path = (
                self.download_models(layout_model_config=layout_model_config)
                / model_path
            )
        else:
            if (artifacts_path / model_repo_folder).exists():
                artifacts_path = artifacts_path / model_repo_folder / model_path
            elif (artifacts_path / model_path).exists():
                # Legacy layout: artifacts_path points directly at model_path.
                warnings.warn(
                    "The usage of artifacts_path containing directly "
                    f"{model_path} is deprecated. Please point "
                    "the artifacts_path to the parent containing "
                    f"the {model_repo_folder} folder.",
                    DeprecationWarning,
                    stacklevel=3,
                )
                artifacts_path = artifacts_path / model_path

        self.layout_predictor = LayoutPredictor(
            artifact_path=str(artifacts_path),
            device=device,
            num_threads=accelerator_options.num_threads,
        )

    @classmethod
    def get_options_type(cls) -> type[LayoutOptions]:
        return LayoutOptions

    @staticmethod
    def download_models(
        local_dir: Optional[Path] = None,
        force: bool = False,
        progress: bool = False,
        # NOTE(review): this default is evaluated once at definition time
        # (LayoutOptions() is constructed at import) — confirm intended.
        layout_model_config: LayoutModelConfig = LayoutOptions().model_spec,  # use default
    ) -> Path:
        """Download the layout model weights; return the local directory."""
        return download_hf_model(
            repo_id=layout_model_config.repo_id,
            revision=layout_model_config.revision,
            local_dir=local_dir,
            force=force,
            progress=progress,
        )

    def draw_clusters_and_cells_side_by_side(
        self, conv_res, page, clusters, mode_prefix: str, show: bool = False
    ):
        """
        Draws a page image side by side with clusters filtered into two categories:
        - Left: Clusters excluding FORM, KEY_VALUE_REGION, and PICTURE.
        - Right: Clusters including FORM, KEY_VALUE_REGION, and PICTURE.
        Includes label names and confidence scores for each cluster.
        """
        # Scale factors from page coordinates to image pixels.
        scale_x = page.image.width / page.size.width
        scale_y = page.image.height / page.size.height

        # Filter clusters for left and right images
        exclude_labels = {
            DocItemLabel.FORM,
            DocItemLabel.KEY_VALUE_REGION,
            DocItemLabel.PICTURE,
        }
        left_clusters = [c for c in clusters if c.label not in exclude_labels]
        right_clusters = [c for c in clusters if c.label in exclude_labels]
        # Create a deep copy of the original image for both sides
        left_image = page.image.copy()
        right_image = page.image.copy()

        # Draw clusters on both images
        draw_clusters(left_image, left_clusters, scale_x, scale_y)
        draw_clusters(right_image, right_clusters, scale_x, scale_y)
        # Combine the images side by side
        combined_width = left_image.width * 2
        combined_height = left_image.height
        combined_image = Image.new("RGB", (combined_width, combined_height))
        combined_image.paste(left_image, (0, 0))
        combined_image.paste(right_image, (left_image.width, 0))
        if show:
            combined_image.show()
        else:
            out_path: Path = (
                Path(settings.debug.debug_output_path)
                / f"debug_{conv_res.input.file.stem}"
            )
            out_path.mkdir(parents=True, exist_ok=True)
            out_file = out_path / f"{mode_prefix}_layout_page_{page.page_no:05}.png"
            combined_image.save(str(out_file), format="png")

    def predict_layout(
        self,
        conv_res: ConversionResult,
        pages: Sequence[Page],
    ) -> Sequence[LayoutPrediction]:
        """Run batched layout inference and postprocessing over the pages.

        Invalid pages keep (or receive an empty) layout prediction; valid
        pages are predicted in a single batch, then each page's raw clusters
        are postprocessed and confidence scores are recorded on conv_res.
        """
        # Convert to list to ensure predictable iteration
        pages = list(pages)

        # Separate valid and invalid pages
        valid_pages = []
        valid_page_images: List[Union[Image.Image, np.ndarray]] = []

        for page in pages:
            assert page._backend is not None
            if not page._backend.is_valid():
                continue

            assert page.size is not None
            page_image = page.get_image(scale=1.0)
            assert page_image is not None

            valid_pages.append(page)
            valid_page_images.append(page_image)

        # Process all valid pages with batch prediction
        batch_predictions = []
        if valid_page_images:
            with TimeRecorder(conv_res, "layout"):
                batch_predictions = self.layout_predictor.predict_batch(  # type: ignore[attr-defined]
                    valid_page_images
                )

        # Process each page with its predictions
        layout_predictions: list[LayoutPrediction] = []

        # Index into batch_predictions; advances only for valid pages.
        valid_page_idx = 0
        for page in pages:
            assert page._backend is not None
            if not page._backend.is_valid():
                # Keep any pre-existing prediction, otherwise attach an empty one.
                existing_prediction = page.predictions.layout or LayoutPrediction()
                page.predictions.layout = existing_prediction
                layout_predictions.append(existing_prediction)
                continue

            page_predictions = batch_predictions[valid_page_idx]
            valid_page_idx += 1

            clusters = []
            for ix, pred_item in enumerate(page_predictions):
                label = DocItemLabel(
                    pred_item["label"].lower().replace(" ", "_").replace("-", "_")
                )  # Temporary, until docling-ibm-model uses docling-core types
                cluster = Cluster(
                    id=ix,
                    label=label,
                    confidence=pred_item["confidence"],
                    bbox=BoundingBox.model_validate(pred_item),
                    cells=[],
                )
                clusters.append(cluster)

            if settings.debug.visualize_raw_layout:
                self.draw_clusters_and_cells_side_by_side(
                    conv_res, page, clusters, mode_prefix="raw"
                )

            # Apply postprocessing
            processed_clusters, processed_cells = LayoutPostprocessor(
                page, clusters, self.options
            ).postprocess()
            # Note: LayoutPostprocessor updates page.cells and page.parsed_page internally

            with warnings.catch_warnings():
                # np.mean over an empty cluster/cell list emits RuntimeWarnings;
                # suppress them and let the score become NaN.
                warnings.filterwarnings(
                    "ignore",
                    "Mean of empty slice|invalid value encountered in scalar divide",
                    RuntimeWarning,
                    "numpy",
                )
                conv_res.confidence.pages[page.page_no].layout_score = float(
                    np.mean([c.confidence for c in processed_clusters])
                )
                conv_res.confidence.pages[page.page_no].ocr_score = float(
                    np.mean([c.confidence for c in processed_cells if c.from_ocr])
                )

            prediction = LayoutPrediction(clusters=processed_clusters)
            page.predictions.layout = prediction

            if settings.debug.visualize_layout:
                self.draw_clusters_and_cells_side_by_side(
                    conv_res, page, processed_clusters, mode_prefix="postprocessed"
                )

            layout_predictions.append(prediction)

        return layout_predictions
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/picture_description_vlm_model.py | docling/models/picture_description_vlm_model.py | import sys
import threading
from collections.abc import Iterable
from pathlib import Path
from typing import Optional, Type, Union
from PIL import Image
from transformers import AutoModelForImageTextToText
from docling.datamodel.accelerator_options import AcceleratorOptions
from docling.datamodel.pipeline_options import (
PictureDescriptionBaseOptions,
PictureDescriptionVlmOptions,
)
from docling.models.picture_description_base_model import PictureDescriptionBaseModel
from docling.models.utils.hf_model_download import (
HuggingFaceModelDownloadMixin,
)
from docling.utils.accelerator_utils import decide_device
# Global lock for model initialization to prevent threading issues
_model_init_lock = threading.Lock()
class PictureDescriptionVlmModel(
    PictureDescriptionBaseModel, HuggingFaceModelDownloadMixin
):
    """Picture-description model backed by a local Hugging Face VLM."""

    @classmethod
    def get_options_type(cls) -> Type[PictureDescriptionBaseOptions]:
        return PictureDescriptionVlmOptions

    def __init__(
        self,
        enabled: bool,
        enable_remote_services: bool,
        artifacts_path: Optional[Union[Path, str]],
        options: PictureDescriptionVlmOptions,
        accelerator_options: AcceleratorOptions,
    ):
        super().__init__(
            enabled=enabled,
            enable_remote_services=enable_remote_services,
            artifacts_path=artifacts_path,
            options=options,
            accelerator_options=accelerator_options,
        )
        self.options: PictureDescriptionVlmOptions

        if self.enabled:
            if artifacts_path is None:
                artifacts_path = self.download_models(repo_id=self.options.repo_id)
            else:
                artifacts_path = Path(artifacts_path) / self.options.repo_cache_folder

            self.device = decide_device(accelerator_options.device)

            # Probe for the optional VLM dependencies before loading anything.
            # NOTE(review): AutoModelForVision2Seq is imported here but the
            # model below uses the module-level AutoModelForImageTextToText —
            # confirm whether this import is still needed.
            try:
                import torch
                from transformers import AutoModelForVision2Seq, AutoProcessor
            except ImportError:
                raise ImportError(
                    "transformers >=4.46 is not installed. Please install Docling with the required extras `pip install docling[vlm]`."
                )

            # Initialize processor and model
            # Guarded by a global lock to avoid concurrent-initialization issues.
            with _model_init_lock:
                self.processor = AutoProcessor.from_pretrained(artifacts_path)
                self.model = AutoModelForImageTextToText.from_pretrained(
                    artifacts_path,
                    device_map=self.device,
                    dtype=torch.bfloat16,
                    _attn_implementation=(
                        "flash_attention_2"
                        if self.device.startswith("cuda")
                        and accelerator_options.cuda_use_flash_attention2
                        else "sdpa"
                    ),
                )
                # NOTE(review): eval() is only called on the >=3.14 branch;
                # the compiled model on older Pythons is not put in eval
                # mode here — confirm this is intended.
                if sys.version_info < (3, 14):
                    self.model = torch.compile(self.model)  # type: ignore
                else:
                    self.model.eval()

            # Provenance string recorded on generated annotations.
            self.provenance = f"{self.options.repo_id}"

    def _annotate_images(self, images: Iterable[Image.Image]) -> Iterable[str]:
        """Yield one generated description per input image."""
        from transformers import GenerationConfig

        # Create input messages
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image"},
                    {"type": "text", "text": self.options.prompt},
                ],
            },
        ]

        # TODO: do batch generation
        for image in images:
            # Prepare inputs
            prompt = self.processor.apply_chat_template(
                messages, add_generation_prompt=True
            )
            inputs = self.processor(text=prompt, images=[image], return_tensors="pt")
            inputs = inputs.to(self.device)

            # Generate outputs
            generated_ids = self.model.generate(
                **inputs,
                generation_config=GenerationConfig(**self.options.generation_config),
            )
            # Decode only the newly generated tokens (drop the prompt prefix).
            generated_texts = self.processor.batch_decode(
                generated_ids[:, inputs["input_ids"].shape[1] :],
                skip_special_tokens=True,
            )
            yield generated_texts[0].strip()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/easyocr_model.py | docling/models/easyocr_model.py | import logging
import warnings
import zipfile
from collections.abc import Iterable
from pathlib import Path
from typing import List, Optional, Type
import numpy
from docling_core.types.doc import BoundingBox, CoordOrigin
from docling_core.types.doc.page import BoundingRectangle, TextCell
from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
from docling.datamodel.base_models import Page
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import (
EasyOcrOptions,
OcrOptions,
)
from docling.datamodel.settings import settings
from docling.models.base_ocr_model import BaseOcrModel
from docling.utils.accelerator_utils import decide_device
from docling.utils.profiling import TimeRecorder
from docling.utils.utils import download_url_with_progress
_log = logging.getLogger(__name__)
class EasyOcrModel(BaseOcrModel):
    """OCR model backed by the EasyOCR engine.

    Renders each OCR rectangle of a page at higher resolution, runs EasyOCR
    on it, and converts the results back into page-coordinate text cells.
    """

    # Subfolder name under artifacts_path where EasyOCR weights are stored.
    _model_repo_folder = "EasyOcr"

    def __init__(
        self,
        enabled: bool,
        artifacts_path: Optional[Path],
        options: EasyOcrOptions,
        accelerator_options: AcceleratorOptions,
    ):
        """Initialize the EasyOCR reader if the model is enabled.

        Parameters
        ----------
        enabled : bool
            Whether OCR is active; when False, pages pass through untouched.
        artifacts_path : Optional[Path]
            Parent directory containing the ``EasyOcr`` weights folder; when
            set (and no explicit model_storage_directory is configured),
            downloads are disabled and weights are read from there.
        options : EasyOcrOptions
            Engine-specific options (languages, GPU usage, thresholds, ...).
        accelerator_options : AcceleratorOptions
            Device selection used when ``options.use_gpu`` is not set.
        """
        super().__init__(
            enabled=enabled,
            artifacts_path=artifacts_path,
            options=options,
            accelerator_options=accelerator_options,
        )
        self.options: EasyOcrOptions

        self.scale = 3  # multiplier for 72 dpi == 216 dpi.

        if self.enabled:
            try:
                import easyocr
            except ImportError:
                raise ImportError(
                    "EasyOCR is not installed. Please install it via `pip install easyocr` to use this OCR engine. "
                    "Alternatively, Docling has support for other OCR engines. See the documentation."
                )

            if self.options.use_gpu is None:
                device = decide_device(accelerator_options.device)
                # Enable easyocr GPU if running on CUDA, MPS
                use_gpu = any(
                    device.startswith(x)
                    for x in [
                        AcceleratorDevice.CUDA.value,
                        AcceleratorDevice.MPS.value,
                    ]
                )
            else:
                warnings.warn(
                    "Deprecated field. Better to set the `accelerator_options.device` in `pipeline_options`. "
                    "When `use_gpu and accelerator_options.device == AcceleratorDevice.CUDA` the GPU is used "
                    "to run EasyOCR. Otherwise, EasyOCR runs in CPU."
                )
                use_gpu = self.options.use_gpu

            download_enabled = self.options.download_enabled
            model_storage_directory = self.options.model_storage_directory
            # An explicit artifacts_path implies offline weights: disable
            # downloads and point EasyOCR at the bundled folder.
            if artifacts_path is not None and model_storage_directory is None:
                download_enabled = False
                model_storage_directory = str(artifacts_path / self._model_repo_folder)

            with warnings.catch_warnings():
                if self.options.suppress_mps_warnings:
                    warnings.filterwarnings("ignore", message=".*pin_memory.*MPS.*")

                self.reader = easyocr.Reader(
                    lang_list=self.options.lang,
                    gpu=use_gpu,
                    model_storage_directory=model_storage_directory,
                    recog_network=self.options.recog_network,
                    download_enabled=download_enabled,
                    verbose=False,
                )

    @staticmethod
    def download_models(
        detection_models: Optional[List[str]] = None,
        recognition_models: Optional[List[str]] = None,
        local_dir: Optional[Path] = None,
        force: bool = False,
        progress: bool = False,
    ) -> Path:
        """Download EasyOCR detection/recognition weights; return the target dir.

        Parameters
        ----------
        detection_models : Optional[List[str]]
            Detection model names; defaults to ``["craft"]``.
        recognition_models : Optional[List[str]]
            Recognition model names; defaults to ``["english_g2", "latin_g2"]``.
        local_dir : Optional[Path]
            Destination directory; defaults to the Docling model cache.
        force : bool
            Accepted for interface parity with other download helpers;
            downloads always proceed regardless of its value.
        progress : bool
            Show a progress bar while downloading.
        """
        # Use None sentinels instead of mutable list defaults (shared mutable
        # default arguments are a Python anti-pattern); behavior is unchanged.
        if detection_models is None:
            detection_models = ["craft"]
        if recognition_models is None:
            recognition_models = ["english_g2", "latin_g2"]

        # Models are located in https://github.com/JaidedAI/EasyOCR/blob/master/easyocr/config.py
        from easyocr.config import (
            detection_models as det_models_dict,
            recognition_models as rec_models_dict,
        )

        if local_dir is None:
            local_dir = settings.cache_dir / "models" / EasyOcrModel._model_repo_folder

        local_dir.mkdir(parents=True, exist_ok=True)

        # Collect models to download
        download_list = []
        for model_name in detection_models:
            if model_name in det_models_dict:
                download_list.append(det_models_dict[model_name])
        for model_name in recognition_models:
            if model_name in rec_models_dict["gen2"]:
                download_list.append(rec_models_dict["gen2"][model_name])

        # Download models
        for model_details in download_list:
            buf = download_url_with_progress(model_details["url"], progress=progress)
            with zipfile.ZipFile(buf, "r") as zip_ref:
                zip_ref.extractall(local_dir)

        return local_dir

    def __call__(
        self, conv_res: ConversionResult, page_batch: Iterable[Page]
    ) -> Iterable[Page]:
        """Run OCR over each page's OCR rectangles and attach the text cells."""
        if not self.enabled:
            yield from page_batch
            return

        for page in page_batch:
            assert page._backend is not None
            if not page._backend.is_valid():
                yield page
            else:
                with TimeRecorder(conv_res, "ocr"):
                    ocr_rects = self.get_ocr_rects(page)

                    all_ocr_cells = []
                    for ocr_rect in ocr_rects:
                        # Skip zero area boxes
                        if ocr_rect.area() == 0:
                            continue
                        high_res_image = page._backend.get_page_image(
                            scale=self.scale, cropbox=ocr_rect
                        )
                        im = numpy.array(high_res_image)
                        with warnings.catch_warnings():
                            if self.options.suppress_mps_warnings:
                                warnings.filterwarnings(
                                    "ignore", message=".*pin_memory.*MPS.*"
                                )
                            result = self.reader.readtext(im)
                        # Free the high-resolution buffers eagerly.
                        del high_res_image
                        del im

                        # EasyOCR returns (quad, text, confidence) triples; map
                        # the quad's top-left/bottom-right corners back from
                        # scaled-crop pixels to page coordinates.
                        cells = [
                            TextCell(
                                index=ix,
                                text=line[1],
                                orig=line[1],
                                from_ocr=True,
                                confidence=line[2],
                                rect=BoundingRectangle.from_bounding_box(
                                    BoundingBox.from_tuple(
                                        coord=(
                                            (line[0][0][0] / self.scale) + ocr_rect.l,
                                            (line[0][0][1] / self.scale) + ocr_rect.t,
                                            (line[0][2][0] / self.scale) + ocr_rect.l,
                                            (line[0][2][1] / self.scale) + ocr_rect.t,
                                        ),
                                        origin=CoordOrigin.TOPLEFT,
                                    )
                                ),
                            )
                            for ix, line in enumerate(result)
                            if line[2] >= self.options.confidence_threshold
                        ]
                        all_ocr_cells.extend(cells)

                    # Post-process the cells
                    self.post_process_cells(all_ocr_cells, page)

                # DEBUG code:
                if settings.debug.visualize_ocr:
                    self.draw_ocr_rects_and_cells(conv_res, page, ocr_rects)

                yield page

    @classmethod
    def get_options_type(cls) -> Type[OcrOptions]:
        return EasyOcrOptions
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/__init__.py | docling/models/__init__.py | python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false | |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/readingorder_model.py | docling/models/readingorder_model.py | from pathlib import Path
from docling_core.types.doc import (
DocItemLabel,
DoclingDocument,
DocumentOrigin,
GroupLabel,
NodeItem,
ProvenanceItem,
RefItem,
RichTableCell,
TableData,
)
from docling_core.types.doc.document import ContentLayer
from docling_ibm_models.list_item_normalizer.list_marker_processor import (
ListItemMarkerProcessor,
)
from docling_ibm_models.reading_order.reading_order_rb import (
PageElement as ReadingOrderPageElement,
ReadingOrderPredictor,
)
from pydantic import BaseModel, ConfigDict
from docling.datamodel.base_models import (
BasePageElement,
Cluster,
ContainerElement,
FigureElement,
Table,
TextElement,
)
from docling.datamodel.document import ConversionResult
from docling.utils.profiling import ProfilingScope, TimeRecorder
class ReadingOrderOptions(BaseModel):
    """Options for :class:`ReadingOrderModel`."""

    # Permit field names with the "model_" prefix, which pydantic v2
    # otherwise reserves as a protected namespace.
    model_config = ConfigDict(protected_namespaces=())

    model_names: str = ""  # e.g. "language;term;reference"
class ReadingOrderModel:
    """Orders assembled page elements and materializes them as a DoclingDocument.

    Wraps the rule-based ``ReadingOrderPredictor`` and post-processes its
    caption/footnote/merge predictions while building the output document.
    """

    def __init__(self, options: ReadingOrderOptions):
        self.options = options
        self.ro_model = ReadingOrderPredictor()
        # Normalizes list-item markers on created list items.
        self.list_item_processor = ListItemMarkerProcessor()

    def _assembled_to_readingorder_elements(
        self, conv_res: ConversionResult
    ) -> list[ReadingOrderPageElement]:
        """Convert assembled page elements into the predictor's input records.

        Coordinates are converted to bottom-left origin; ``cid`` is a dense
        running index and ``ref`` encodes ``#/<page_no>/<cluster_id>``.
        """
        elements: list[ReadingOrderPageElement] = []
        page_no_to_pages = {p.page_no: p for p in conv_res.pages}
        for element in conv_res.assembled.elements:
            page_height = page_no_to_pages[element.page_no].size.height  # type: ignore
            bbox = element.cluster.bbox.to_bottom_left_origin(page_height)
            text = element.text or ""
            elements.append(
                ReadingOrderPageElement(
                    cid=len(elements),
                    ref=RefItem(cref=f"#/{element.page_no}/{element.cluster.id}"),
                    text=text,
                    page_no=element.page_no,
                    page_size=page_no_to_pages[element.page_no].size,
                    label=element.label,
                    l=bbox.l,
                    r=bbox.r,
                    b=bbox.b,
                    t=bbox.t,
                    coord_origin=bbox.coord_origin,
                )
            )
        return elements

    def _add_child_elements(
        self, element: BasePageElement, doc_item: NodeItem, doc: DoclingDocument
    ):
        """Attach the children of ``element``'s cluster as items under ``doc_item``."""
        child: Cluster
        for child in element.cluster.children:
            c_label = child.label
            # DoclingDocument pages are 1-based; assembled elements are 0-based.
            c_bbox = child.bbox.to_bottom_left_origin(
                doc.pages[element.page_no + 1].size.height
            )
            # \x02 markers are replaced with "-" (presumably hyphenation
            # placeholders from the parser — TODO confirm).
            c_text = " ".join(
                [
                    cell.text.replace("\x02", "-").strip()
                    for cell in child.cells
                    if len(cell.text.strip()) > 0
                ]
            )
            c_prov = ProvenanceItem(
                page_no=element.page_no + 1, charspan=(0, len(c_text)), bbox=c_bbox
            )
            if c_label == DocItemLabel.LIST_ITEM:
                # TODO: Infer if this is a numbered or a bullet list item
                l_item = doc.add_list_item(parent=doc_item, text=c_text, prov=c_prov)
                self.list_item_processor.process_list_item(l_item)
            elif c_label == DocItemLabel.SECTION_HEADER:
                doc.add_heading(parent=doc_item, text=c_text, prov=c_prov)
            else:
                doc.add_text(parent=doc_item, label=c_label, text=c_text, prov=c_prov)

    def _create_rich_cell_group(
        self, element: BasePageElement, doc: DoclingDocument, table_item: NodeItem
    ) -> RefItem:
        """Create a group containing all child elements for a rich table cell."""
        group_name = f"rich_cell_group_{len(doc.tables)}_0_0"
        group_element = doc.add_group(
            label=GroupLabel.UNSPECIFIED,
            name=group_name,
            parent=table_item,
        )
        # Add all child elements to the group
        self._add_child_elements(element, group_element, doc)
        return group_element.get_ref()

    def _readingorder_elements_to_docling_doc(
        self,
        conv_res: ConversionResult,
        ro_elements: list[ReadingOrderPageElement],
        el_to_captions_mapping: dict[int, list[int]],
        el_to_footnotes_mapping: dict[int, list[int]],
        el_merges_mapping: dict[int, list[int]],
    ) -> DoclingDocument:
        """Build the DoclingDocument from reading-ordered elements.

        ``el_to_*_mapping`` map an element cid to the cids of its captions,
        footnotes, or merge continuations; those target elements are consumed
        by their owner and skipped in the main loop.
        """
        id_to_elem = {
            RefItem(cref=f"#/{elem.page_no}/{elem.cluster.id}").cref: elem
            for elem in conv_res.assembled.elements
        }
        cid_to_rels = {rel.cid: rel for rel in ro_elements}
        origin = DocumentOrigin(
            mimetype="application/pdf",
            filename=conv_res.input.file.name,
            binary_hash=conv_res.input.document_hash,
        )
        doc_name = Path(origin.filename).stem
        out_doc: DoclingDocument = DoclingDocument(name=doc_name, origin=origin)

        for page in conv_res.pages:
            page_no = page.page_no + 1
            size = page.size
            assert size is not None, "Page size is not initialized."
            out_doc.add_page(page_no=page_no, size=size)

        current_list = None

        # Elements consumed as captions/footnotes/merge targets are not
        # emitted on their own.
        skippable_cids = {
            cid
            for mapping in (
                el_to_captions_mapping,
                el_to_footnotes_mapping,
                el_merges_mapping,
            )
            for lst in mapping.values()
            for cid in lst
        }
        page_no_to_pages = {p.page_no: p for p in conv_res.pages}

        for rel in ro_elements:
            if rel.cid in skippable_cids:
                continue
            element = id_to_elem[rel.ref.cref]
            page_height = page_no_to_pages[element.page_no].size.height  # type: ignore
            if isinstance(element, TextElement):
                if element.label == DocItemLabel.CODE:
                    cap_text = element.text
                    prov = ProvenanceItem(
                        page_no=element.page_no + 1,
                        charspan=(0, len(cap_text)),
                        bbox=element.cluster.bbox.to_bottom_left_origin(page_height),
                    )
                    code_item = out_doc.add_code(text=cap_text, prov=prov)
                    if rel.cid in el_to_captions_mapping.keys():
                        for caption_cid in el_to_captions_mapping[rel.cid]:
                            caption_elem = id_to_elem[cid_to_rels[caption_cid].ref.cref]
                            new_cap_item = self._add_caption_or_footnote(
                                caption_elem, out_doc, code_item, page_height
                            )
                            code_item.captions.append(new_cap_item.get_ref())
                    if rel.cid in el_to_footnotes_mapping.keys():
                        for footnote_cid in el_to_footnotes_mapping[rel.cid]:
                            footnote_elem = id_to_elem[
                                cid_to_rels[footnote_cid].ref.cref
                            ]
                            new_footnote_item = self._add_caption_or_footnote(
                                footnote_elem, out_doc, code_item, page_height
                            )
                            code_item.footnotes.append(new_footnote_item.get_ref())
                else:
                    new_item, current_list = self._handle_text_element(
                        element, out_doc, current_list, page_height
                    )
                    if rel.cid in el_merges_mapping.keys():
                        for merged_cid in el_merges_mapping[rel.cid]:
                            merged_elem = id_to_elem[cid_to_rels[merged_cid].ref.cref]
                            self._merge_elements(
                                element, merged_elem, new_item, page_height
                            )
            elif isinstance(element, Table):
                # Check if table has no structure prediction
                if element.num_rows == 0 and element.num_cols == 0:
                    # Only create 1x1 table if there are children to put in it
                    if element.cluster.children:
                        # Create minimal 1x1 table with rich cell containing all children
                        tbl_data = TableData(num_rows=1, num_cols=1, table_cells=[])
                    else:
                        # Create empty table with no structure
                        tbl_data = TableData(num_rows=0, num_cols=0, table_cells=[])
                else:
                    tbl_data = TableData(
                        num_rows=element.num_rows,
                        num_cols=element.num_cols,
                        table_cells=element.table_cells,
                    )
                # Tables carry an empty charspan; only the bbox locates them.
                prov = ProvenanceItem(
                    page_no=element.page_no + 1,
                    charspan=(0, 0),
                    bbox=element.cluster.bbox.to_bottom_left_origin(page_height),
                )
                tbl = out_doc.add_table(
                    data=tbl_data, prov=prov, label=element.cluster.label
                )
                if rel.cid in el_to_captions_mapping.keys():
                    for caption_cid in el_to_captions_mapping[rel.cid]:
                        caption_elem = id_to_elem[cid_to_rels[caption_cid].ref.cref]
                        new_cap_item = self._add_caption_or_footnote(
                            caption_elem, out_doc, tbl, page_height
                        )
                        tbl.captions.append(new_cap_item.get_ref())
                if rel.cid in el_to_footnotes_mapping.keys():
                    for footnote_cid in el_to_footnotes_mapping[rel.cid]:
                        footnote_elem = id_to_elem[cid_to_rels[footnote_cid].ref.cref]
                        new_footnote_item = self._add_caption_or_footnote(
                            footnote_elem, out_doc, tbl, page_height
                        )
                        tbl.footnotes.append(new_footnote_item.get_ref())
                # Handle case where table has no structure prediction but has children
                if (
                    element.num_rows == 0
                    and element.num_cols == 0
                    and element.cluster.children
                ):
                    # Create rich cell containing all child elements
                    rich_cell_ref = self._create_rich_cell_group(element, out_doc, tbl)
                    # Create rich table cell spanning the entire 1x1 table
                    rich_cell = RichTableCell(
                        text="",  # Empty text since content is in the group
                        row_span=1,
                        col_span=1,
                        start_row_offset_idx=0,
                        end_row_offset_idx=1,
                        start_col_offset_idx=0,
                        end_col_offset_idx=1,
                        column_header=False,
                        row_header=False,
                        ref=rich_cell_ref,
                    )
                    out_doc.add_table_cell(table_item=tbl, cell=rich_cell)
                # TODO: Consider adding children of Table.
            elif isinstance(element, FigureElement):
                cap_text = ""
                prov = ProvenanceItem(
                    page_no=element.page_no + 1,
                    charspan=(0, len(cap_text)),
                    bbox=element.cluster.bbox.to_bottom_left_origin(page_height),
                )
                pic = out_doc.add_picture(prov=prov)
                if rel.cid in el_to_captions_mapping.keys():
                    for caption_cid in el_to_captions_mapping[rel.cid]:
                        caption_elem = id_to_elem[cid_to_rels[caption_cid].ref.cref]
                        new_cap_item = self._add_caption_or_footnote(
                            caption_elem, out_doc, pic, page_height
                        )
                        pic.captions.append(new_cap_item.get_ref())
                if rel.cid in el_to_footnotes_mapping.keys():
                    for footnote_cid in el_to_footnotes_mapping[rel.cid]:
                        footnote_elem = id_to_elem[cid_to_rels[footnote_cid].ref.cref]
                        new_footnote_item = self._add_caption_or_footnote(
                            footnote_elem, out_doc, pic, page_height
                        )
                        pic.footnotes.append(new_footnote_item.get_ref())
                self._add_child_elements(element, pic, out_doc)
            elif isinstance(element, ContainerElement):  # Form, KV region
                label = element.label
                group_label = GroupLabel.UNSPECIFIED
                if label == DocItemLabel.FORM:
                    group_label = GroupLabel.FORM_AREA
                elif label == DocItemLabel.KEY_VALUE_REGION:
                    group_label = GroupLabel.KEY_VALUE_AREA
                container_el = out_doc.add_group(label=group_label)
                self._add_child_elements(element, container_el, out_doc)
        return out_doc

    def _add_caption_or_footnote(self, elem, out_doc, parent, page_height):
        """Add ``elem`` as a text item attached to ``parent`` and return the new item."""
        assert isinstance(elem, TextElement)
        text = elem.text
        prov = ProvenanceItem(
            page_no=elem.page_no + 1,
            charspan=(0, len(text)),
            bbox=elem.cluster.bbox.to_bottom_left_origin(page_height),
        )
        new_item = out_doc.add_text(
            label=elem.label, text=text, prov=prov, parent=parent
        )
        return new_item

    def _handle_text_element(self, element, out_doc, current_list, page_height):
        """Add a non-code text element; returns ``(new_item, current_list)``.

        ``current_list`` groups consecutive list items; any non-list label
        closes the currently open list group.
        """
        cap_text = element.text
        prov = ProvenanceItem(
            page_no=element.page_no + 1,
            charspan=(0, len(cap_text)),
            bbox=element.cluster.bbox.to_bottom_left_origin(page_height),
        )
        label = element.label
        if label == DocItemLabel.LIST_ITEM:
            if current_list is None:
                current_list = out_doc.add_group(label=GroupLabel.LIST, name="list")
            # TODO: Infer if this is a numbered or a bullet list item
            new_item = out_doc.add_list_item(
                text=cap_text, enumerated=False, prov=prov, parent=current_list
            )
            self.list_item_processor.process_list_item(new_item)
        elif label == DocItemLabel.SECTION_HEADER:
            current_list = None
            new_item = out_doc.add_heading(text=cap_text, prov=prov)
        elif label == DocItemLabel.FORMULA:
            current_list = None
            # Formula text goes to `orig`; `text` stays empty for formulas.
            new_item = out_doc.add_text(
                label=DocItemLabel.FORMULA, text="", orig=cap_text, prov=prov
            )
        else:
            current_list = None
            content_layer = ContentLayer.BODY
            if element.label in [DocItemLabel.PAGE_HEADER, DocItemLabel.PAGE_FOOTER]:
                content_layer = ContentLayer.FURNITURE
            new_item = out_doc.add_text(
                label=element.label,
                text=cap_text,
                prov=prov,
                content_layer=content_layer,
            )
        return new_item, current_list

    def _merge_elements(self, element, merged_elem, new_item, page_height):
        """Append ``merged_elem``'s text and provenance onto ``new_item``."""
        assert isinstance(merged_elem, type(element)), (
            "Merged element must be of same type as element."
        )
        assert merged_elem.label == new_item.label, (
            "Labels of merged elements must match."
        )
        # Charspan is computed before the text is appended: it covers the
        # merged text's position within the concatenated string.
        prov = ProvenanceItem(
            page_no=merged_elem.page_no + 1,
            charspan=(
                len(new_item.text) + 1,
                len(new_item.text) + 1 + len(merged_elem.text),
            ),
            bbox=merged_elem.cluster.bbox.to_bottom_left_origin(page_height),
        )
        new_item.text += f" {merged_elem.text}"
        new_item.orig += f" {merged_elem.text}"  # TODO: This is incomplete, we don't have the `orig` field of the merged element.
        new_item.prov.append(prov)

    def __call__(self, conv_res: ConversionResult) -> DoclingDocument:
        """Predict reading order and caption/footnote/merge links, then build the document."""
        with TimeRecorder(conv_res, "reading_order", scope=ProfilingScope.DOCUMENT):
            page_elements = self._assembled_to_readingorder_elements(conv_res)
            # Apply reading order
            sorted_elements = self.ro_model.predict_reading_order(
                page_elements=page_elements
            )
            el_to_captions_mapping = self.ro_model.predict_to_captions(
                sorted_elements=sorted_elements
            )
            el_to_footnotes_mapping = self.ro_model.predict_to_footnotes(
                sorted_elements=sorted_elements
            )
            el_merges_mapping = self.ro_model.predict_merges(
                sorted_elements=sorted_elements
            )
            docling_doc: DoclingDocument = self._readingorder_elements_to_docling_doc(
                conv_res,
                sorted_elements,
                el_to_captions_mapping,
                el_to_footnotes_mapping,
                el_merges_mapping,
            )
            return docling_doc
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/auto_ocr_model.py | docling/models/auto_ocr_model.py | import logging
import sys
from collections.abc import Iterable
from pathlib import Path
from typing import Optional, Type
from docling.datamodel.accelerator_options import AcceleratorOptions
from docling.datamodel.base_models import Page
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import (
EasyOcrOptions,
OcrAutoOptions,
OcrMacOptions,
OcrOptions,
RapidOcrOptions,
)
from docling.models.base_ocr_model import BaseOcrModel
from docling.models.easyocr_model import EasyOcrModel
from docling.models.ocr_mac_model import OcrMacModel
from docling.models.rapid_ocr_model import RapidOcrModel
_log = logging.getLogger(__name__)
class OcrAutoModel(BaseOcrModel):
    """OCR model that auto-selects the first available backend at construction.

    Probe order: ocrmac (macOS only) -> rapidocr/onnxruntime -> easyocr ->
    rapidocr/torch. A backend is selected when its import probe succeeds;
    if none is importable the model passes pages through unchanged.
    """

    def __init__(
        self,
        enabled: bool,
        artifacts_path: Optional[Path],
        options: OcrAutoOptions,
        accelerator_options: AcceleratorOptions,
    ):
        super().__init__(
            enabled=enabled,
            artifacts_path=artifacts_path,
            options=options,
            accelerator_options=accelerator_options,
        )
        self.options: OcrAutoOptions
        # Concrete backend model, or None when no engine could be loaded.
        self._engine: Optional[BaseOcrModel] = None
        if self.enabled:
            if "darwin" == sys.platform:
                try:
                    from ocrmac import ocrmac

                    self._engine = OcrMacModel(
                        enabled=self.enabled,
                        artifacts_path=artifacts_path,
                        options=OcrMacOptions(
                            bitmap_area_threshold=self.options.bitmap_area_threshold,
                            force_full_page_ocr=self.options.force_full_page_ocr,
                        ),
                        accelerator_options=accelerator_options,
                    )
                    _log.info("Auto OCR model selected ocrmac.")
                except ImportError:
                    _log.info("ocrmac cannot be used because ocrmac is not installed.")
            if self._engine is None:
                try:
                    # Both imports must succeed for the onnxruntime backend.
                    import onnxruntime
                    from rapidocr import EngineType, RapidOCR  # type: ignore

                    self._engine = RapidOcrModel(
                        enabled=self.enabled,
                        artifacts_path=artifacts_path,
                        options=RapidOcrOptions(
                            backend="onnxruntime",
                            bitmap_area_threshold=self.options.bitmap_area_threshold,
                            force_full_page_ocr=self.options.force_full_page_ocr,
                        ),
                        accelerator_options=accelerator_options,
                    )
                    _log.info("Auto OCR model selected rapidocr with onnxruntime.")
                except ImportError:
                    _log.info(
                        "rapidocr cannot be used because onnxruntime is not installed."
                    )
            if self._engine is None:
                try:
                    import easyocr

                    self._engine = EasyOcrModel(
                        enabled=self.enabled,
                        artifacts_path=artifacts_path,
                        options=EasyOcrOptions(
                            bitmap_area_threshold=self.options.bitmap_area_threshold,
                            force_full_page_ocr=self.options.force_full_page_ocr,
                        ),
                        accelerator_options=accelerator_options,
                    )
                    _log.info("Auto OCR model selected easyocr.")
                except ImportError:
                    _log.info("easyocr cannot be used because it is not installed.")
            if self._engine is None:
                try:
                    # Last resort: rapidocr with the torch engine.
                    import torch
                    from rapidocr import EngineType, RapidOCR  # type: ignore

                    self._engine = RapidOcrModel(
                        enabled=self.enabled,
                        artifacts_path=artifacts_path,
                        options=RapidOcrOptions(
                            backend="torch",
                            bitmap_area_threshold=self.options.bitmap_area_threshold,
                            force_full_page_ocr=self.options.force_full_page_ocr,
                        ),
                        accelerator_options=accelerator_options,
                    )
                    _log.info("Auto OCR model selected rapidocr with torch.")
                except ImportError:
                    _log.info(
                        "rapidocr cannot be used because rapidocr or torch is not installed."
                    )
            if self._engine is None:
                _log.warning("No OCR engine found. Please review the install details.")

    def __call__(
        self, conv_res: ConversionResult, page_batch: Iterable[Page]
    ) -> Iterable[Page]:
        """Delegate to the selected engine; pass pages through if none is available."""
        if not self.enabled or self._engine is None:
            yield from page_batch
            return
        yield from self._engine(conv_res, page_batch)

    @classmethod
    def get_options_type(cls) -> Type[OcrOptions]:
        """Return the options class that configures this OCR engine."""
        return OcrAutoOptions
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/picture_description_base_model.py | docling/models/picture_description_base_model.py | from abc import abstractmethod
from collections.abc import Iterable
from pathlib import Path
from typing import List, Optional, Type, Union
from docling_core.types.doc import (
DoclingDocument,
NodeItem,
PictureItem,
)
from docling_core.types.doc.document import ( # TODO: move import to docling_core.types.doc
PictureDescriptionData,
)
from PIL import Image
from docling.datamodel.accelerator_options import AcceleratorOptions
from docling.datamodel.pipeline_options import (
PictureDescriptionBaseOptions,
)
from docling.models.base_model import (
BaseItemAndImageEnrichmentModel,
BaseModelWithOptions,
ItemAndImageEnrichmentElement,
)
class PictureDescriptionBaseModel(
    BaseItemAndImageEnrichmentModel, BaseModelWithOptions
):
    """Base class for picture-description (captioning/annotation) enrichment models.

    Subclasses implement :meth:`_annotate_images`; this class filters the
    elements and attaches the resulting ``PictureDescriptionData``.
    """

    # Scale used when cropping picture images for enrichment.
    images_scale: float = 2.0

    def __init__(
        self,
        *,
        enabled: bool,
        enable_remote_services: bool,
        artifacts_path: Optional[Union[Path, str]],
        options: PictureDescriptionBaseOptions,
        accelerator_options: AcceleratorOptions,
    ):
        # NOTE(review): enable_remote_services, artifacts_path and
        # accelerator_options are accepted but not stored here — presumably
        # consumed by subclasses; confirm before relying on them.
        self.enabled = enabled
        self.options = options
        self.provenance = "not-implemented"

    def is_processable(self, doc: DoclingDocument, element: NodeItem) -> bool:
        """Only enabled models process ``PictureItem`` elements."""
        return self.enabled and isinstance(element, PictureItem)

    def _annotate_images(self, images: Iterable[Image.Image]) -> Iterable[str]:
        # Subclasses must yield one description string per input image.
        raise NotImplementedError

    def __call__(
        self,
        doc: DoclingDocument,
        element_batch: Iterable[ItemAndImageEnrichmentElement],
    ) -> Iterable[NodeItem]:
        """Annotate each sufficiently large picture in the batch with a description.

        NOTE(review): pictures below ``picture_area_threshold`` are filtered
        out and are not yielded back from this generator.
        """
        if not self.enabled:
            for element in element_batch:
                yield element.item
            return

        images: List[Image.Image] = []
        elements: List[PictureItem] = []
        for el in element_batch:
            assert isinstance(el.item, PictureItem)
            describe_image = True
            # Don't describe the image if it's smaller than the threshold
            if len(el.item.prov) > 0:
                prov = el.item.prov[0]  # PictureItems have at most a single provenance
                page = doc.pages.get(prov.page_no)
                if page is not None:
                    page_area = page.size.width * page.size.height
                    if page_area > 0:
                        area_fraction = prov.bbox.area() / page_area
                        if area_fraction < self.options.picture_area_threshold:
                            describe_image = False
            if describe_image:
                elements.append(el.item)
                images.append(el.image)
        outputs = self._annotate_images(images)
        for item, output in zip(elements, outputs):
            item.annotations.append(
                PictureDescriptionData(text=output, provenance=self.provenance)
            )
            yield item

    @classmethod
    @abstractmethod
    def get_options_type(cls) -> Type[PictureDescriptionBaseOptions]:
        pass
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/page_preprocessing_model.py | docling/models/page_preprocessing_model.py | import re
import warnings
from collections.abc import Iterable
from pathlib import Path
from typing import Literal, Optional
import numpy as np
from PIL import ImageDraw
from pydantic import BaseModel
from docling.datamodel.base_models import Page
from docling.datamodel.document import ConversionResult
from docling.datamodel.settings import settings
from docling.models.base_model import BasePageModel
from docling.utils.profiling import TimeRecorder
class PagePreprocessingOptions(BaseModel):
    """Options for :class:`PagePreprocessingModel`."""

    # Extra image scale requested by the user; None keeps only the default scale.
    images_scale: Optional[float]
    skip_cell_extraction: bool = (
        False  # Skip text cell extraction for VLM-only processing
    )
class PagePreprocessingModel(BasePageModel):
    """Prepares pages for the pipeline: renders page images and extracts/rates
    parsed text cells.

    BUGFIX: the debug helper ``draw_text_boxes`` previously drew its red
    rectangles on a throwaway ``image.copy()`` while showing/saving the
    unannotated original, so the debug output never contained the boxes.
    It now annotates and outputs the same copy.
    """

    def __init__(self, options: PagePreprocessingOptions):
        self.options = options

        # Pre-compiled regex patterns for efficiency
        self.GLYPH_RE = re.compile(r"GLYPH<[0-9A-Fa-f]+>")
        self.SLASH_G_RE = re.compile(r"(?:/G\d+){2,}")
        self.FRAG_RE = re.compile(r"\b[A-Za-z](?:/[a-z]{1,3}\.[a-z]{1,3}){2,}\b")
        self.SLASH_NUMBER_GARBAGE_RE = re.compile(
            r"(?:/\w+\s*){2,}"
        )  # Two or more "/token " sequences

    def __call__(
        self, conv_res: ConversionResult, page_batch: Iterable[Page]
    ) -> Iterable[Page]:
        """Populate images (and, unless disabled, parsed cells) for each valid page."""
        for page in page_batch:
            assert page._backend is not None
            if not page._backend.is_valid():
                yield page
            else:
                with TimeRecorder(conv_res, "page_parse"):
                    page = self._populate_page_images(page)
                    if not self.options.skip_cell_extraction:
                        page = self._parse_page_cells(conv_res, page)
                yield page

    # Generate the page image and store it in the page object
    def _populate_page_images(self, page: Page) -> Page:
        # default scale
        page.get_image(
            scale=1.0
        )  # puts the page image on the image cache at default scale

        images_scale = self.options.images_scale
        # user requested scales
        if images_scale is not None:
            page._default_image_scale = images_scale
            page.get_image(
                scale=images_scale
            )  # this will trigger storing the image in the internal cache

        return page

    # Extract and populate the page cells and store it in the page object
    def _parse_page_cells(self, conv_res: ConversionResult, page: Page) -> Page:
        assert page._backend is not None

        page.parsed_page = page._backend.get_segmented_page()
        assert page.parsed_page is not None

        # Rate the text quality from the PDF parser, and aggregate on page
        text_scores = []
        for c in page.cells:
            score = self.rate_text_quality(c.text)
            text_scores.append(score)

        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore", "Mean of empty slice", RuntimeWarning, "numpy"
            )
            conv_res.confidence.pages[page.page_no].parse_score = float(
                np.nanquantile(
                    text_scores, q=0.10
                )  # To emphasise problems in the parse_score, we take the 10% percentile score of all text cells.
            )

        # DEBUG code:
        def draw_text_boxes(image, cells, show: bool = False):
            # Work on a copy so the cached page image is not mutated, and
            # show/save that same annotated copy (fix: previously the boxes
            # were drawn on a discarded copy and the original was displayed).
            image = image.copy()
            draw = ImageDraw.Draw(image)
            for c in cells:
                x0, y0, x1, y1 = (
                    c.to_bounding_box().l,
                    c.to_bounding_box().t,
                    c.to_bounding_box().r,
                    c.to_bounding_box().b,
                )
                draw.rectangle([(x0, y0), (x1, y1)], outline="red")
            if show:
                image.show()
            else:
                out_path: Path = (
                    Path(settings.debug.debug_output_path)
                    / f"debug_{conv_res.input.file.stem}"
                )
                out_path.mkdir(parents=True, exist_ok=True)

                out_file = out_path / f"cells_page_{page.page_no:05}.png"
                image.save(str(out_file), format="png")

        if settings.debug.visualize_cells:
            draw_text_boxes(page.get_image(scale=1.0), page.cells)

        return page

    def rate_text_quality(self, text: str) -> float:
        """Heuristic 0..1 score of parser text quality (1.0 = clean text)."""
        # Hard errors: if any of these patterns are found, return 0.0 immediately.
        blacklist_chars = ["�"]
        if (
            any(text.find(c) >= 0 for c in blacklist_chars)
            or self.GLYPH_RE.search(text)
            or self.SLASH_G_RE.search(text)
            or self.SLASH_NUMBER_GARBAGE_RE.match(
                text
            )  # Check if text is mostly slash-number pattern
        ):
            return 0.0

        penalty = 0.0

        # Apply a penalty only if the fragmented words pattern occurs at least three times.
        frag_matches = self.FRAG_RE.findall(text)
        if len(frag_matches) >= 3:
            penalty += 0.1 * len(frag_matches)

        # Additional heuristic: if the average token length is below 2, add a penalty.
        # tokens = text.split()
        # if tokens and (sum(map(len, tokens)) / len(tokens)) < 2:
        #     penalty += 0.2

        return max(1.0 - penalty, 0.0)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/tesseract_ocr_cli_model.py | docling/models/tesseract_ocr_cli_model.py | import csv
import io
import logging
import os
import subprocess
import tempfile
from collections.abc import Iterable
from pathlib import Path
from subprocess import DEVNULL, PIPE, Popen
from typing import List, Optional, Tuple, Type
import pandas as pd
from docling_core.types.doc import BoundingBox, CoordOrigin
from docling_core.types.doc.page import TextCell
from docling.datamodel.accelerator_options import AcceleratorOptions
from docling.datamodel.base_models import Page
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import (
OcrOptions,
TesseractCliOcrOptions,
)
from docling.datamodel.settings import settings
from docling.models.base_ocr_model import BaseOcrModel
from docling.utils.ocr_utils import (
map_tesseract_script,
parse_tesseract_orientation,
tesseract_box_to_bounding_rectangle,
)
from docling.utils.profiling import TimeRecorder
_log = logging.getLogger(__name__)
class TesseractOcrCliModel(BaseOcrModel):
    """OCR engine backed by the external ``tesseract`` command-line binary.

    OCR rectangles are rasterized at 3x (216 dpi), optionally
    orientation-corrected via tesseract's OSD mode (``--psm 0``), OCRed
    through the CLI's TSV output, and the boxes mapped back to page
    coordinates.
    """

    def __init__(
        self,
        enabled: bool,
        artifacts_path: Optional[Path],
        options: TesseractCliOcrOptions,
        accelerator_options: AcceleratorOptions,
    ):
        super().__init__(
            enabled=enabled,
            artifacts_path=artifacts_path,
            options=options,
            accelerator_options=accelerator_options,
        )
        self.options: TesseractCliOcrOptions

        self.scale = 3  # multiplier for 72 dpi == 216 dpi.

        self._name: Optional[str] = None
        self._version: Optional[str] = None
        self._tesseract_languages: Optional[List[str]] = None
        self._script_prefix: Optional[str] = None
        # "auto" language triggers per-rectangle script detection via OSD.
        self._is_auto: bool = "auto" in self.options.lang

        if self.enabled:
            try:
                self._get_name_and_version()
                self._set_languages_and_prefix()
            except Exception as exc:
                raise RuntimeError(
                    f"Tesseract is not available, aborting: {exc} "
                    "Install tesseract on your system and the tesseract binary is discoverable. "
                    "The actual command for Tesseract can be specified in `pipeline_options.ocr_options.tesseract_cmd='tesseract'`. "
                    "Alternatively, Docling has support for other OCR engines. See the documentation."
                )

    def _get_name_and_version(self) -> Tuple[str, str]:
        """Run ``tesseract --version`` once and cache ``(name, version)``."""
        if self._name is not None and self._version is not None:
            return self._name, self._version  # type: ignore

        cmd = [self.options.tesseract_cmd, "--version"]

        proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
        stdout, stderr = proc.communicate()

        proc.wait()

        # HACK: Windows versions of Tesseract output the version to stdout, Linux versions
        # to stderr, so check both.
        version_line = (
            (stdout.decode("utf8").strip() or stderr.decode("utf8").strip())
            .split("\n")[0]
            .strip()
        )

        # If everything else fails...
        if not version_line:
            version_line = "tesseract XXX"

        # NOTE(review): raises ValueError if the first line contains more than
        # one space — TODO confirm tesseract's version-line format on all OSes.
        name, version = version_line.split(" ")

        self._name = name
        self._version = version

        return name, version

    def _run_tesseract(self, ifilename: str, osd: Optional[pd.DataFrame]):
        r"""
        Run tesseract CLI and return a dataframe of non-empty TSV text rows.

        In "auto" mode the OSD result (when available) selects the
        script-specific language model; otherwise the configured languages
        are joined with "+".
        """
        cmd = [self.options.tesseract_cmd]

        if self._is_auto and osd is not None:
            lang = self._parse_language(osd)
            if lang is not None:
                cmd.append("-l")
                cmd.append(lang)
        elif self.options.lang is not None and len(self.options.lang) > 0:
            cmd.append("-l")
            cmd.append("+".join(self.options.lang))

        if self.options.path is not None:
            cmd.append("--tessdata-dir")
            cmd.append(self.options.path)

        # Add PSM option if specified in the configuration
        if self.options.psm is not None:
            cmd.extend(["--psm", str(self.options.psm)])

        cmd += [ifilename, "stdout", "tsv"]
        _log.info("command: {}".format(" ".join(cmd)))

        output = subprocess.run(cmd, stdout=PIPE, stderr=DEVNULL, check=True)

        # _log.info(output)

        # Decode the byte string to a regular string
        decoded_data = output.stdout.decode("utf-8")
        # _log.info(decoded_data)

        # Read the TSV file generated by Tesseract
        df_result = pd.read_csv(
            io.StringIO(decoded_data), quoting=csv.QUOTE_NONE, sep="\t"
        )

        # Display the dataframe (optional)
        # _log.info("df: ", df.head())

        # Filter rows that contain actual text (ignore header or empty rows)
        df_filtered = df_result[
            df_result["text"].notna() & (df_result["text"].apply(str).str.strip() != "")
        ]

        return df_filtered

    def _perform_osd(self, ifilename: str) -> pd.DataFrame:
        r"""
        Run tesseract in PSM 0 mode to detect the language

        Returns a two-column (key, value) dataframe parsed from the
        colon-separated OSD output.
        """
        cmd = [self.options.tesseract_cmd]
        cmd.extend(["--psm", "0", "-l", "osd", ifilename, "stdout"])
        _log.info("command: {}".format(" ".join(cmd)))
        output = subprocess.run(cmd, capture_output=True, check=True)
        decoded_data = output.stdout.decode("utf-8")
        df_detected = pd.read_csv(
            io.StringIO(decoded_data), sep=":", header=None, names=["key", "value"]
        )
        return df_detected

    def _parse_language(self, df_osd: pd.DataFrame) -> Optional[str]:
        """Map the OSD-detected script to an installed tesseract language, or None."""
        assert self._tesseract_languages is not None
        scripts = df_osd.loc[df_osd["key"] == "Script"].value.tolist()
        if len(scripts) == 0:
            _log.warning("Tesseract cannot detect the script of the page")
            return None

        script = map_tesseract_script(scripts[0].strip())
        lang = f"{self._script_prefix}{script}"

        # Check if the detected language has been installed
        if lang not in self._tesseract_languages:
            msg = f"Tesseract detected the script '{script}' and language '{lang}'."
            msg += " However this language is not installed in your system and will be ignored."
            _log.warning(msg)
            return None

        _log.debug(
            f"Using tesseract model for the detected script '{script}' and language '{lang}'"
        )
        return lang

    def _set_languages_and_prefix(self):
        r"""
        Read and set the languages installed in tesseract and decide the script prefix
        """
        # Get all languages
        cmd = [self.options.tesseract_cmd]
        cmd.append("--list-langs")
        _log.info("command: {}".format(" ".join(cmd)))
        output = subprocess.run(cmd, stdout=PIPE, stderr=DEVNULL, check=True)

        decoded_data = output.stdout.decode("utf-8")
        df_list = pd.read_csv(io.StringIO(decoded_data), header=None)
        # First row is the "List of available languages" banner.
        self._tesseract_languages = df_list[0].tolist()[1:]

        # Decide the script prefix
        if any(lang.startswith("script/") for lang in self._tesseract_languages):
            script_prefix = "script/"
        else:
            script_prefix = ""

        self._script_prefix = script_prefix

    def __call__(
        self, conv_res: ConversionResult, page_batch: Iterable[Page]
    ) -> Iterable[Page]:
        """OCR each page's rectangles via the tesseract CLI and attach the cells."""
        if not self.enabled:
            yield from page_batch
            return

        for page_i, page in enumerate(page_batch):
            assert page._backend is not None
            if not page._backend.is_valid():
                yield page
            else:
                with TimeRecorder(conv_res, "ocr"):
                    ocr_rects = self.get_ocr_rects(page)

                    all_ocr_cells = []
                    for ocr_rect_i, ocr_rect in enumerate(ocr_rects):
                        # Skip zero area boxes
                        if ocr_rect.area() == 0:
                            continue
                        high_res_image = page._backend.get_page_image(
                            scale=self.scale, cropbox=ocr_rect
                        )
                        try:
                            # delete=False so the CLI can reopen the file;
                            # cleanup happens in the finally below.
                            with tempfile.NamedTemporaryFile(
                                suffix=".png", mode="w+b", delete=False
                            ) as image_file:
                                fname = image_file.name
                                high_res_image.save(image_file)
                            doc_orientation = 0
                            df_osd: Optional[pd.DataFrame] = None
                            try:
                                df_osd = self._perform_osd(fname)
                                doc_orientation = _parse_orientation(df_osd)
                            except subprocess.CalledProcessError as exc:
                                _log.error(
                                    "OSD failed (doc %s, page: %s, "
                                    "OCR rectangle: %s, processed image file %s):\n %s",
                                    conv_res.input.file,
                                    page_i,
                                    ocr_rect_i,
                                    image_file,
                                    exc.stderr,
                                )
                                # Skipping if OSD fail when in auto mode, otherwise proceed
                                # to OCR in the hope OCR will succeed while OSD failed
                                if self._is_auto:
                                    continue
                            if doc_orientation != 0:
                                # Counter-rotate so tesseract sees upright text.
                                high_res_image = high_res_image.rotate(
                                    -doc_orientation, expand=True
                                )
                                high_res_image.save(fname)
                            try:
                                df_result = self._run_tesseract(fname, df_osd)
                            except subprocess.CalledProcessError as exc:
                                _log.error(
                                    "tesseract OCR failed (doc %s, page: %s, "
                                    "OCR rectangle: %s, processed image file %s):\n %s",
                                    conv_res.input.file,
                                    page_i,
                                    ocr_rect_i,
                                    image_file,
                                    exc.stderr,
                                )
                                continue
                        finally:
                            if os.path.exists(fname):
                                os.remove(fname)

                        # _log.info(df_result)

                        # Print relevant columns (bounding box and text)
                        for ix, row in df_result.iterrows():
                            text = row["text"]
                            conf = row["conf"]

                            left, top = float(row["left"]), float(row["top"])
                            right = left + float(row["width"])
                            # NOTE(review): "height" is not cast to float like
                            # the other columns — TODO confirm intentional.
                            bottom = top + row["height"]
                            bbox = BoundingBox(
                                l=left,
                                t=top,
                                r=right,
                                b=bottom,
                                coord_origin=CoordOrigin.TOPLEFT,
                            )
                            rect = tesseract_box_to_bounding_rectangle(
                                bbox,
                                original_offset=ocr_rect,
                                scale=self.scale,
                                orientation=doc_orientation,
                                im_size=high_res_image.size,
                            )
                            cell = TextCell(
                                index=ix,
                                text=str(text),
                                orig=str(text),
                                from_ocr=True,
                                confidence=conf / 100.0,
                                rect=rect,
                            )
                            all_ocr_cells.append(cell)

                    # Post-process the cells
                    self.post_process_cells(all_ocr_cells, page)

                # DEBUG code:
                if settings.debug.visualize_ocr:
                    self.draw_ocr_rects_and_cells(conv_res, page, ocr_rects)

                yield page

    @classmethod
    def get_options_type(cls) -> Type[OcrOptions]:
        """Return the options class that configures this OCR engine."""
        return TesseractCliOcrOptions
def _parse_orientation(df_osd: pd.DataFrame) -> int:
    """Read the page rotation (in degrees) out of a tesseract OSD dataframe.

    Expects the two-column key/value frame produced by
    ``TesseractOcrCliModel._perform_osd`` and assumes the
    "Orientation in degrees" row is present.
    """
    keys = df_osd["key"].to_numpy()
    values = df_osd["value"].to_numpy()
    raw_value = values[keys == "Orientation in degrees"][0]
    return parse_tesseract_orientation(raw_value.strip())
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/ocr_mac_model.py | docling/models/ocr_mac_model.py | import logging
import sys
import tempfile
from collections.abc import Iterable
from pathlib import Path
from typing import Optional, Type
from docling_core.types.doc import BoundingBox, CoordOrigin
from docling_core.types.doc.page import BoundingRectangle, TextCell
from docling.datamodel.accelerator_options import AcceleratorOptions
from docling.datamodel.base_models import Page
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import (
OcrMacOptions,
OcrOptions,
)
from docling.datamodel.settings import settings
from docling.models.base_ocr_model import BaseOcrModel
from docling.utils.profiling import TimeRecorder
_log = logging.getLogger(__name__)
class OcrMacModel(BaseOcrModel):
    """OCR engine backed by Apple's Vision framework via the `ocrmac` package.

    Only usable on macOS; construction fails on other platforms when enabled.
    """

    def __init__(
        self,
        enabled: bool,
        artifacts_path: Optional[Path],
        options: OcrMacOptions,
        accelerator_options: AcceleratorOptions,
    ):
        """Initialize the engine, importing `ocrmac` lazily when enabled.

        Raises:
            RuntimeError: if enabled on a non-macOS platform.
            ImportError: if the optional `ocrmac` dependency is missing.
        """
        super().__init__(
            enabled=enabled,
            artifacts_path=artifacts_path,
            options=options,
            accelerator_options=accelerator_options,
        )
        self.options: OcrMacOptions

        self.scale = 3  # multiplier for 72 dpi == 216 dpi.

        if self.enabled:
            if "darwin" != sys.platform:
                raise RuntimeError("OcrMac is only supported on Mac.")
            install_errmsg = (
                "ocrmac is not correctly installed. "
                "Please install it via `pip install ocrmac` to use this OCR engine. "
                "Alternatively, Docling has support for other OCR engines. See the documentation: "
                "https://docling-project.github.io/docling/installation/"
            )
            try:
                from ocrmac import ocrmac
            except ImportError:
                raise ImportError(install_errmsg)

            # Keep a handle to the OCR entry point; instantiated per image below.
            self.reader_RIL = ocrmac.OCR

    def __call__(
        self, conv_res: ConversionResult, page_batch: Iterable[Page]
    ) -> Iterable[Page]:
        """Run OCR over each page's OCR rectangles and attach recognized cells.

        Pages with an invalid backend are yielded unchanged.
        """
        if not self.enabled:
            yield from page_batch
            return

        for page in page_batch:
            assert page._backend is not None
            if not page._backend.is_valid():
                yield page
            else:
                with TimeRecorder(conv_res, "ocr"):
                    ocr_rects = self.get_ocr_rects(page)

                    all_ocr_cells = []
                    for ocr_rect in ocr_rects:
                        # Skip zero area boxes
                        if ocr_rect.area() == 0:
                            continue
                        high_res_image = page._backend.get_page_image(
                            scale=self.scale, cropbox=ocr_rect
                        )

                        # ocrmac works on files, so render the crop to a
                        # temporary PNG for the duration of the call.
                        with tempfile.NamedTemporaryFile(
                            suffix=".png", mode="w"
                        ) as image_file:
                            fname = image_file.name
                            high_res_image.save(fname)

                            boxes = self.reader_RIL(
                                fname,
                                recognition_level=self.options.recognition,
                                framework=self.options.framework,
                                language_preference=self.options.lang,
                            ).recognize()

                        im_width, im_height = high_res_image.size
                        cells = []
                        for ix, (text, confidence, box) in enumerate(boxes):
                            # NOTE(review): the math below treats ocrmac boxes
                            # as normalized [0..1] (x, y, w, h) with a
                            # bottom-left origin — confirm against ocrmac docs.
                            x = float(box[0])
                            y = float(box[1])
                            w = float(box[2])
                            h = float(box[3])

                            # Convert to absolute pixels, top-left origin.
                            x1 = x * im_width
                            y2 = (1 - y) * im_height

                            x2 = x1 + w * im_width
                            y1 = y2 - h * im_height

                            # Undo the render scale to return to page coords.
                            left = x1 / self.scale
                            top = y1 / self.scale
                            right = x2 / self.scale
                            bottom = y2 / self.scale

                            cells.append(
                                TextCell(
                                    index=ix,
                                    text=text,
                                    orig=text,
                                    from_ocr=True,
                                    confidence=confidence,
                                    rect=BoundingRectangle.from_bounding_box(
                                        BoundingBox.from_tuple(
                                            coord=(left, top, right, bottom),
                                            origin=CoordOrigin.TOPLEFT,
                                        )
                                    ),
                                )
                            )

                        # del high_res_image
                        all_ocr_cells.extend(cells)

                    # Post-process the cells
                    self.post_process_cells(all_ocr_cells, page)

                # DEBUG code:
                if settings.debug.visualize_ocr:
                    self.draw_ocr_rects_and_cells(conv_res, page, ocr_rects)

                yield page

    @classmethod
    def get_options_type(cls) -> Type[OcrOptions]:
        """Return the options class whose presence selects this OCR engine."""
        return OcrMacOptions
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/factories/ocr_factory.py | docling/models/factories/ocr_factory.py | import logging
from docling.models.base_ocr_model import BaseOcrModel
from docling.models.factories.base_factory import BaseFactory
logger = logging.getLogger(__name__)
class OcrFactory(BaseFactory[BaseOcrModel]):
    """Factory for OCR engine models exposed via the "ocr_engines" plugin hook."""

    def __init__(self, *args, **kwargs):
        # Bind the factory to the "ocr_engines" plugin attribute name.
        super().__init__("ocr_engines", *args, **kwargs)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/factories/picture_description_factory.py | docling/models/factories/picture_description_factory.py | import logging
from docling.models.factories.base_factory import BaseFactory
from docling.models.picture_description_base_model import PictureDescriptionBaseModel
logger = logging.getLogger(__name__)
class PictureDescriptionFactory(BaseFactory[PictureDescriptionBaseModel]):
    """Factory for picture-description models exposed via the "picture_description" hook."""

    def __init__(self, *args, **kwargs):
        # Bind the factory to the "picture_description" plugin attribute name.
        super().__init__("picture_description", *args, **kwargs)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/factories/table_factory.py | docling/models/factories/table_factory.py | from docling.models.base_table_model import BaseTableStructureModel
from docling.models.factories.base_factory import BaseFactory
class TableStructureFactory(BaseFactory[BaseTableStructureModel]):
    """Factory for table-structure models exposed via the "table_structure_engines" hook."""

    def __init__(self, *args, **kwargs):
        # Bind the factory to the "table_structure_engines" plugin attribute name.
        super().__init__("table_structure_engines", *args, **kwargs)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/factories/__init__.py | docling/models/factories/__init__.py | import logging
from functools import lru_cache
from docling.models.factories.layout_factory import LayoutFactory
from docling.models.factories.ocr_factory import OcrFactory
from docling.models.factories.picture_description_factory import (
PictureDescriptionFactory,
)
from docling.models.factories.table_factory import TableStructureFactory
logger = logging.getLogger(__name__)
@lru_cache
def get_ocr_factory(allow_external_plugins: bool = False) -> OcrFactory:
    """Return a process-wide cached OcrFactory with all plugins loaded."""
    ocr_factory = OcrFactory()
    ocr_factory.load_from_plugins(allow_external_plugins=allow_external_plugins)
    logger.info("Registered ocr engines: %r", ocr_factory.registered_kind)
    return ocr_factory
@lru_cache
def get_picture_description_factory(
    allow_external_plugins: bool = False,
) -> PictureDescriptionFactory:
    """Return a process-wide cached PictureDescriptionFactory with plugins loaded."""
    description_factory = PictureDescriptionFactory()
    description_factory.load_from_plugins(allow_external_plugins=allow_external_plugins)
    logger.info(
        "Registered picture descriptions: %r", description_factory.registered_kind
    )
    return description_factory
@lru_cache
def get_layout_factory(allow_external_plugins: bool = False) -> LayoutFactory:
    """Return a process-wide cached LayoutFactory with all plugins loaded."""
    layout_factory = LayoutFactory()
    layout_factory.load_from_plugins(allow_external_plugins=allow_external_plugins)
    logger.info("Registered layout engines: %r", layout_factory.registered_kind)
    return layout_factory
@lru_cache
def get_table_structure_factory(
    allow_external_plugins: bool = False,
) -> TableStructureFactory:
    """Return a process-wide cached TableStructureFactory with plugins loaded."""
    table_factory = TableStructureFactory()
    table_factory.load_from_plugins(allow_external_plugins=allow_external_plugins)
    logger.info(
        "Registered table structure engines: %r", table_factory.registered_kind
    )
    return table_factory
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/factories/layout_factory.py | docling/models/factories/layout_factory.py | from docling.models.base_layout_model import BaseLayoutModel
from docling.models.factories.base_factory import BaseFactory
class LayoutFactory(BaseFactory[BaseLayoutModel]):
    """Factory for layout models exposed via the "layout_engines" plugin hook."""

    def __init__(self, *args, **kwargs):
        # Bind the factory to the "layout_engines" plugin attribute name.
        super().__init__("layout_engines", *args, **kwargs)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/factories/base_factory.py | docling/models/factories/base_factory.py | import enum
import logging
from abc import ABCMeta
from typing import Generic, Optional, Type, TypeVar
from pluggy import PluginManager
from pydantic import BaseModel
from docling.datamodel.pipeline_options import BaseOptions
from docling.models.base_model import BaseModelWithOptions
A = TypeVar("A", bound=BaseModelWithOptions)
logger = logging.getLogger(__name__)
class FactoryMeta(BaseModel):
    """Registration metadata recorded for each class a factory registers."""

    kind: str  # option "kind" identifier the class was registered under
    plugin_name: str  # entry-point plugin name that provided the class
    module: str  # module path of the providing plugin
class BaseFactory(Generic[A], metaclass=ABCMeta):
    """Plugin-driven registry mapping option classes to model classes.

    Concrete factories fix `plugin_attr_name` (e.g. "ocr_engines"); plugin
    modules expose a callable of that name returning
    ``{plugin_attr_name: [model classes]}``.
    """

    default_plugin_name = "docling"

    def __init__(self, plugin_attr_name: str, plugin_name=default_plugin_name):
        self.plugin_name = plugin_name
        self.plugin_attr_name = plugin_attr_name
        # Options type -> model class, and options type -> registration metadata.
        self._classes: dict[Type[BaseOptions], Type[A]] = {}
        self._meta: dict[Type[BaseOptions], FactoryMeta] = {}

    @property
    def registered_kind(self) -> list[str]:
        """String identifiers ("kind") of all registered option types."""
        return [opt.kind for opt in self._classes.keys()]

    def get_enum(self) -> enum.Enum:
        """Build a string enum of the registered kinds (useful for CLI choices)."""
        return enum.Enum(
            self.plugin_attr_name + "_enum",
            names={kind: kind for kind in self.registered_kind},
            type=str,
            module=__name__,
        )

    @property
    def classes(self):
        """Mapping of options type to registered model class."""
        return self._classes

    @property
    def registered_meta(self):
        """Mapping of options type to its FactoryMeta registration record."""
        return self._meta

    def create_instance(self, options: BaseOptions, **kwargs) -> A:
        """Instantiate the model class registered for ``type(options)``.

        Raises:
            RuntimeError: if no class is registered for the options type.
        """
        try:
            _cls = self._classes[type(options)]
            return _cls(options=options, **kwargs)
        except KeyError:
            raise RuntimeError(self._err_msg_on_class_not_found(options.kind))

    def create_options(self, kind: str, *args, **kwargs) -> BaseOptions:
        """Instantiate the options class whose ``kind`` matches the given string.

        Raises:
            RuntimeError: if no registered options type has that kind.
        """
        # Iterate keys directly; the mapped values were previously fetched via
        # .items() and discarded.
        for opt_cls in self._classes:
            if opt_cls.kind == kind:
                return opt_cls(*args, **kwargs)
        raise RuntimeError(self._err_msg_on_class_not_found(kind))

    def _err_msg_on_class_not_found(self, kind: str):
        """Compose a helpful error listing all registered kinds and classes."""
        msg = []
        for opt, cls in self._classes.items():
            msg.append(f"\t{opt.kind!r} => {cls!r}")
        msg_str = "\n".join(msg)
        return f"No class found with the name {kind!r}, known classes are:\n{msg_str}"

    def register(self, cls: Type[A], plugin_name: str, plugin_module_name: str):
        """Register ``cls`` under its declared options type.

        Raises:
            ValueError: if the options type is already registered.
        """
        opt_type = cls.get_options_type()
        if opt_type in self._classes:
            raise ValueError(
                f"{opt_type.kind!r} already registered to class {self._classes[opt_type]!r}"
            )
        self._classes[opt_type] = cls
        self._meta[opt_type] = FactoryMeta(
            kind=opt_type.kind, plugin_name=plugin_name, module=plugin_module_name
        )

    def load_from_plugins(
        self, plugin_name: Optional[str] = None, allow_external_plugins: bool = False
    ):
        """Discover setuptools entry points and register the classes they expose.

        Non-``docling.*`` plugin modules are skipped unless
        `allow_external_plugins` is True.
        """
        plugin_name = plugin_name or self.plugin_name

        plugin_manager = PluginManager(plugin_name)
        plugin_manager.load_setuptools_entrypoints(plugin_name)

        # Use a distinct loop variable: the original code shadowed the
        # `plugin_name` parameter here.
        for ep_plugin_name, plugin_module in plugin_manager.list_name_plugin():
            plugin_module_name = str(plugin_module.__name__)  # type: ignore

            if not allow_external_plugins and not plugin_module_name.startswith(
                "docling."
            ):
                logger.warning(
                    f"The plugin {ep_plugin_name} will not be loaded because Docling is being executed with allow_external_plugins=false."
                )
                continue

            attr = getattr(plugin_module, self.plugin_attr_name, None)
            if callable(attr):
                logger.info("Loading plugin %r", ep_plugin_name)

                config = attr()
                self.process_plugin(config, ep_plugin_name, plugin_module_name)

    def process_plugin(self, config, plugin_name: str, plugin_module_name: str):
        """Register every class listed under ``plugin_attr_name`` in ``config``."""
        for item in config[self.plugin_attr_name]:
            try:
                self.register(item, plugin_name, plugin_module_name)
            except ValueError:
                logger.warning("%r already registered", item)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/utils/__init__.py | docling/models/utils/__init__.py | python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false | |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/utils/generation_utils.py | docling/models/utils/generation_utils.py | import logging
import re
import sys
from abc import abstractmethod
from typing import List
from transformers import StoppingCriteria
_log = logging.getLogger(__name__)
class GenerationStopper:
"""
Base interface for stopping logic.
- should_stop(s): True to stop given the current decoded text window.
- lookback_tokens(): how many tokens should be considered (default: sys.maxsize).
"""
@abstractmethod
def should_stop(self, s: str) -> bool:
pass
def lookback_tokens(self) -> int:
return sys.maxsize
class DocTagsRepetitionStopper(GenerationStopper):
"""
Detects repetitive <tag>...<loc_x><loc_y><loc_w><loc_h>text</tag> blocks,
but only when repeats are **consecutive** and both tag & inner text are identical.
Performance:
- Heavy check runs every N calls (default 32).
- Only decodes the last LOOKBACK_TOKENS tokens per sequence (default 200).
"""
def __init__(self, *, N: int = 32, lookback_tokens: int = 200):
self.N = max(1, int(N))
self._lookback_tokens = max(1, int(lookback_tokens))
self._call_count = 0
# <tag> ... <loc_x><loc_y><loc_w><loc_h> text ... </tag>
self._PATTERN = re.compile(
r"""
<(?P<tag>[a-zA-Z0-9_]+)>\s*
(?P<prefix>.*?)?
<loc_(?P<x>\d+)><loc_(?P<y>\d+)><loc_(?P<w>\d+)><loc_(?P<h>\d+)>
(?P<text>.*?)
</(?P=tag)>
""",
re.DOTALL | re.VERBOSE,
)
# --- small helper ---
def _regular(self, vals: List[int]) -> bool:
"""3+ strictly increasing values with ~regular spacing (±20%)."""
if len(vals) < 3:
return False
diffs = [b - a for a, b in zip(vals, vals[1:])]
if any(d <= 0 for d in diffs):
return False
mean = sum(diffs) / len(diffs)
tol = 0.2 * mean
return all(abs(d - mean) <= tol for d in diffs)
def should_stop(self, s: str) -> bool:
"""
Trip only on **consecutive** runs (no other matched blocks between) of ≥3 items
with the same <tag> and identical inner text, where within that run we see:
- any exact duplicate (x,y,w,h), or
- stable X/W with regular Y progression, or
- stable Y/H with regular X progression.
"""
# Stream matches and evaluate runs on-the-fly to stay compact and fast.
prev_tag = prev_text = None
run = [] # list of (x,y,w,h)
def run_repetitive(boxes: List[tuple]) -> bool:
if len(boxes) < 3:
return False
# duplicates?
if len(set(boxes)) < len(boxes):
return True
xs, ys, ws, hs = zip(*boxes)
x_stable = all(x == xs[0] for x in xs)
y_stable = all(y == ys[0] for y in ys)
w_stable = all(w == ws[0] for w in ws)
h_stable = all(h == hs[0] for h in hs)
# horizontal (down the page): X/W stable, Y regular
if (x_stable or w_stable) and self._regular(list(ys)):
return True
# vertical (across): Y/H stable, X regular
if (y_stable or h_stable) and self._regular(list(xs)):
return True
return False
for m in self._PATTERN.finditer(s):
tag, text = m.group("tag"), m.group("text")
box = (
int(m.group("x")),
int(m.group("y")),
int(m.group("w")),
int(m.group("h")),
)
if prev_tag == tag and prev_text == text:
run.append(box) # consecutive same-tag+text
else:
# evaluate previous run before starting a new one
if run_repetitive(run):
return True
prev_tag, prev_text = tag, text
run = [box]
# check the last run
return run_repetitive(run)
class HFStoppingCriteriaWrapper(StoppingCriteria):
    """
    Adapts any GenerationStopper to HuggingFace Transformers.

    Decodes exactly min(seq_len, stopper.lookback_tokens()) tokens from the end
    of each sequence and asks the wrapped stopper whether generation should halt.
    Decoding or stopper errors are logged and treated as "keep generating".
    """

    def __init__(
        self,
        tokenizer,
        stopper: GenerationStopper,
        *,
        skip_special_tokens: bool = False,
    ):
        self.tokenizer = tokenizer
        self.stopper = stopper
        self.skip_special_tokens = skip_special_tokens

    def __call__(self, input_ids, scores, **kwargs) -> bool:
        # Clamp the lookback window to at least one token.
        window_len = max(1, int(self.stopper.lookback_tokens()))
        for seq in input_ids:  # (batch, seq_len)
            tail = seq[-window_len:]  # slicing handles window_len > len(seq)
            try:
                decoded = self.tokenizer.decode(
                    tail, skip_special_tokens=self.skip_special_tokens
                )
            except Exception as e:
                _log.info(f"Decoding failed for stopping check: {e}")
                continue
            try:
                should_halt = self.stopper.should_stop(decoded)
            except Exception as e:
                _log.info(f"Error in TextStopper.should_stop: {e}")
                continue
            if should_halt:
                _log.info(
                    "HF wrapper: stopping due to TextStopper.should_stop==True"
                )
                return True
        return False
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/utils/hf_model_download.py | docling/models/utils/hf_model_download.py | import logging
from pathlib import Path
from typing import Optional
_log = logging.getLogger(__name__)
def download_hf_model(
    repo_id: str,
    local_dir: Optional[Path] = None,
    force: bool = False,
    progress: bool = False,
    revision: Optional[str] = None,
) -> Path:
    """Fetch a Hugging Face repo snapshot and return its local directory.

    Progress bars are globally disabled unless `progress` is True; `force`
    re-downloads even when a cached snapshot exists.
    """
    from huggingface_hub import snapshot_download
    from huggingface_hub.utils import disable_progress_bars

    if not progress:
        disable_progress_bars()

    return Path(
        snapshot_download(
            repo_id=repo_id,
            force_download=force,
            local_dir=local_dir,
            revision=revision,
        )
    )
class HuggingFaceModelDownloadMixin:
    """Mixin providing a static `download_models` helper for HF snapshot downloads."""

    @staticmethod
    def download_models(
        repo_id: str,
        local_dir: Optional[Path] = None,
        force: bool = False,
        progress: bool = False,
        revision: Optional[str] = None,
    ) -> Path:
        """Download (or reuse) a model snapshot; thin wrapper over `download_hf_model`."""
        return download_hf_model(
            repo_id=repo_id,
            local_dir=local_dir,
            force=force,
            progress=progress,
            revision=revision,
        )
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
def ocr_engines():
    """Plugin hook: expose the built-in OCR engine classes.

    Imports are local so optional OCR dependencies load only when invoked.
    """
    from docling.models.auto_ocr_model import OcrAutoModel
    from docling.models.easyocr_model import EasyOcrModel
    from docling.models.ocr_mac_model import OcrMacModel
    from docling.models.rapid_ocr_model import RapidOcrModel
    from docling.models.tesseract_ocr_cli_model import TesseractOcrCliModel
    from docling.models.tesseract_ocr_model import TesseractOcrModel

    return {
        "ocr_engines": [
            OcrAutoModel,
            EasyOcrModel,
            OcrMacModel,
            RapidOcrModel,
            TesseractOcrModel,
            TesseractOcrCliModel,
        ]
    }
def picture_description():
    """Plugin hook: expose the built-in picture-description model classes."""
    from docling.models.picture_description_api_model import PictureDescriptionApiModel
    from docling.models.picture_description_vlm_model import PictureDescriptionVlmModel

    return {
        "picture_description": [
            PictureDescriptionVlmModel,
            PictureDescriptionApiModel,
        ]
    }
def layout_engines():
    """Plugin hook: expose the built-in layout model classes."""
    from docling.experimental.models.table_crops_layout_model import (
        TableCropsLayoutModel,
    )
    from docling.models.layout_model import LayoutModel

    return {
        "layout_engines": [
            LayoutModel,
            TableCropsLayoutModel,
        ]
    }
def table_structure_engines():
    """Plugin hook: expose the built-in table-structure model classes."""
    from docling.models.table_structure_model import TableStructureModel

    return {
        "table_structure_engines": [
            TableStructureModel,
        ]
    }
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/plugins/__init__.py | docling/models/plugins/__init__.py | python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false | |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/vlm_models_inline/nuextract_transformers_model.py | docling/models/vlm_models_inline/nuextract_transformers_model.py | import logging
import sys
import time
from collections.abc import Iterable
from pathlib import Path
from typing import Any, Optional, Union
import numpy as np
from PIL.Image import Image
from transformers import AutoModelForImageTextToText, AutoProcessor, GenerationConfig
from docling.datamodel.accelerator_options import (
AcceleratorOptions,
)
from docling.datamodel.base_models import VlmPrediction, VlmStopReason
from docling.datamodel.pipeline_options_vlm_model import InlineVlmOptions
from docling.models.base_model import BaseVlmModel
from docling.models.utils.hf_model_download import (
HuggingFaceModelDownloadMixin,
)
from docling.utils.accelerator_utils import decide_device
_log = logging.getLogger(__name__)
# Source code from https://huggingface.co/numind/NuExtract-2.0-8B
def process_all_vision_info(messages, examples=None):
    """
    Process vision information from both messages and in-context examples, supporting batch processing.

    Args:
        messages: List of message dictionaries (single input) OR list of message lists (batch input)
        examples: Optional list of example dictionaries (single input) OR list of example lists (batch)

    Returns:
        A flat list of all images in the correct order:
        - For single input: example images followed by message images
        - For batch input: interleaved as (item1 examples, item1 input, item2 examples, item2 input, etc.)
        - Returns None if no images were found

    Raises:
        ImportError: if the optional `qwen_vl_utils` dependency is missing.
        ValueError: if a batched `examples` list does not match `messages` in length.
    """
    try:
        from qwen_vl_utils import fetch_image, process_vision_info
    except ImportError:
        raise ImportError(
            "qwen-vl-utils is required for NuExtractTransformersModel. "
            "Please install it with: pip install qwen-vl-utils"
        )
    # FIX: removed a duplicate, unguarded `from qwen_vl_utils import ...` that
    # followed this block; it was redundant on success and would have bypassed
    # the descriptive ImportError above on failure.

    # Helper function to extract images from examples
    def extract_example_images(example_item):
        if not example_item:
            return []
        # Handle both list of examples and single example
        examples_to_process = (
            example_item if isinstance(example_item, list) else [example_item]
        )
        images = []
        for example in examples_to_process:
            if (
                isinstance(example.get("input"), dict)
                and example["input"].get("type") == "image"
            ):
                images.append(fetch_image(example["input"]))
        return images

    # Normalize inputs to always be batched format
    is_batch = messages and isinstance(messages[0], list)
    messages_batch = messages if is_batch else [messages]
    is_batch_examples = (
        examples
        and isinstance(examples, list)
        and (isinstance(examples[0], list) or examples[0] is None)
    )
    examples_batch = (
        examples
        if is_batch_examples
        else ([examples] if examples is not None else None)
    )

    # Ensure examples batch matches messages batch if provided
    if examples and len(examples_batch) != len(messages_batch):
        if not is_batch and len(examples_batch) == 1:
            # Single example set for a single input is fine
            pass
        else:
            raise ValueError("Examples batch length must match messages batch length")

    # Process all inputs, maintaining correct order
    all_images = []
    for i, message_group in enumerate(messages_batch):
        # Get example images for this input
        if examples and i < len(examples_batch):
            input_example_images = extract_example_images(examples_batch[i])
            all_images.extend(input_example_images)

        # Get message images for this input
        input_message_images = process_vision_info(message_group)[0] or []
        all_images.extend(input_message_images)

    return all_images if all_images else None
class NuExtractTransformersModel(BaseVlmModel, HuggingFaceModelDownloadMixin):
    """NuExtract VLM wrapper: template-driven structured extraction from images."""

    def __init__(
        self,
        enabled: bool,
        artifacts_path: Optional[Path],
        accelerator_options: AcceleratorOptions,
        vlm_options: InlineVlmOptions,
    ):
        """Load the NuExtract model/processor when enabled; inert otherwise.

        `artifacts_path` is used directly (or its repo cache subfolder);
        when None, the model is downloaded from the Hugging Face Hub.
        """
        self.enabled = enabled
        self.vlm_options = vlm_options

        if self.enabled:
            import torch

            self.device = decide_device(
                accelerator_options.device,
                supported_devices=vlm_options.supported_devices,
            )
            _log.debug(f"Available device for NuExtract VLM: {self.device}")

            self.max_new_tokens = vlm_options.max_new_tokens
            self.temperature = vlm_options.temperature

            repo_cache_folder = vlm_options.repo_id.replace("/", "--")

            if artifacts_path is None:
                artifacts_path = self.download_models(
                    repo_id=self.vlm_options.repo_id,
                    revision=self.vlm_options.revision,
                )
            elif (artifacts_path / repo_cache_folder).exists():
                artifacts_path = artifacts_path / repo_cache_folder

            self.processor = AutoProcessor.from_pretrained(
                artifacts_path,
                trust_remote_code=vlm_options.trust_remote_code,
                use_fast=True,
            )
            # Left padding so generated tokens align at the end of each row.
            self.processor.tokenizer.padding_side = "left"

            self.vlm_model = AutoModelForImageTextToText.from_pretrained(
                artifacts_path,
                device_map=self.device,
                dtype=self.vlm_options.torch_dtype,
                _attn_implementation=(
                    "flash_attention_2"
                    if self.device.startswith("cuda")
                    and accelerator_options.cuda_use_flash_attention2
                    else "sdpa"
                ),
                trust_remote_code=vlm_options.trust_remote_code,
            )
            # torch.compile is not supported on Python >= 3.14; fall back to eval().
            if sys.version_info < (3, 14):
                self.vlm_model = torch.compile(self.vlm_model)  # type: ignore
            else:
                self.vlm_model.eval()

            # Load generation config
            self.generation_config = GenerationConfig.from_pretrained(artifacts_path)

    def process_images(
        self,
        image_batch: Iterable[Union[Image, np.ndarray]],
        prompt: Union[str, list[str]],
    ) -> Iterable[VlmPrediction]:
        """
        Batched inference for NuExtract VLM using the specialized input format.

        Args:
            image_batch: Iterable of PIL Images or numpy arrays
            prompt: Either:
                - str: Single template used for all images
                - list[str]: List of templates (one per image, must match image count)
        """
        import torch
        from PIL import Image as PILImage

        # Normalize images to RGB PIL
        pil_images: list[Image] = []
        for img in image_batch:
            if isinstance(img, np.ndarray):
                # Accept HxWx3/4 color or HxW grayscale arrays.
                if img.ndim == 3 and img.shape[2] in (3, 4):
                    pil_img = PILImage.fromarray(img.astype(np.uint8))
                elif img.ndim == 2:
                    pil_img = PILImage.fromarray(img.astype(np.uint8), mode="L")
                else:
                    raise ValueError(f"Unsupported numpy array shape: {img.shape}")
            else:
                pil_img = img
            if pil_img.mode != "RGB":
                pil_img = pil_img.convert("RGB")
            pil_images.append(pil_img)

        if not pil_images:
            return

        # Normalize templates (1 per image)
        if isinstance(prompt, str):
            templates = [prompt] * len(pil_images)
        else:
            if len(prompt) != len(pil_images):
                raise ValueError(
                    f"Number of templates ({len(prompt)}) must match number of images ({len(pil_images)})"
                )
            templates = prompt

        # Construct NuExtract input format
        inputs = []
        for pil_img, template in zip(pil_images, templates):
            input_item = {
                "document": {"type": "image", "image": pil_img},
                "template": template,
            }
            inputs.append(input_item)

        # Create messages structure for batch processing
        messages = [
            [
                {
                    "role": "user",
                    "content": [x["document"]],
                }
            ]
            for x in inputs
        ]

        # Apply chat template to each example individually
        texts = [
            self.processor.tokenizer.apply_chat_template(
                messages[i],
                template=x["template"],
                tokenize=False,
                add_generation_prompt=True,
            )
            for i, x in enumerate(inputs)
        ]

        # Process vision inputs using qwen-vl-utils
        image_inputs = process_all_vision_info(messages)

        # Process with the processor
        processor_inputs = self.processor(
            text=texts,
            images=image_inputs,
            padding=True,
            return_tensors="pt",
            **self.vlm_options.extra_processor_kwargs,
        )
        processor_inputs = {k: v.to(self.device) for k, v in processor_inputs.items()}

        # Generate; greedy decoding unless a positive temperature is configured.
        gen_kwargs = {
            **processor_inputs,
            "max_new_tokens": self.max_new_tokens,
            "generation_config": self.generation_config,
            **self.vlm_options.extra_generation_config,
        }
        if self.temperature > 0:
            gen_kwargs["do_sample"] = True
            gen_kwargs["temperature"] = self.temperature
        else:
            gen_kwargs["do_sample"] = False

        start_time = time.time()
        with torch.inference_mode():
            generated_ids = self.vlm_model.generate(**gen_kwargs)
        generation_time = time.time() - start_time

        # Trim generated sequences: drop the (left-padded) prompt tokens.
        input_len = processor_inputs["input_ids"].shape[1]
        trimmed_sequences = generated_ids[:, input_len:]

        # Decode with the processor/tokenizer
        decoded_texts: list[str] = self.processor.batch_decode(
            trimmed_sequences,
            skip_special_tokens=True,
            clean_up_tokenization_spaces=False,
        )

        # Optional logging
        num_tokens = None
        if generated_ids.shape[0] > 0:  # type: ignore
            # Todo: confirm num tokens is actually from first item, code was already like this
            num_tokens = int(generated_ids[0].shape[0])
            _log.debug(
                f"Generated {num_tokens} tokens in {generation_time:.2f}s "
                f"for batch size {generated_ids.shape[0]}."  # type: ignore
            )

        for text in decoded_texts:
            # Apply decode_response to the output text
            decoded_text = self.vlm_options.decode_response(text)
            yield VlmPrediction(
                text=decoded_text,
                generation_time=generation_time,
                num_tokens=num_tokens,
                stop_reason=VlmStopReason.UNSPECIFIED,
            )
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/vlm_models_inline/hf_transformers_model.py | docling/models/vlm_models_inline/hf_transformers_model.py | import importlib.metadata
import logging
import sys
import time
from collections.abc import Iterable
from pathlib import Path
from typing import Any, Optional, Union
import numpy as np
from PIL.Image import Image
from transformers import StoppingCriteria, StoppingCriteriaList, StopStringCriteria
from docling.datamodel.accelerator_options import (
AcceleratorOptions,
)
from docling.datamodel.base_models import Page, VlmPrediction, VlmStopReason
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options_vlm_model import (
InlineVlmOptions,
TransformersModelType,
TransformersPromptStyle,
)
from docling.models.base_model import BaseVlmPageModel
from docling.models.utils.generation_utils import (
GenerationStopper,
HFStoppingCriteriaWrapper,
)
from docling.models.utils.hf_model_download import (
HuggingFaceModelDownloadMixin,
)
from docling.utils.accelerator_utils import decide_device
from docling.utils.profiling import TimeRecorder
_log = logging.getLogger(__name__)
class HuggingFaceTransformersVlmModel(BaseVlmPageModel, HuggingFaceModelDownloadMixin):
def __init__(
self,
enabled: bool,
artifacts_path: Optional[Path],
accelerator_options: AcceleratorOptions,
vlm_options: InlineVlmOptions,
):
self.enabled = enabled
self.vlm_options = vlm_options
if self.enabled:
import torch
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForImageTextToText,
AutoModelForVision2Seq,
AutoProcessor,
BitsAndBytesConfig,
GenerationConfig,
)
transformers_version = importlib.metadata.version("transformers")
if (
self.vlm_options.repo_id == "microsoft/Phi-4-multimodal-instruct"
and transformers_version >= "4.52.0"
):
raise NotImplementedError(
f"Phi 4 only works with transformers<4.52.0 but you have {transformers_version=}. Please downgrage running pip install -U 'transformers<4.52.0'."
)
self.device = decide_device(
accelerator_options.device,
supported_devices=vlm_options.supported_devices,
)
_log.debug(f"Available device for VLM: {self.device}")
self.use_cache = vlm_options.use_kv_cache
self.max_new_tokens = vlm_options.max_new_tokens
self.temperature = vlm_options.temperature
repo_cache_folder = vlm_options.repo_id.replace("/", "--")
if artifacts_path is None:
artifacts_path = self.download_models(
self.vlm_options.repo_id, revision=self.vlm_options.revision
)
elif (artifacts_path / repo_cache_folder).exists():
artifacts_path = artifacts_path / repo_cache_folder
self.param_quantization_config: Optional[BitsAndBytesConfig] = None
if vlm_options.quantized:
self.param_quantization_config = BitsAndBytesConfig(
load_in_8bit=vlm_options.load_in_8bit,
llm_int8_threshold=vlm_options.llm_int8_threshold,
)
model_cls: Any = AutoModel
if (
self.vlm_options.transformers_model_type
== TransformersModelType.AUTOMODEL_CAUSALLM
):
model_cls = AutoModelForCausalLM
elif (
self.vlm_options.transformers_model_type
== TransformersModelType.AUTOMODEL_VISION2SEQ
):
model_cls = AutoModelForVision2Seq
elif (
self.vlm_options.transformers_model_type
== TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT
):
model_cls = AutoModelForImageTextToText
self.processor = AutoProcessor.from_pretrained(
artifacts_path,
trust_remote_code=vlm_options.trust_remote_code,
revision=vlm_options.revision,
)
self.processor.tokenizer.padding_side = "left"
self.vlm_model = model_cls.from_pretrained(
artifacts_path,
device_map=self.device,
dtype=self.vlm_options.torch_dtype,
_attn_implementation=(
"flash_attention_2"
if self.device.startswith("cuda")
and accelerator_options.cuda_use_flash_attention2
else "sdpa"
),
trust_remote_code=vlm_options.trust_remote_code,
revision=vlm_options.revision,
)
if sys.version_info < (3, 14):
self.vlm_model = torch.compile(self.vlm_model) # type: ignore
else:
self.vlm_model.eval()
# Load generation config
self.generation_config = GenerationConfig.from_pretrained(
artifacts_path, revision=vlm_options.revision
)
    def __call__(
        self, conv_res: ConversionResult, page_batch: Iterable[Page]
    ) -> Iterable[Page]:
        """Run the VLM over a batch of pages and attach predictions.

        Pages with invalid backends are passed through untouched; valid pages
        are processed in one batched inference call. NOTE: invalid pages are
        yielded before valid ones, so input ordering is not preserved.
        """
        page_list = list(page_batch)
        if not page_list:
            return

        valid_pages = []
        invalid_pages = []

        for page in page_list:
            assert page._backend is not None
            if not page._backend.is_valid():
                invalid_pages.append(page)
            else:
                valid_pages.append(page)

        # Process valid pages in batch
        if valid_pages:
            with TimeRecorder(conv_res, "vlm"):
                # Prepare images and prompts for batch processing
                images = []
                user_prompts = []
                pages_with_images = []

                for page in valid_pages:
                    assert page.size is not None
                    hi_res_image = page.get_image(
                        scale=self.vlm_options.scale, max_size=self.vlm_options.max_size
                    )

                    # Only process pages with valid images
                    if hi_res_image is not None:
                        images.append(hi_res_image)

                        # Define prompt structure
                        user_prompt = self._build_prompt_safe(page)

                        user_prompts.append(user_prompt)
                        pages_with_images.append(page)

                # Use process_images for the actual inference
                if images:  # Only if we have valid images
                    with TimeRecorder(conv_res, "vlm_inference"):
                        predictions = list(self.process_images(images, user_prompts))

                    # Attach results to pages
                    for page, prediction in zip(pages_with_images, predictions):
                        page.predictions.vlm_response = prediction

        # Yield all pages (valid and invalid)
        for page in invalid_pages:
            yield page
        for page in valid_pages:
            yield page
def process_images(
self,
image_batch: Iterable[Union[Image, np.ndarray]],
prompt: Union[str, list[str]],
) -> Iterable[VlmPrediction]:
"""
Batched inference for Hugging Face Image-Text-to-Text VLMs (e.g., SmolDocling / SmolVLM).
- Lets the processor handle all padding & batching for text+images.
- Trims generated sequences per row using attention_mask (no pad-id fallbacks).
- Keeps your formulate_prompt() exactly as-is.
"""
import numpy as np
import torch
from PIL import Image as PILImage
# -- Normalize images to RGB PIL
pil_images: list[Image] = []
for img in image_batch:
if isinstance(img, np.ndarray):
if img.ndim == 3 and img.shape[2] in (3, 4):
pil_img = PILImage.fromarray(img.astype(np.uint8))
elif img.ndim == 2:
pil_img = PILImage.fromarray(img.astype(np.uint8), mode="L")
else:
raise ValueError(f"Unsupported numpy array shape: {img.shape}")
else:
pil_img = img
if pil_img.mode != "RGB":
pil_img = pil_img.convert("RGB")
pil_images.append(pil_img)
if not pil_images:
return
# -- Normalize prompts (1 per image)
if isinstance(prompt, str):
user_prompts = [prompt] * len(pil_images)
else:
if len(prompt) != len(pil_images):
raise ValueError(
f"Number of prompts ({len(prompt)}) must match number of images ({len(pil_images)})"
)
user_prompts = prompt
# Use your prompt formatter verbatim
if self.vlm_options.transformers_prompt_style == TransformersPromptStyle.NONE:
inputs = self.processor(
pil_images,
return_tensors="pt",
padding=True, # pad across batch for both text and vision
**self.vlm_options.extra_processor_kwargs,
)
else:
prompts: list[str] = [self.formulate_prompt(p) for p in user_prompts]
# -- Processor performs BOTH text+image preprocessing + batch padding (recommended)
inputs = self.processor(
text=prompts,
images=pil_images,
return_tensors="pt",
padding=True, # pad across batch for both text and vision
**self.vlm_options.extra_processor_kwargs,
)
inputs = {k: v.to(self.device) for k, v in inputs.items()}
# -- Optional stopping criteria
stopping_criteria_list: StoppingCriteriaList = StoppingCriteriaList()
# Add string-based stopping criteria
if self.vlm_options.stop_strings:
stopping_criteria_list.append(
StopStringCriteria(
stop_strings=self.vlm_options.stop_strings,
tokenizer=self.processor.tokenizer,
)
)
# Add custom stopping criteria
if self.vlm_options.custom_stopping_criteria:
for criteria in self.vlm_options.custom_stopping_criteria:
# If it's a class (not an instance), determine the type and handle accordingly
if isinstance(criteria, type):
# Check if it's a GenerationStopper class
if issubclass(criteria, GenerationStopper):
# Instantiate GenerationStopper and wrap it
stopper_instance = criteria()
wrapped_criteria = HFStoppingCriteriaWrapper(
self.processor.tokenizer, stopper_instance
)
stopping_criteria_list.append(wrapped_criteria)
elif issubclass(criteria, StoppingCriteria):
# It's a StoppingCriteria class, instantiate with tokenizer
criteria_instance = criteria(self.processor.tokenizer)
stopping_criteria_list.append(criteria_instance)
elif isinstance(criteria, GenerationStopper):
# Wrap GenerationStopper instances in HFStoppingCriteriaWrapper
wrapped_criteria = HFStoppingCriteriaWrapper(
self.processor.tokenizer, criteria
)
stopping_criteria_list.append(wrapped_criteria)
else:
# If it's already an instance of StoppingCriteria, use it directly
stopping_criteria_list.append(criteria)
stopping_criteria = (
StoppingCriteriaList(stopping_criteria_list)
if stopping_criteria_list
else None
)
# -- Filter out decoder-specific keys from extra_generation_config
decoder_keys = {
"skip_special_tokens",
"clean_up_tokenization_spaces",
"spaces_between_special_tokens",
}
generation_config = {
k: v
for k, v in self.vlm_options.extra_generation_config.items()
if k not in decoder_keys
}
decoder_config = {
k: v
for k, v in self.vlm_options.extra_generation_config.items()
if k in decoder_keys
}
# -- Generate (Image-Text-to-Text class expects these inputs from processor)
gen_kwargs = {
**inputs,
"max_new_tokens": self.max_new_tokens,
"use_cache": self.use_cache,
"generation_config": self.generation_config,
**generation_config,
}
if self.temperature > 0:
gen_kwargs["do_sample"] = True
gen_kwargs["temperature"] = self.temperature
else:
gen_kwargs["do_sample"] = False
if stopping_criteria is not None:
gen_kwargs["stopping_criteria"] = stopping_criteria
start_time = time.time()
with torch.inference_mode():
generated_ids = self.vlm_model.generate(**gen_kwargs)
generation_time = time.time() - start_time
input_len = inputs["input_ids"].shape[1] # common right-aligned prompt length
trimmed_sequences = generated_ids[:, input_len:] # only newly generated tokens
# -- Decode with the processor/tokenizer (skip specials, keep DocTags as text)
decode_fn = getattr(self.processor, "batch_decode", None)
if decode_fn is None and getattr(self.processor, "tokenizer", None) is not None:
decode_fn = self.processor.tokenizer.batch_decode
if decode_fn is None:
raise RuntimeError(
"Neither processor.batch_decode nor tokenizer.batch_decode is available."
)
decoded_texts: list[str] = decode_fn(
trimmed_sequences,
**decoder_config,
)
# -- Clip off pad tokens from decoded texts
pad_token = self.processor.tokenizer.pad_token
if pad_token:
decoded_texts = [text.rstrip(pad_token) for text in decoded_texts]
# -- Optional logging
num_tokens = None
if generated_ids.shape[0] > 0:
num_tokens = int(generated_ids[0].shape[0])
_log.debug(
f"Generated {num_tokens} tokens in {generation_time:.2f}s "
f"for batch size {generated_ids.shape[0]}."
)
for i, text in enumerate(decoded_texts):
input_prompt = (
prompts[i] if self.vlm_options.track_input_prompt and prompts else None
)
# Apply decode_response to the output text
decoded_text = self.vlm_options.decode_response(text)
yield VlmPrediction(
text=decoded_text,
generation_time=generation_time,
num_tokens=num_tokens,
stop_reason=VlmStopReason.UNSPECIFIED,
input_prompt=input_prompt,
)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/vlm_models_inline/mlx_model.py | docling/models/vlm_models_inline/mlx_model.py | import logging
import sys
import threading
import time
from collections.abc import Iterable
from pathlib import Path
from typing import Optional, Union
import numpy as np
from PIL.Image import Image
from transformers import StoppingCriteria
from docling.datamodel.accelerator_options import (
AcceleratorOptions,
)
from docling.datamodel.base_models import (
Page,
VlmPrediction,
VlmPredictionToken,
VlmStopReason,
)
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options_vlm_model import InlineVlmOptions
from docling.models.base_model import BaseVlmPageModel
from docling.models.utils.generation_utils import GenerationStopper
from docling.models.utils.hf_model_download import (
HuggingFaceModelDownloadMixin,
)
from docling.utils.profiling import TimeRecorder
_log = logging.getLogger(__name__)
# Global lock for MLX model calls - MLX models are not thread-safe
# All MLX models share this lock to prevent concurrent MLX operations
_MLX_GLOBAL_LOCK = threading.Lock()
class HuggingFaceMlxModel(BaseVlmPageModel, HuggingFaceModelDownloadMixin):
    """Vision-language page model backed by Apple MLX via the `mlx-vlm` package.

    Generation is token-streamed so that stop strings and custom
    `GenerationStopper`s can abort generation early. All MLX inference is
    serialized through the module-level `_MLX_GLOBAL_LOCK` because MLX models
    are not thread-safe.
    """

    def __init__(
        self,
        enabled: bool,
        artifacts_path: Optional[Path],
        accelerator_options: AcceleratorOptions,
        vlm_options: InlineVlmOptions,
    ):
        """Load the MLX model, processor and config when `enabled`; no-op shell otherwise.

        Raises:
            ImportError: if `mlx-vlm` is not installed (or not available on
                this Python version).
            ValueError: if HF `StoppingCriteria` objects or classes appear in
                `custom_stopping_criteria` — MLX only supports
                `GenerationStopper`.
        """
        self.enabled = enabled
        self.vlm_options = vlm_options

        self.max_tokens = vlm_options.max_new_tokens
        self.temperature = vlm_options.temperature

        if self.enabled:
            try:
                from mlx_vlm import generate, load, stream_generate  # type: ignore
                from mlx_vlm.prompt_utils import apply_chat_template  # type: ignore
                from mlx_vlm.utils import load_config  # type: ignore
            except ImportError:
                if sys.version_info < (3, 14):
                    raise ImportError(
                        "mlx-vlm is not installed. Please install it via `pip install mlx-vlm` to use MLX VLM models."
                    )
                else:
                    raise ImportError(
                        "mlx-vlm is not installed. It is not yet available on Python 3.14."
                    )

            repo_cache_folder = vlm_options.repo_id.replace("/", "--")

            # Keep references to the mlx_vlm entry points used at inference time.
            self.apply_chat_template = apply_chat_template
            self.stream_generate = stream_generate

            # PARAMETERS:
            # Resolve the model directory: download from the hub when no local
            # path is given; otherwise descend into the per-repo cache folder
            # if it exists.
            if artifacts_path is None:
                artifacts_path = self.download_models(
                    self.vlm_options.repo_id,
                    revision=self.vlm_options.revision,
                )
            elif (artifacts_path / repo_cache_folder).exists():
                artifacts_path = artifacts_path / repo_cache_folder

            ## Load the model
            self.vlm_model, self.processor = load(artifacts_path)
            self.config = load_config(artifacts_path)

            # Validate custom stopping criteria - MLX doesn't support HF StoppingCriteria
            if self.vlm_options.custom_stopping_criteria:
                for criteria in self.vlm_options.custom_stopping_criteria:
                    if isinstance(criteria, StoppingCriteria):
                        raise ValueError(
                            f"MLX models do not support HuggingFace StoppingCriteria instances. "
                            f"Found {type(criteria).__name__}. Use GenerationStopper instead."
                        )
                    elif isinstance(criteria, type) and issubclass(
                        criteria, StoppingCriteria
                    ):
                        raise ValueError(
                            f"MLX models do not support HuggingFace StoppingCriteria classes. "
                            f"Found {criteria.__name__}. Use GenerationStopper instead."
                        )

    def __call__(
        self, conv_res: ConversionResult, page_batch: Iterable[Page]
    ) -> Iterable[Page]:
        """Attach VLM predictions to valid pages.

        Invalid-backend pages are passed through untouched and yielded before
        the valid ones.
        """
        page_list = list(page_batch)
        if not page_list:
            return

        valid_pages = []
        invalid_pages = []

        for page in page_list:
            assert page._backend is not None
            if not page._backend.is_valid():
                invalid_pages.append(page)
            else:
                valid_pages.append(page)

        # Process valid pages in batch
        if valid_pages:
            with TimeRecorder(conv_res, f"vlm-mlx-{self.vlm_options.repo_id}"):
                # Prepare images and prompts for batch processing
                images = []
                user_prompts = []
                pages_with_images = []

                for page in valid_pages:
                    assert page.size is not None
                    hi_res_image = page.get_image(
                        scale=self.vlm_options.scale, max_size=self.vlm_options.max_size
                    )

                    # Only process pages with valid images
                    if hi_res_image is not None:
                        images.append(hi_res_image)

                        # Define prompt structure
                        user_prompt = self._build_prompt_safe(page)

                        user_prompts.append(user_prompt)
                        pages_with_images.append(page)

                # Use process_images for the actual inference
                if images:  # Only if we have valid images
                    predictions = list(self.process_images(images, user_prompts))

                    # Attach results to pages
                    for page, prediction in zip(pages_with_images, predictions):
                        page.predictions.vlm_response = prediction

        # Yield all pages (valid and invalid)
        for page in invalid_pages:
            yield page
        for page in valid_pages:
            yield page

    def process_images(
        self,
        image_batch: Iterable[Union[Image, np.ndarray]],
        prompt: Union[str, list[str]],
    ) -> Iterable[VlmPrediction]:
        """Process raw images without page metadata.

        Args:
            image_batch: Iterable of PIL Images or numpy arrays
            prompt: Either:
                - str: Single prompt used for all images
                - list[str]: List of prompts (one per image, must match image count)

        Raises:
            ValueError: If prompt list length doesn't match image count.
        """
        # Convert image batch to list for length validation
        image_list = list(image_batch)

        if len(image_list) == 0:
            return

        # Handle prompt parameter
        if isinstance(prompt, str):
            # Single prompt for all images
            user_prompts = [prompt] * len(image_list)
        elif isinstance(prompt, list):
            # List of prompts (one per image)
            if len(prompt) != len(image_list):
                raise ValueError(
                    f"Number of prompts ({len(prompt)}) must match number of images ({len(image_list)})"
                )
            user_prompts = prompt
        else:
            raise ValueError(f"prompt must be str or list[str], got {type(prompt)}")

        # MLX models are not thread-safe - use global lock to serialize access
        with _MLX_GLOBAL_LOCK:
            _log.debug("MLX model: Acquired global lock for thread safety")
            # Images are processed one at a time; MLX generation here is not
            # batched across images.
            for image, user_prompt in zip(image_list, user_prompts):
                # Convert numpy array to PIL Image if needed
                if isinstance(image, np.ndarray):
                    if image.ndim == 3 and image.shape[2] in [3, 4]:
                        # RGB or RGBA array
                        from PIL import Image as PILImage

                        image = PILImage.fromarray(image.astype(np.uint8))
                    elif image.ndim == 2:
                        # Grayscale array
                        from PIL import Image as PILImage

                        image = PILImage.fromarray(image.astype(np.uint8), mode="L")
                    else:
                        raise ValueError(
                            f"Unsupported numpy array shape: {image.shape}"
                        )

                # Ensure image is in RGB mode (handles RGBA, L, etc.)
                if image.mode != "RGB":
                    image = image.convert("RGB")

                # Use the MLX chat template approach like in the __call__ method
                formatted_prompt = self.apply_chat_template(
                    self.processor, self.config, user_prompt, num_images=1
                )

                # Stream generate with stop strings and custom stopping criteria support
                start_time = time.time()
                _log.debug("start generating ...")

                tokens: list[VlmPredictionToken] = []
                output = ""

                # Use stream_generate for proper stop string handling
                for token in self.stream_generate(
                    self.vlm_model,
                    self.processor,
                    formatted_prompt,
                    [image],  # MLX stream_generate expects list of images
                    max_tokens=self.max_tokens,
                    verbose=False,
                    temp=self.temperature,
                ):
                    # Collect token information
                    if len(token.logprobs.shape) == 1:
                        tokens.append(
                            VlmPredictionToken(
                                text=token.text,
                                token=token.token,
                                logprob=token.logprobs[token.token],
                            )
                        )
                    elif (
                        len(token.logprobs.shape) == 2 and token.logprobs.shape[0] == 1
                    ):
                        tokens.append(
                            VlmPredictionToken(
                                text=token.text,
                                token=token.token,
                                logprob=token.logprobs[0, token.token],
                            )
                        )
                    else:
                        _log.warning(
                            f"incompatible shape for logprobs: {token.logprobs.shape}"
                        )

                    output += token.text

                    # Check for any configured stop strings
                    if self.vlm_options.stop_strings:
                        if any(
                            stop_str in output
                            for stop_str in self.vlm_options.stop_strings
                        ):
                            _log.debug("Stopping generation due to stop string match")
                            break

                    # Check for custom stopping criteria (GenerationStopper instances)
                    if self.vlm_options.custom_stopping_criteria:
                        for criteria in self.vlm_options.custom_stopping_criteria:
                            # Handle both instances and classes of GenerationStopper
                            if isinstance(criteria, GenerationStopper):
                                stopper = criteria
                            elif isinstance(criteria, type) and issubclass(
                                criteria, GenerationStopper
                            ):
                                stopper = criteria()
                            # NOTE(review): any other criteria type would leave
                            # `stopper` unbound here (NameError); __init__
                            # rejects HF StoppingCriteria, which presumably
                            # makes that unreachable — confirm.

                            # Determine the text window to check based on lookback_tokens
                            lookback_tokens = stopper.lookback_tokens()

                            # Check only the last N characters worth of text
                            # This is a simplified approach - in practice, you might want to
                            # decode the last N tokens from the token list for more accuracy
                            text_to_check = (
                                output[-lookback_tokens:]
                                if len(output) > lookback_tokens
                                else output
                            )

                            try:
                                if stopper.should_stop(text_to_check):
                                    _log.info(
                                        f"Stopping generation due to GenerationStopper: {type(stopper).__name__}"
                                    )
                                    break
                            except Exception as e:
                                _log.warning(
                                    f"Error in GenerationStopper.should_stop: {e}"
                                )
                                continue
                        else:  # note: for-else idiom
                            continue  # Only executed if the inner loop didn't break
                        break  # Break the outer loop if any stopper triggered

                generation_time = time.time() - start_time

                _log.debug(
                    f"{generation_time:.2f} seconds for {len(tokens)} tokens ({len(tokens) / generation_time:.1f} tokens/sec)."
                )

                # Apply decode_response to the output before yielding
                decoded_output = self.vlm_options.decode_response(output)
                input_prompt = (
                    formatted_prompt if self.vlm_options.track_input_prompt else None
                )
                yield VlmPrediction(
                    text=decoded_output,
                    generation_time=generation_time,
                    generated_tokens=tokens,
                    num_tokens=len(tokens),
                    stop_reason=VlmStopReason.UNSPECIFIED,
                    input_prompt=input_prompt,
                )
            _log.debug("MLX model: Released global lock")
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/vlm_models_inline/__init__.py | docling/models/vlm_models_inline/__init__.py | python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false | |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/vlm_models_inline/vllm_model.py | docling/models/vlm_models_inline/vllm_model.py | import logging
import sys
import time
from collections.abc import Iterable
from pathlib import Path
from typing import Any, Dict, Optional, Union
import numpy as np
from PIL.Image import Image
from docling.datamodel.accelerator_options import AcceleratorOptions
from docling.datamodel.base_models import (
Page,
VlmPrediction,
VlmPredictionToken,
VlmStopReason,
)
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options_vlm_model import (
InlineVlmOptions,
TransformersPromptStyle,
)
from docling.models.base_model import BaseVlmPageModel
from docling.models.utils.hf_model_download import HuggingFaceModelDownloadMixin
from docling.utils.accelerator_utils import decide_device
from docling.utils.profiling import TimeRecorder
_log = logging.getLogger(__name__)
class VllmVlmModel(BaseVlmPageModel, HuggingFaceModelDownloadMixin):
    """
    vLLM-backed vision-language model that accepts PIL images (or numpy arrays)
    via vLLM's multi_modal_data, with prompt formatting handled by formulate_prompt().
    """

    # --------- Allowlist of vLLM args ---------
    # SamplingParams (runtime generation controls)
    _VLLM_SAMPLING_KEYS = {
        # Core
        "max_tokens",
        "temperature",
        "top_p",
        "top_k",
        # Penalties
        "presence_penalty",
        "frequency_penalty",
        "repetition_penalty",
        # Stops / outputs
        "stop",
        "stop_token_ids",
        "skip_special_tokens",
        "spaces_between_special_tokens",
        # Search / length
        "n",
        "best_of",
        "length_penalty",
        "early_stopping",
        # Misc
        "logprobs",
        "prompt_logprobs",
        "min_p",
        "seed",
    }

    # LLM(...) / EngineArgs (engine/load-time controls)
    _VLLM_ENGINE_KEYS = {
        # Model/tokenizer/impl
        "tokenizer",
        "tokenizer_mode",
        "download_dir",
        # Parallelism / memory / lengths
        "tensor_parallel_size",
        "pipeline_parallel_size",
        "gpu_memory_utilization",
        "max_model_len",
        "max_num_batched_tokens",
        "kv_cache_dtype",
        "dtype",
        # Quantization (coarse switch)
        "quantization",
        # Multimodal limits
        "limit_mm_per_prompt",
        # Execution toggles
        "enforce_eager",
    }

    def __init__(
        self,
        enabled: bool,
        artifacts_path: Optional[Path],
        accelerator_options: AcceleratorOptions,
        vlm_options: InlineVlmOptions,
    ):
        """Initialize the vLLM engine, the HF processor and sampling parameters.

        When `enabled` is False the constructor returns immediately and
        `llm`, `sampling_params` and `processor` stay None.

        Raises:
            ImportError: if vllm is not installed.
        """
        self.enabled = enabled
        self.vlm_options: InlineVlmOptions = vlm_options

        self.llm = None
        self.sampling_params = None
        self.processor = None  # used for CHAT templating in formulate_prompt()
        self.device = "cpu"
        self.max_new_tokens = vlm_options.max_new_tokens
        self.temperature = vlm_options.temperature

        if not self.enabled:
            return

        from transformers import AutoProcessor

        try:
            from vllm import LLM, SamplingParams
        except ImportError:
            if sys.version_info < (3, 14):
                raise ImportError(
                    "vllm is not installed. Please install it via `pip install vllm`."
                )
            else:
                raise ImportError(
                    "vllm is not installed. It is not yet available on Python 3.14."
                )

        # Device selection
        self.device = decide_device(
            accelerator_options.device, supported_devices=vlm_options.supported_devices
        )
        _log.debug(f"Available device for VLM: {self.device}")

        # Resolve artifacts path / cache folder
        repo_cache_folder = vlm_options.repo_id.replace("/", "--")
        if artifacts_path is None:
            artifacts_path = self.download_models(
                self.vlm_options.repo_id, revision=self.vlm_options.revision
            )
        elif (artifacts_path / repo_cache_folder).exists():
            artifacts_path = artifacts_path / repo_cache_folder

        # --------- Strict split & validation of extra_generation_config ---------
        # Keys are routed either to engine construction (load_cfg) or to
        # SamplingParams (gen_cfg); anything unrecognized is dropped with a
        # warning rather than passed through.
        extra_cfg = self.vlm_options.extra_generation_config
        load_cfg = {k: v for k, v in extra_cfg.items() if k in self._VLLM_ENGINE_KEYS}
        gen_cfg = {k: v for k, v in extra_cfg.items() if k in self._VLLM_SAMPLING_KEYS}
        unknown = sorted(
            k
            for k in extra_cfg.keys()
            if k not in self._VLLM_ENGINE_KEYS and k not in self._VLLM_SAMPLING_KEYS
        )
        if unknown:
            _log.warning(
                "Ignoring unknown extra_generation_config keys for vLLM: %s", unknown
            )

        # --------- Construct LLM kwargs (engine/load-time) ---------
        llm_kwargs: Dict[str, Any] = {
            "model": str(artifacts_path),
            "model_impl": "transformers",
            "limit_mm_per_prompt": {"image": 1},
            "revision": self.vlm_options.revision,
            "trust_remote_code": self.vlm_options.trust_remote_code,
            **load_cfg,
        }
        if self.device == "cpu":
            llm_kwargs.setdefault("enforce_eager", True)
        else:
            llm_kwargs.setdefault(
                "gpu_memory_utilization", 0.3
            )  # room for other models

        # Quantization (kept as-is; coarse)
        if self.vlm_options.quantized and self.vlm_options.load_in_8bit:
            llm_kwargs.setdefault("quantization", "bitsandbytes")

        # Initialize vLLM LLM
        self.llm = LLM(**llm_kwargs)

        # Initialize processor for prompt templating (needed for CHAT style)
        self.processor = AutoProcessor.from_pretrained(
            artifacts_path,
            trust_remote_code=self.vlm_options.trust_remote_code,
            revision=self.vlm_options.revision,
        )

        # --------- SamplingParams (runtime) ---------
        self.sampling_params = SamplingParams(
            temperature=self.temperature,
            max_tokens=self.max_new_tokens,
            stop=(self.vlm_options.stop_strings or None),
            **gen_cfg,
        )

    def __call__(
        self, conv_res: ConversionResult, page_batch: Iterable[Page]
    ) -> Iterable[Page]:
        """Run a single batched vLLM inference over the valid pages.

        Invalid-backend pages pass through untouched; all pages are yielded
        back in their original order.
        """
        # If disabled, pass-through
        if not self.enabled:
            for page in page_batch:
                yield page
            return

        page_list = list(page_batch)
        if not page_list:
            return

        # Preserve original order
        original_order = page_list[:]

        # Separate valid/invalid
        valid_pages: list[Page] = []
        invalid_pages: list[Page] = []
        for page in page_list:
            assert page._backend is not None
            if page._backend.is_valid():
                valid_pages.append(page)
            else:
                invalid_pages.append(page)

        if valid_pages:
            with TimeRecorder(conv_res, "vlm"):
                images: list[Image] = []
                user_prompts: list[str] = []
                pages_with_images: list[Page] = []

                for page in valid_pages:
                    assert page.size is not None
                    hi_res_image = page.get_image(
                        scale=self.vlm_options.scale,
                        max_size=self.vlm_options.max_size,
                    )
                    # Pages without a renderable image get no prediction.
                    if hi_res_image is None:
                        continue

                    images.append(hi_res_image)

                    # Define prompt structure
                    user_prompt = self._build_prompt_safe(page)

                    user_prompts.append(user_prompt)
                    pages_with_images.append(page)

                if images:
                    with TimeRecorder(conv_res, "vlm_inference"):
                        predictions = list(self.process_images(images, user_prompts))
                    for page, prediction in zip(pages_with_images, predictions):
                        page.predictions.vlm_response = prediction

        # Yield in original order
        for page in original_order:
            yield page

    def process_images(
        self,
        image_batch: Iterable[Union[Image, np.ndarray]],
        prompt: Union[str, list[str]],
    ) -> Iterable[VlmPrediction]:
        """Process images in a single batched vLLM inference call.

        Args:
            image_batch: PIL images or numpy arrays (HxW, HxWx3 or HxWx4).
            prompt: One prompt applied to all images, or one prompt per image.

        Yields:
            One VlmPrediction per input image, in input order.

        Raises:
            ValueError: on unsupported array shapes, prompt/image count
                mismatch, or a non-str/list prompt.
        """
        import numpy as np
        from PIL import Image as PILImage

        # -- Normalize images to RGB PIL
        pil_images: list[Image] = []
        for img in image_batch:
            if isinstance(img, np.ndarray):
                if img.ndim == 3 and img.shape[2] in (3, 4):
                    pil_img = PILImage.fromarray(img.astype(np.uint8))
                elif img.ndim == 2:
                    pil_img = PILImage.fromarray(img.astype(np.uint8), mode="L")
                else:
                    raise ValueError(f"Unsupported numpy array shape: {img.shape}")
            else:
                pil_img = img
            if pil_img.mode != "RGB":
                pil_img = pil_img.convert("RGB")
            pil_images.append(pil_img)

        if not pil_images:
            return

        # Normalize prompts
        if isinstance(prompt, str):
            user_prompts = [prompt] * len(pil_images)
        elif isinstance(prompt, list):
            if len(prompt) != len(pil_images):
                raise ValueError(
                    f"Number of prompts ({len(prompt)}) must match number of images ({len(pil_images)})"
                )
            user_prompts = prompt
        else:
            raise ValueError(f"prompt must be str or list[str], got {type(prompt)}")

        # Format prompts
        prompts: list[str] = [self.formulate_prompt(up) for up in user_prompts]

        # Build vLLM inputs
        llm_inputs = [
            {"prompt": p, "multi_modal_data": {"image": im}}
            for p, im in zip(prompts, pil_images)
        ]

        # Generate
        assert self.llm is not None and self.sampling_params is not None
        start_time = time.time()
        outputs = self.llm.generate(llm_inputs, sampling_params=self.sampling_params)  # type: ignore
        generation_time = time.time() - start_time

        # Optional debug
        # num_tokens_within_batch is only used for this debug log line.
        if outputs:
            try:
                num_tokens_within_batch = len(outputs[0].outputs[0].token_ids)
                _log.debug(
                    f"Generated {num_tokens_within_batch} tokens for batch in {generation_time:.2f}s."
                )
            except Exception:
                num_tokens_within_batch = 0

        # Emit predictions
        for i, output in enumerate(outputs):
            text = output.outputs[0].text if output.outputs else ""
            # NOTE(review): vLLM's stop_reason is typically non-None only when
            # a stop string/token matched, so this maps "stopped by a stop
            # condition" -> END_OF_SEQUENCE and everything else (incl. natural
            # EOS) -> LENGTH — confirm against the vLLM version in use.
            stop_reason = (
                VlmStopReason.END_OF_SEQUENCE
                if output.outputs[0].stop_reason
                else VlmStopReason.LENGTH
            )
            generated_tokens = [
                VlmPredictionToken(token=int(t)) for t in output.outputs[0].token_ids
            ]
            num_tokens = len(generated_tokens)
            # Token ids are dropped (but the count kept) unless tracking is on.
            if not self.vlm_options.track_generated_tokens:
                generated_tokens = []
            input_prompt = prompts[i] if self.vlm_options.track_input_prompt else None
            _log.debug(f"VLM generated response carries input prompt: {input_prompt}")
            decoded_text = self.vlm_options.decode_response(text)
            yield VlmPrediction(
                text=decoded_text,
                generation_time=generation_time,
                num_tokens=num_tokens,
                stop_reason=stop_reason,
                generated_tokens=generated_tokens,
                input_prompt=input_prompt,
            )
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/utils/api_image_request.py | docling/utils/api_image_request.py | import base64
import json
import logging
from io import BytesIO
from typing import Dict, List, Optional, Tuple
import requests
from PIL import Image
from pydantic import AnyUrl
from docling.datamodel.base_models import OpenAiApiResponse, VlmStopReason
from docling.models.utils.generation_utils import GenerationStopper
_log = logging.getLogger(__name__)
def api_image_request(
    image: Image.Image,
    prompt: str,
    url: AnyUrl,
    timeout: float = 20,
    headers: Optional[dict[str, str]] = None,
    **params,
) -> Tuple[str, Optional[int], VlmStopReason]:
    """Send one image+prompt request to an OpenAI-compatible chat endpoint.

    The image is PNG-encoded and embedded as a base64 data URL inside a single
    user message. Extra keyword arguments (e.g. ``model``, ``temperature``)
    are forwarded verbatim in the request payload.

    Returns:
        ``(generated_text, num_tokens, stop_reason)``. On any failure
        (unencodable image, transport error, unparsable response) this
        returns ``("", 0, VlmStopReason.UNSPECIFIED)`` instead of raising.
    """
    img_io = BytesIO()
    image = (
        image.copy()
    )  # Fix for inconsistent PIL image width/height to actual byte data
    image = image.convert("RGBA")
    try:
        image.save(img_io, "PNG")
    except Exception as e:
        _log.error(f"Error, corrupted PNG of size: {image.size}: {e}")
        return "", 0, VlmStopReason.UNSPECIFIED

    try:
        image_base64 = base64.b64encode(img_io.getvalue()).decode("utf-8")
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/png;base64,{image_base64}"
                        },
                    },
                    {
                        "type": "text",
                        "text": prompt,
                    },
                ],
            }
        ]

        payload = {
            "messages": messages,
            **params,
        }

        headers = headers or {}

        r = requests.post(
            str(url),
            headers=headers,
            json=payload,
            timeout=timeout,
        )
        if not r.ok:
            # Log the failure but still attempt to parse; model_validate_json
            # below will raise and route us to the error return.
            _log.error(f"Error calling the API. Response was {r.text}")

        api_resp = OpenAiApiResponse.model_validate_json(r.text)
        generated_text = api_resp.choices[0].message.content.strip()
        num_tokens = api_resp.usage.total_tokens
        stop_reason = (
            VlmStopReason.LENGTH
            if api_resp.choices[0].finish_reason == "length"
            else VlmStopReason.END_OF_SEQUENCE
        )
        return generated_text, num_tokens, stop_reason
    except Exception as e:
        _log.error(f"Error, could not process request: {e}")
        return "", 0, VlmStopReason.UNSPECIFIED
def api_image_request_streaming(
    image: Image.Image,
    prompt: str,
    url: AnyUrl,
    *,
    timeout: float = 20,
    headers: Optional[dict[str, str]] = None,
    generation_stoppers: list[GenerationStopper] = [],
    **params,
) -> Tuple[str, Optional[int]]:
    """
    Stream a chat completion from an OpenAI-compatible server (e.g., vLLM).

    Parses SSE lines: 'data: {json}\\n\\n', terminated by 'data: [DONE]'.
    Accumulates text and calls stopper.should_stop(window) as chunks arrive.
    If a stopper triggers, the HTTP connection is closed to abort server-side
    generation.

    Returns:
        (full_text, num_tokens) where num_tokens is the "usage.total_tokens"
        value of the most recent SSE data chunk, or None when the last chunk
        (or the whole stream) carried no usage information.
    """
    # NOTE: generation_stoppers has a mutable default; it is only ever read
    # here, so the shared default list is safe.
    img_io = BytesIO()
    image.save(img_io, "PNG")
    image_b64 = base64.b64encode(img_io.getvalue()).decode("utf-8")

    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/png;base64,{image_b64}"},
                },
                {"type": "text", "text": prompt},
            ],
        }
    ]

    payload = {
        "messages": messages,
        "stream": True,  # <-- critical for SSE streaming
        **params,
    }

    # Debug: Log the payload to verify temperature is included
    _log.debug(f"API streaming request payload: {json.dumps(payload, indent=2)}")

    # Some servers require Accept: text/event-stream for SSE.
    # It's safe to set it; OpenAI-compatible servers tolerate it.
    hdrs = {"Accept": "text/event-stream", **(headers or {})}

    # Try to force temperature via header if server ignores payload parameter
    if "temperature" in params:
        hdrs["X-Temperature"] = str(params["temperature"])

    # Stream the HTTP response
    with requests.post(
        str(url), headers=hdrs, json=payload, timeout=timeout, stream=True
    ) as r:
        if not r.ok:
            _log.error(
                f"Error calling the API {url} in streaming mode. Response was {r.text}"
            )
        r.raise_for_status()

        full_text = []
        # Initialize before the loop: an empty stream (or an immediate
        # '[DONE]') previously left num_tokens unbound and raised NameError
        # at the final return.
        num_tokens = None
        for raw_line in r.iter_lines(decode_unicode=True):
            if not raw_line:  # keep-alives / blank lines
                continue
            if not raw_line.startswith("data:"):
                # Some proxies inject comments; ignore anything not starting with 'data:'
                continue

            data = raw_line[len("data:") :].strip()
            if data == "[DONE]":
                break

            try:
                obj = json.loads(data)
            except json.JSONDecodeError:
                _log.debug("Skipping non-JSON SSE chunk: %r", data[:200])
                continue

            # OpenAI-compatible delta format
            # obj["choices"][0]["delta"]["content"] may be None or missing (e.g., tool calls)
            try:
                delta = obj["choices"][0].get("delta") or {}
                piece = delta.get("content") or ""
            except (KeyError, IndexError) as e:
                _log.debug("Unexpected SSE chunk shape: %s", e)
                piece = ""

            # Try to extract token count; deliberately reset per chunk so the
            # returned value reflects the most recent chunk's usage report.
            num_tokens = None
            try:
                if "usage" in obj:
                    usage = obj["usage"]
                    num_tokens = usage.get("total_tokens")
            except Exception as e:
                num_tokens = None
                _log.debug("Usage key not included in response: %s", e)

            if piece:
                full_text.append(piece)

            for stopper in generation_stoppers:
                # Respect stopper's lookback window. We use a simple string window which
                # works with the GenerationStopper interface.
                lookback = max(1, stopper.lookback_tokens())
                window = "".join(full_text)[-lookback:]
                if stopper.should_stop(window):
                    # Break out of the loop cleanly. The context manager will handle
                    # closing the connection when we exit the 'with' block.
                    # vLLM/OpenAI-compatible servers will detect the client disconnect
                    # and abort the request server-side.
                    return "".join(full_text), num_tokens

        return "".join(full_text), num_tokens
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/utils/layout_postprocessor.py | docling/utils/layout_postprocessor.py | import bisect
import logging
import sys
from collections import defaultdict
from docling_core.types.doc import DocItemLabel, Size
from docling_core.types.doc.page import TextCell
from rtree import index
from docling.datamodel.base_models import BoundingBox, Cluster, Page
from docling.datamodel.pipeline_options import LayoutOptions
_log = logging.getLogger(__name__)
class UnionFind:
    """Efficient Union-Find data structure for grouping elements."""

    def __init__(self, elements):
        # Every element starts out as its own root with rank zero.
        self.parent = {elem: elem for elem in elements}
        self.rank = dict.fromkeys(elements, 0)

    def find(self, x):
        """Return the root representative of *x*, compressing the path."""
        root = self.parent[x]
        if root != x:
            root = self.find(root)  # Path compression
            self.parent[x] = root
        return root

    def union(self, x, y):
        """Merge the groups of *x* and *y* using union by rank."""
        root_a, root_b = self.find(x), self.find(y)
        if root_a == root_b:
            return

        # Ensure root_a carries the larger (or tied) rank, then attach.
        if self.rank[root_a] < self.rank[root_b]:
            root_a, root_b = root_b, root_a
        self.parent[root_b] = root_a
        if self.rank[root_a] == self.rank[root_b]:
            self.rank[root_a] += 1

    def get_groups(self) -> dict[int, list[int]]:
        """Returns groups as {root: [elements]}."""
        grouped: dict[int, list[int]] = defaultdict(list)
        for member in self.parent:
            grouped[self.find(member)].append(member)
        return grouped
class SpatialClusterIndex:
    """Efficient spatial indexing for clusters using R-tree and interval trees."""

    def __init__(self, clusters: list[Cluster]):
        # 2-D R-tree over cluster bboxes, plus one 1-D interval tree per axis.
        p = index.Property()
        p.dimension = 2
        self.spatial_index = index.Index(properties=p)
        self.x_intervals = IntervalTree()
        self.y_intervals = IntervalTree()
        self.clusters_by_id: dict[int, Cluster] = {}

        for cluster in clusters:
            self.add_cluster(cluster)

    def add_cluster(self, cluster: Cluster):
        """Register *cluster* in the R-tree, both interval trees and the id map."""
        bbox = cluster.bbox
        self.spatial_index.insert(cluster.id, bbox.as_tuple())
        self.x_intervals.insert(bbox.l, bbox.r, cluster.id)
        self.y_intervals.insert(bbox.t, bbox.b, cluster.id)
        self.clusters_by_id[cluster.id] = cluster

    def remove_cluster(self, cluster: Cluster):
        """Remove *cluster* from the R-tree and the id map.

        NOTE(review): the interval trees are not purged here, so stale ids may
        still come back from find_candidates — confirm callers filter through
        clusters_by_id.
        """
        self.spatial_index.delete(cluster.id, cluster.bbox.as_tuple())
        del self.clusters_by_id[cluster.id]

    def find_candidates(self, bbox: BoundingBox) -> set[int]:
        """Find potential overlapping cluster IDs using all indexes."""
        # Union of R-tree hits and interval-tree hits at each bbox edge.
        spatial = set(self.spatial_index.intersection(bbox.as_tuple()))
        x_candidates = self.x_intervals.find_containing(
            bbox.l
        ) | self.x_intervals.find_containing(bbox.r)
        y_candidates = self.y_intervals.find_containing(
            bbox.t
        ) | self.y_intervals.find_containing(bbox.b)
        return spatial.union(x_candidates).union(y_candidates)

    def check_overlap(
        self,
        bbox1: BoundingBox,
        bbox2: BoundingBox,
        overlap_threshold: float,
        containment_threshold: float,
    ) -> bool:
        """Check if two bboxes overlap sufficiently."""
        # Degenerate (zero-area) boxes never count as overlapping.
        if bbox1.area() <= 0 or bbox2.area() <= 0:
            return False

        iou = bbox1.intersection_over_union(bbox2)
        containment1 = bbox1.intersection_over_self(bbox2)
        containment2 = bbox2.intersection_over_self(bbox1)

        # Overlap when IoU passes the threshold, or either box is mostly
        # contained within the other.
        return (
            iou > overlap_threshold
            or containment1 > containment_threshold
            or containment2 > containment_threshold
        )
class Interval:
    """A sortable 1D interval [min_val, max_val] tagged with an id.

    Ordering is defined by the left endpoint only, so instances can be
    kept sorted with ``bisect`` and compared directly against bare
    numbers when probing with a point.
    """

    def __init__(self, min_val: float, max_val: float, id: int):
        self.min_val = min_val
        self.max_val = max_val
        self.id = id

    def __lt__(self, other):
        # Either another Interval (compare left endpoints) or a raw number.
        other_key = other.min_val if isinstance(other, Interval) else other
        return self.min_val < other_key
class IntervalTree:
    """Sorted-list index for 1D stabbing queries ("which intervals contain p?")."""

    def __init__(self):
        # Intervals stored as (min_val, max_val, id) tuples, kept in
        # natural (lexicographic) order, i.e. primarily by min_val.
        self.intervals: list[tuple[float, float, int]] = []

    def insert(self, min_val: float, max_val: float, id: int):
        """Insert the interval [min_val, max_val] tagged with *id*."""
        bisect.insort(self.intervals, (min_val, max_val, id))

    def find_containing(self, point: float) -> set[int]:
        """Find all intervals containing the point.

        The list is ordered by min_val only, so max_val carries no
        ordering information: every interval starting at or before the
        point must be inspected.  (Stopping at the first interval whose
        max_val falls short would wrongly skip earlier, wider intervals
        that do contain the point.)
        """
        result = set()
        for min_val, max_val, interval_id in self.intervals:
            if min_val > point:
                # Sorted by min_val: no later interval can start before point.
                break
            if point <= max_val:
                result.add(interval_id)
        return result
class LayoutPostprocessor:
    """Postprocesses layout predictions by cleaning up clusters and mapping cells."""

    # Cluster type-specific parameters for overlap resolution
    OVERLAP_PARAMS = {
        "regular": {"area_threshold": 1.3, "conf_threshold": 0.05},
        "picture": {"area_threshold": 2.0, "conf_threshold": 0.3},
        "wrapper": {"area_threshold": 2.0, "conf_threshold": 0.2},
    }

    # Labels that act as containers and may absorb regular clusters as children.
    WRAPPER_TYPES = {
        DocItemLabel.FORM,
        DocItemLabel.KEY_VALUE_REGION,
        DocItemLabel.TABLE,
        DocItemLabel.DOCUMENT_INDEX,
    }
    SPECIAL_TYPES = WRAPPER_TYPES.union({DocItemLabel.PICTURE})

    # Minimum prediction confidence required to keep a cluster, per label.
    CONFIDENCE_THRESHOLDS = {
        DocItemLabel.CAPTION: 0.5,
        DocItemLabel.FOOTNOTE: 0.5,
        DocItemLabel.FORMULA: 0.5,
        DocItemLabel.LIST_ITEM: 0.5,
        DocItemLabel.PAGE_FOOTER: 0.5,
        DocItemLabel.PAGE_HEADER: 0.5,
        DocItemLabel.PICTURE: 0.5,
        DocItemLabel.SECTION_HEADER: 0.45,
        DocItemLabel.TABLE: 0.5,
        DocItemLabel.TEXT: 0.5,  # 0.45,
        DocItemLabel.TITLE: 0.45,
        DocItemLabel.CODE: 0.45,
        DocItemLabel.CHECKBOX_SELECTED: 0.45,
        DocItemLabel.CHECKBOX_UNSELECTED: 0.45,
        DocItemLabel.FORM: 0.45,
        DocItemLabel.KEY_VALUE_REGION: 0.45,
        DocItemLabel.DOCUMENT_INDEX: 0.45,
    }

    # Labels rewritten unconditionally during regular-cluster processing.
    LABEL_REMAPPING = {
        # DocItemLabel.DOCUMENT_INDEX: DocItemLabel.TABLE,
        DocItemLabel.TITLE: DocItemLabel.SECTION_HEADER,
    }

    def __init__(
        self, page: Page, clusters: list[Cluster], options: LayoutOptions
    ) -> None:
        """Initialize processor with page and clusters."""
        self.cells = page.cells
        self.page = page
        self.page_size = page.size
        self.all_clusters = clusters
        self.options = options
        # Split once by label family; each family is processed separately.
        self.regular_clusters = [
            c for c in clusters if c.label not in self.SPECIAL_TYPES
        ]
        self.special_clusters = [c for c in clusters if c.label in self.SPECIAL_TYPES]
        # Build spatial indices once
        self.regular_index = SpatialClusterIndex(self.regular_clusters)
        self.picture_index = SpatialClusterIndex(
            [c for c in self.special_clusters if c.label == DocItemLabel.PICTURE]
        )
        self.wrapper_index = SpatialClusterIndex(
            [c for c in self.special_clusters if c.label in self.WRAPPER_TYPES]
        )

    def postprocess(self) -> tuple[list[Cluster], list[TextCell]]:
        """Main processing pipeline.

        Returns the final, sorted clusters together with the page's text
        cells.  Also writes the cells back onto ``page.parsed_page`` when
        cell assignment is enabled.
        """
        self.regular_clusters = self._process_regular_clusters()
        self.special_clusters = self._process_special_clusters()
        # Remove regular clusters that are included in wrappers
        contained_ids = {
            child.id
            for wrapper in self.special_clusters
            if wrapper.label in self.SPECIAL_TYPES
            for child in wrapper.children
        }
        self.regular_clusters = [
            c for c in self.regular_clusters if c.id not in contained_ids
        ]
        # Combine and sort final clusters
        final_clusters = self._sort_clusters(
            self.regular_clusters + self.special_clusters, mode="id"
        )
        # Conditionally process cells if not skipping cell assignment
        if not self.options.skip_cell_assignment:
            for cluster in final_clusters:
                cluster.cells = self._sort_cells(cluster.cells)
                # Also sort cells in children if any
                for child in cluster.children:
                    child.cells = self._sort_cells(child.cells)
            assert self.page.parsed_page is not None
            self.page.parsed_page.textline_cells = self.cells
            self.page.parsed_page.has_lines = len(self.cells) > 0
        return final_clusters, self.cells

    def _process_regular_clusters(self) -> list[Cluster]:
        """Process regular clusters with iterative refinement."""
        # Drop low-confidence predictions per label-specific thresholds.
        clusters = [
            c
            for c in self.regular_clusters
            if c.confidence >= self.CONFIDENCE_THRESHOLDS[c.label]
        ]
        # Apply label remapping
        for cluster in clusters:
            if cluster.label in self.LABEL_REMAPPING:
                cluster.label = self.LABEL_REMAPPING[cluster.label]
        # Conditionally assign cells to clusters
        if not self.options.skip_cell_assignment:
            # Initial cell assignment
            clusters = self._assign_cells_to_clusters(clusters)
            # Remove clusters with no cells (if keep_empty_clusters is False),
            # but always keep clusters with label DocItemLabel.FORMULA
            if not self.options.keep_empty_clusters:
                clusters = [
                    cluster
                    for cluster in clusters
                    if cluster.cells or cluster.label == DocItemLabel.FORMULA
                ]
            # Handle orphaned cells
            unassigned = self._find_unassigned_cells(clusters)
            if unassigned and self.options.create_orphan_clusters:
                # Each orphan cell becomes its own TEXT cluster with a fresh id.
                next_id = max((c.id for c in self.all_clusters), default=0) + 1
                orphan_clusters = []
                for i, cell in enumerate(unassigned):
                    conf = cell.confidence
                    orphan_clusters.append(
                        Cluster(
                            id=next_id + i,
                            label=DocItemLabel.TEXT,
                            bbox=cell.to_bounding_box(),
                            confidence=conf,
                            cells=[cell],
                        )
                    )
                clusters.extend(orphan_clusters)
        # Iterative refinement
        prev_count = len(clusters) + 1
        for _ in range(3):  # Maximum 3 iterations
            if prev_count == len(clusters):
                break  # converged: nothing was merged in the last pass
            prev_count = len(clusters)
            clusters = self._adjust_cluster_bboxes(clusters)
            clusters = self._remove_overlapping_clusters(clusters, "regular")
        return clusters

    def _process_special_clusters(self) -> list[Cluster]:
        """Process picture and wrapper clusters: filter, absorb children, dedupe."""
        special_clusters = [
            c
            for c in self.special_clusters
            if c.confidence >= self.CONFIDENCE_THRESHOLDS[c.label]
        ]
        special_clusters = self._handle_cross_type_overlaps(special_clusters)
        # Calculate page area from known page size
        assert self.page_size is not None
        page_area = self.page_size.width * self.page_size.height
        if page_area > 0:
            # Filter out full-page pictures
            special_clusters = [
                cluster
                for cluster in special_clusters
                if not (
                    cluster.label == DocItemLabel.PICTURE
                    and cluster.bbox.area() / page_area > 0.90
                )
            ]
        for special in special_clusters:
            # Absorb regular clusters that lie (mostly) inside this special one.
            contained = []
            for cluster in self.regular_clusters:
                containment = cluster.bbox.intersection_over_self(special.bbox)
                if containment > 0.8:
                    contained.append(cluster)
            if contained:
                # Sort contained clusters by minimum cell ID:
                contained = self._sort_clusters(contained, mode="id")
                special.children = contained
                # Adjust bbox only for Form and Key-Value-Region, not Table or Picture
                if special.label in [DocItemLabel.FORM, DocItemLabel.KEY_VALUE_REGION]:
                    special.bbox = BoundingBox(
                        l=min(c.bbox.l for c in contained),
                        t=min(c.bbox.t for c in contained),
                        r=max(c.bbox.r for c in contained),
                        b=max(c.bbox.b for c in contained),
                    )
                # Conditionally collect cells from children
                if not self.options.skip_cell_assignment:
                    all_cells = []
                    for child in contained:
                        all_cells.extend(child.cells)
                    special.cells = self._deduplicate_cells(all_cells)
                    special.cells = self._sort_cells(special.cells)
                else:
                    special.cells = []
        # Resolve overlaps within each special family independently.
        picture_clusters = [
            c for c in special_clusters if c.label == DocItemLabel.PICTURE
        ]
        picture_clusters = self._remove_overlapping_clusters(
            picture_clusters, "picture"
        )
        wrapper_clusters = [
            c for c in special_clusters if c.label in self.WRAPPER_TYPES
        ]
        wrapper_clusters = self._remove_overlapping_clusters(
            wrapper_clusters, "wrapper"
        )
        return picture_clusters + wrapper_clusters

    def _handle_cross_type_overlaps(self, special_clusters) -> list[Cluster]:
        """Handle overlaps between regular and wrapper clusters before child assignment.

        In particular, KEY_VALUE_REGION proposals that are almost identical to a TABLE
        should be removed.
        """
        wrappers_to_remove = set()
        for wrapper in special_clusters:
            if wrapper.label not in self.WRAPPER_TYPES:
                continue  # only treat KEY_VALUE_REGION for now.
            for regular in self.regular_clusters:
                if regular.label == DocItemLabel.TABLE:
                    # Calculate overlap
                    overlap_ratio = wrapper.bbox.intersection_over_self(regular.bbox)
                    conf_diff = wrapper.confidence - regular.confidence
                    # If wrapper is mostly overlapping with a TABLE, remove the wrapper
                    if (
                        overlap_ratio > 0.9 and conf_diff < 0.1
                    ):  # self.OVERLAP_PARAMS["wrapper"]["conf_threshold"]): # 80% overlap threshold
                        wrappers_to_remove.add(wrapper.id)
                        break
        # Filter out the identified wrappers
        special_clusters = [
            cluster
            for cluster in special_clusters
            if cluster.id not in wrappers_to_remove
        ]
        return special_clusters

    def _should_prefer_cluster(
        self, candidate: Cluster, other: Cluster, params: dict
    ) -> bool:
        """Determine if candidate cluster should be preferred over other cluster based on rules.

        Returns True if candidate should be preferred, False if not."""
        # Rule 1: LIST_ITEM vs TEXT
        if (
            candidate.label == DocItemLabel.LIST_ITEM
            and other.label == DocItemLabel.TEXT
        ):
            # Check if areas are similar (within 20% of each other)
            area_ratio = candidate.bbox.area() / other.bbox.area()
            area_similarity = abs(1 - area_ratio) < 0.2
            if area_similarity:
                return True
        # Rule 2: CODE vs others
        if candidate.label == DocItemLabel.CODE:
            # Calculate how much of the other cluster is contained within the CODE cluster
            containment = other.bbox.intersection_over_self(candidate.bbox)
            if containment > 0.8:  # other is 80% contained within CODE
                return True
        # If no label-based rules matched, fall back to area/confidence thresholds
        area_ratio = candidate.bbox.area() / other.bbox.area()
        conf_diff = other.confidence - candidate.confidence
        if (
            area_ratio <= params["area_threshold"]
            and conf_diff > params["conf_threshold"]
        ):
            return False
        return True  # Default to keeping candidate if no rules triggered rejection

    def _select_best_cluster_from_group(
        self,
        group_clusters: list[Cluster],
        params: dict,
    ) -> Cluster:
        """Select best cluster from a group of overlapping clusters based on all rules."""
        current_best = None
        for candidate in group_clusters:
            # A candidate survives only if preferred against every other member.
            should_select = True
            for other in group_clusters:
                if other == candidate:
                    continue
                if not self._should_prefer_cluster(candidate, other, params):
                    should_select = False
                    break
            if should_select:
                if current_best is None:
                    current_best = candidate
                else:
                    # If both clusters pass rules, prefer the larger one unless confidence differs significantly
                    if (
                        candidate.bbox.area() > current_best.bbox.area()
                        and current_best.confidence - candidate.confidence
                        <= params["conf_threshold"]
                    ):
                        current_best = candidate
        return current_best if current_best else group_clusters[0]

    def _remove_overlapping_clusters(
        self,
        clusters: list[Cluster],
        cluster_type: str,
        overlap_threshold: float = 0.8,
        containment_threshold: float = 0.8,
    ) -> list[Cluster]:
        """Merge groups of mutually-overlapping clusters down to one best cluster each.

        *cluster_type* selects the prebuilt spatial index and the matching
        entry in OVERLAP_PARAMS ("regular", "picture" or "wrapper").
        """
        if not clusters:
            return []
        spatial_index = (
            self.regular_index
            if cluster_type == "regular"
            else self.picture_index
            if cluster_type == "picture"
            else self.wrapper_index
        )
        # Map of currently valid clusters
        valid_clusters = {c.id: c for c in clusters}
        uf = UnionFind(valid_clusters.keys())
        params = self.OVERLAP_PARAMS[cluster_type]
        # Union every pair of sufficiently-overlapping clusters.
        for cluster in clusters:
            candidates = spatial_index.find_candidates(cluster.bbox)
            candidates &= valid_clusters.keys()  # Only keep existing candidates
            candidates.discard(cluster.id)
            for other_id in candidates:
                if spatial_index.check_overlap(
                    cluster.bbox,
                    valid_clusters[other_id].bbox,
                    overlap_threshold,
                    containment_threshold,
                ):
                    uf.union(cluster.id, other_id)
        result = []
        for group in uf.get_groups().values():
            if len(group) == 1:
                result.append(valid_clusters[group[0]])
                continue
            group_clusters = [valid_clusters[cid] for cid in group]
            best = self._select_best_cluster_from_group(group_clusters, params)
            # Simple cell merging - no special cases
            for cluster in group_clusters:
                if cluster != best:
                    best.cells.extend(cluster.cells)
            best.cells = self._deduplicate_cells(best.cells)
            best.cells = self._sort_cells(best.cells)
            result.append(best)
        return result

    def _select_best_cluster(
        self,
        clusters: list[Cluster],
        area_threshold: float,
        conf_threshold: float,
    ) -> Cluster:
        """Iteratively select best cluster based on area and confidence thresholds."""
        # NOTE(review): simpler variant of _select_best_cluster_from_group that
        # skips the label-based rules; no caller is visible in this chunk.
        current_best = None
        for candidate in clusters:
            should_select = True
            for other in clusters:
                if other == candidate:
                    continue
                area_ratio = candidate.bbox.area() / other.bbox.area()
                conf_diff = other.confidence - candidate.confidence
                if area_ratio <= area_threshold and conf_diff > conf_threshold:
                    should_select = False
                    break
            if should_select:
                if current_best is None or (
                    candidate.bbox.area() > current_best.bbox.area()
                    and current_best.confidence - candidate.confidence <= conf_threshold
                ):
                    current_best = candidate
        return current_best if current_best else clusters[0]

    def _deduplicate_cells(self, cells: list[TextCell]) -> list[TextCell]:
        """Ensure each cell appears only once, maintaining order of first appearance."""
        seen_ids = set()
        unique_cells = []
        for cell in cells:
            if cell.index not in seen_ids:
                seen_ids.add(cell.index)
                unique_cells.append(cell)
        return unique_cells

    def _assign_cells_to_clusters(
        self, clusters: list[Cluster], min_overlap: float = 0.2
    ) -> list[Cluster]:
        """Assign cells to best overlapping cluster.

        A cell joins the cluster that covers the largest fraction of the
        cell's own box, provided that fraction exceeds *min_overlap*.
        """
        for cluster in clusters:
            cluster.cells = []
        for cell in self.cells:
            # Whitespace-only cells are never assigned.
            if not cell.text.strip():
                continue
            best_overlap = min_overlap
            best_cluster = None
            for cluster in clusters:
                if cell.rect.to_bounding_box().area() <= 0:
                    continue
                overlap_ratio = cell.rect.to_bounding_box().intersection_over_self(
                    cluster.bbox
                )
                if overlap_ratio > best_overlap:
                    best_overlap = overlap_ratio
                    best_cluster = cluster
            if best_cluster is not None:
                best_cluster.cells.append(cell)
        # Deduplicate cells in each cluster after assignment
        for cluster in clusters:
            cluster.cells = self._deduplicate_cells(cluster.cells)
        return clusters

    def _find_unassigned_cells(self, clusters: list[Cluster]) -> list[TextCell]:
        """Find cells not assigned to any cluster."""
        assigned = {cell.index for cluster in clusters for cell in cluster.cells}
        return [
            cell
            for cell in self.cells
            if cell.index not in assigned and cell.text.strip()
        ]

    def _adjust_cluster_bboxes(self, clusters: list[Cluster]) -> list[Cluster]:
        """Adjust cluster bounding boxes to contain their cells."""
        for cluster in clusters:
            if not cluster.cells:
                continue
            # Tight bounding box around all of the cluster's cells.
            cells_bbox = BoundingBox(
                l=min(cell.rect.to_bounding_box().l for cell in cluster.cells),
                t=min(cell.rect.to_bounding_box().t for cell in cluster.cells),
                r=max(cell.rect.to_bounding_box().r for cell in cluster.cells),
                b=max(cell.rect.to_bounding_box().b for cell in cluster.cells),
            )
            if cluster.label == DocItemLabel.TABLE:
                # For tables, take union of current bbox and cells bbox
                cluster.bbox = BoundingBox(
                    l=min(cluster.bbox.l, cells_bbox.l),
                    t=min(cluster.bbox.t, cells_bbox.t),
                    r=max(cluster.bbox.r, cells_bbox.r),
                    b=max(cluster.bbox.b, cells_bbox.b),
                )
            else:
                cluster.bbox = cells_bbox
        return clusters

    def _sort_cells(self, cells: list[TextCell]) -> list[TextCell]:
        """Sort cells in native reading order."""
        return sorted(cells, key=lambda c: (c.index))

    def _sort_clusters(
        self, clusters: list[Cluster], mode: str = "id"
    ) -> list[Cluster]:
        """Sort clusters in reading order (top-to-bottom, left-to-right)."""
        if mode == "id":  # sort in the order the cells are printed in the PDF.
            return sorted(
                clusters,
                key=lambda cluster: (
                    (
                        min(cell.index for cell in cluster.cells)
                        if cluster.cells
                        else sys.maxsize
                    ),
                    cluster.bbox.t,
                    cluster.bbox.l,
                ),
            )
        elif mode == "tblr":  # Sort top-to-bottom, then left-to-right ("row first")
            return sorted(
                clusters, key=lambda cluster: (cluster.bbox.t, cluster.bbox.l)
            )
        elif mode == "lrtb":  # Sort left-to-right, then top-to-bottom ("column first")
            return sorted(
                clusters, key=lambda cluster: (cluster.bbox.l, cluster.bbox.t)
            )
        else:
            # Unknown mode: leave the order unchanged.
            return clusters
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/utils/export.py | docling/utils/export.py | import logging
from collections.abc import Iterable
from typing import Any, Dict, List, Tuple, Union
from docling_core.types.doc import BoundingBox, CoordOrigin
from docling_core.types.legacy_doc.base import BaseCell, BaseText, Ref, Table
from docling.datamodel.document import ConversionResult, Page
_log = logging.getLogger(__name__)
def generate_multimodal_pages(
    doc_result: ConversionResult,
) -> Iterable[Tuple[str, str, str, List[Dict[str, Any]], List[Dict[str, Any]], Page]]:
    """Yield one multimodal record per page of *doc_result*.

    Each yielded tuple is (content_text, content_md, content_dt, page_cells,
    page_segments, page): the page's concatenated plain text, its markdown
    export, its document-token export, the raw text cells, the labeled layout
    segments, and the Page object itself.  Pages are emitted lazily in
    document order, driven by the provenance page number of each main-text
    item of the legacy document.
    """
    # Legacy item type name -> DocLayNet-style label.
    label_to_doclaynet = {
        "title": "title",
        "table-of-contents": "document_index",
        "subtitle-level-1": "section_header",
        "checkbox-selected": "checkbox_selected",
        "checkbox-unselected": "checkbox_unselected",
        "caption": "caption",
        "page-header": "page_header",
        "page-footer": "page_footer",
        "footnote": "footnote",
        "table": "table",
        "formula": "formula",
        "list-item": "list_item",
        "code": "code",
        "figure": "picture",
        "picture": "picture",
        "reference": "text",
        "paragraph": "text",
        "text": "text",
    }
    # Rolling state for the page currently being accumulated.
    content_text = ""
    page_no = 0
    start_ix = 0
    end_ix = 0
    doc_items: List[Tuple[int, Union[BaseCell, BaseText]]] = []
    doc = doc_result.legacy_document

    def _process_page_segments(doc_items: list[Tuple[int, BaseCell]], page: Page):
        """Build normalized segment dicts for the items collected on one page."""
        segments = []
        for ix, item in doc_items:
            item_type = item.obj_type
            label = label_to_doclaynet.get(item_type, None)
            if label is None or item.prov is None or page.size is None:
                continue
            bbox = BoundingBox.from_tuple(
                tuple(item.prov[0].bbox), origin=CoordOrigin.BOTTOMLEFT
            )
            # Convert to top-left origin and normalize to [0, 1] page coords.
            new_bbox = bbox.to_top_left_origin(page_height=page.size.height).normalized(
                page_size=page.size
            )
            new_segment = {
                "index_in_doc": ix,
                "label": label,
                "text": item.text if item.text is not None else "",
                "bbox": new_bbox.as_tuple(),
                "data": [],
            }
            if isinstance(item, Table):
                table_html = item.export_to_html()
                new_segment["data"].append(
                    {
                        "html_seq": table_html,
                        "otsl_seq": "",
                    }
                )
            segments.append(new_segment)
        return segments

    def _process_page_cells(page: Page):
        """Build normalized cell dicts (with OCR metadata) for one page."""
        cells: List[dict] = []
        if page.size is None:
            return cells
        for cell in page.cells:
            new_bbox = (
                cell.rect.to_bounding_box()
                .to_top_left_origin(page_height=page.size.height)
                .normalized(page_size=page.size)
            )
            is_ocr = cell.from_ocr
            ocr_confidence = cell.confidence
            cells.append(
                {
                    "text": cell.text,
                    "bbox": new_bbox.as_tuple(),
                    "ocr": is_ocr,
                    "ocr_confidence": ocr_confidence,
                }
            )
        return cells

    def _process_page():
        """Assemble the full record for the page accumulated so far."""
        page_ix = page_no - 1
        page = doc_result.pages[page_ix]
        page_cells = _process_page_cells(page=page)
        page_segments = _process_page_segments(doc_items=doc_items, page=page)
        content_md = doc.export_to_markdown(
            main_text_start=start_ix, main_text_stop=end_ix
        )
        # No page-tagging since we only do 1 page at the time
        content_dt = doc.export_to_document_tokens(
            main_text_start=start_ix, main_text_stop=end_ix, add_page_index=False
        )
        return content_text, content_md, content_dt, page_cells, page_segments, page

    if doc.main_text is None:
        return
    for ix, orig_item in enumerate(doc.main_text):
        # Resolve references to the actual item; skip items without provenance.
        item = doc._resolve_ref(orig_item) if isinstance(orig_item, Ref) else orig_item
        if item is None or item.prov is None or len(item.prov) == 0:
            _log.debug(f"Skipping item {orig_item}")
            continue
        item_page = item.prov[0].page
        # Page is complete
        if page_no > 0 and item_page > page_no:
            yield _process_page()
            # Reset the accumulator for the next page.
            start_ix = ix
            doc_items = []
            content_text = ""
        page_no = item_page
        end_ix = ix
        doc_items.append((ix, item))
        if item.text is not None and item.text != "":
            content_text += item.text + " "
    # Flush the trailing page.
    if len(doc_items) > 0:
        yield _process_page()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/utils/accelerator_utils.py | docling/utils/accelerator_utils.py | import logging
from typing import List, Optional
from docling.datamodel.accelerator_options import AcceleratorDevice
_log = logging.getLogger(__name__)
def decide_device(
    accelerator_device: str, supported_devices: Optional[List[AcceleratorDevice]] = None
) -> str:
    r"""
    Resolve the device based on the acceleration options and the available devices in the system.

    Rules:
    1. AUTO: Check for the best available device on the system.
    2. User-defined: Check if the device actually exists, otherwise fall-back to CPU
    """
    import torch

    cuda_ok = torch.backends.cuda.is_built() and torch.cuda.is_available()
    mps_ok = torch.backends.mps.is_built() and torch.backends.mps.is_available()

    # Honour an explicit allow-list of accelerator devices, if one was given.
    if supported_devices is not None:
        if cuda_ok and AcceleratorDevice.CUDA not in supported_devices:
            _log.info(
                f"Removing CUDA from available devices because it is not in {supported_devices=}"
            )
            cuda_ok = False
        if mps_ok and AcceleratorDevice.MPS not in supported_devices:
            _log.info(
                f"Removing MPS from available devices because it is not in {supported_devices=}"
            )
            mps_ok = False

    device = "cpu"

    if accelerator_device == AcceleratorDevice.AUTO.value:  # Handle 'auto'
        # Pick the best available backend; otherwise stay on CPU.
        if cuda_ok:
            device = "cuda:0"
        elif mps_ok:
            device = "mps"
    elif accelerator_device.startswith("cuda"):
        if not cuda_ok:
            _log.warning("CUDA is not available in the system. Fall back to 'CPU'")
        else:
            parts = accelerator_device.split(":")
            if len(parts) == 1:  # just "cuda"
                device = "cuda:0"
            elif len(parts) == 2 and parts[1].isdigit():
                # An explicit device index was requested; validate it.
                cuda_index = int(parts[1])
                if cuda_index < torch.cuda.device_count():
                    device = f"cuda:{cuda_index}"
                else:
                    _log.warning(
                        "CUDA device 'cuda:%d' is not available. Fall back to 'CPU'.",
                        cuda_index,
                    )
            else:
                _log.warning(
                    "Invalid CUDA device format '%s'. Fall back to 'CPU'",
                    accelerator_device,
                )
    elif accelerator_device == AcceleratorDevice.MPS.value:
        if mps_ok:
            device = "mps"
        else:
            _log.warning("MPS is not available in the system. Fall back to 'CPU'")
    elif accelerator_device == AcceleratorDevice.CPU.value:
        device = "cpu"
    else:
        _log.warning(
            "Unknown device option '%s'. Fall back to 'CPU'", accelerator_device
        )

    _log.info("Accelerator device: '%s'", device)
    return device
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/utils/glm_utils.py | docling/utils/glm_utils.py | import re
from pathlib import Path
from typing import List
import pandas as pd
from docling_core.types.doc import (
BoundingBox,
CoordOrigin,
DocItemLabel,
DoclingDocument,
DocumentOrigin,
GroupLabel,
ProvenanceItem,
Size,
TableCell,
TableData,
)
from docling_core.types.doc.document import ContentLayer
def resolve_item(paths, obj):
    """Find item in document from a reference path.

    *paths* is a list of path components, as produced by splitting a JSON
    reference such as ``#/tables/2`` on ``/``.  A ``#`` component denotes
    the document root and is skipped; numeric components index into lists
    (out-of-range indices resolve to None), all others are mapping keys.
    Returns the resolved object, or None when any component cannot be
    resolved.
    """
    current = obj
    for part in paths:
        if part == "#":
            # Root marker: stay on the current object.
            continue
        try:
            key = int(part)
        except (ValueError, TypeError):
            key = part
        if isinstance(key, str) and key in current:
            current = current[key]
        elif isinstance(key, int) and key < len(current):
            current = current[key]
        else:
            # Missing key or out-of-range index: the reference is dangling.
            return None
    return current
def _flatten_table_grid(grid: List[List[dict]]) -> List[dict]:
unique_objects = []
seen_spans = set()
for sublist in grid:
for obj in sublist:
# Convert the spans list to a tuple of tuples for hashing
spans_tuple = tuple(tuple(span) for span in obj["spans"])
if spans_tuple not in seen_spans:
seen_spans.add(spans_tuple)
unique_objects.append(obj)
return unique_objects
def to_docling_document(doc_glm, update_name_label=False) -> DoclingDocument:  # noqa: C901
    """Convert a legacy GLM document dict into a DoclingDocument.

    Walks ``doc_glm["page-elements"]`` in order, resolving each element's
    ``iref`` JSON reference into the corresponding object and adding it to
    the new document (figures with captions, tables with cells and
    captions, form/key-value groups with children, and plain text items).
    When *update_name_label* is True, paragraph labels may be overridden by
    high-confidence semantic properties from ``doc_glm["properties"]``.
    """
    origin = DocumentOrigin(
        mimetype="application/pdf",
        filename=doc_glm["file-info"]["filename"],
        binary_hash=doc_glm["file-info"]["document-hash"],
    )
    doc_name = Path(origin.filename).stem
    doc: DoclingDocument = DoclingDocument(name=doc_name, origin=origin)

    # Register all page sizes up front.
    for page_dim in doc_glm["page-dimensions"]:
        page_no = int(page_dim["page"])
        size = Size(width=page_dim["width"], height=page_dim["height"])
        doc.add_page(page_no=page_no, size=size)

    # Optional semantic properties table, used for paragraph relabeling.
    if "properties" in doc_glm:
        props = pd.DataFrame(
            doc_glm["properties"]["data"], columns=doc_glm["properties"]["headers"]
        )
    else:
        props = pd.DataFrame()

    # Open list group; consecutive list items are attached to it and any
    # non-list element closes it.
    current_list = None

    for ix, pelem in enumerate(doc_glm["page-elements"]):
        ptype = pelem["type"]
        span_i = pelem["span"][0]
        span_j = pelem["span"][1]
        if "iref" not in pelem:
            # print(json.dumps(pelem, indent=2))
            continue
        iref = pelem["iref"]
        # Captions are emitted while handling their figure/table; skip the
        # standalone caption references here.
        if re.match("#/figures/(\\d+)/captions/(.+)", iref):
            # print(f"skip {iref}")
            continue
        if re.match("#/tables/(\\d+)/captions/(.+)", iref):
            # print(f"skip {iref}")
            continue
        path = iref.split("/")
        obj = resolve_item(path, doc_glm)
        if obj is None:
            current_list = None
            print(f"warning: undefined {path}")
            continue
        if ptype == "figure":
            current_list = None
            text = ""
            caption_refs = []
            # Emit each caption fragment as its own CAPTION item and keep
            # references to link them to the picture afterwards.
            for caption in obj["captions"]:
                text += caption["text"]
                for nprov in caption["prov"]:
                    npaths = nprov["$ref"].split("/")
                    nelem = resolve_item(npaths, doc_glm)
                    if nelem is None:
                        # print(f"warning: undefined caption {npaths}")
                        continue
                    span_i = nelem["span"][0]
                    span_j = nelem["span"][1]
                    cap_text = caption["text"][span_i:span_j]
                    # doc_glm["page-elements"].remove(nelem)
                    prov = ProvenanceItem(
                        page_no=nelem["page"],
                        charspan=tuple(nelem["span"]),
                        bbox=BoundingBox.from_tuple(
                            nelem["bbox"], origin=CoordOrigin.BOTTOMLEFT
                        ),
                    )
                    caption_obj = doc.add_text(
                        label=DocItemLabel.CAPTION, text=cap_text, prov=prov
                    )
                    caption_refs.append(caption_obj.get_ref())
            prov = ProvenanceItem(
                page_no=pelem["page"],
                charspan=(0, len(text)),
                bbox=BoundingBox.from_tuple(
                    pelem["bbox"], origin=CoordOrigin.BOTTOMLEFT
                ),
            )
            pic = doc.add_picture(prov=prov)
            pic.captions.extend(caption_refs)
            _add_child_elements(pic, doc, obj, pelem)
        elif ptype == "table":
            current_list = None
            text = ""
            caption_refs = []
            item_label = DocItemLabel(pelem["name"])
            # Same caption handling as for figures above.
            for caption in obj["captions"]:
                text += caption["text"]
                for nprov in caption["prov"]:
                    npaths = nprov["$ref"].split("/")
                    nelem = resolve_item(npaths, doc_glm)
                    if nelem is None:
                        # print(f"warning: undefined caption {npaths}")
                        continue
                    span_i = nelem["span"][0]
                    span_j = nelem["span"][1]
                    cap_text = caption["text"][span_i:span_j]
                    # doc_glm["page-elements"].remove(nelem)
                    prov = ProvenanceItem(
                        page_no=nelem["page"],
                        charspan=tuple(nelem["span"]),
                        bbox=BoundingBox.from_tuple(
                            nelem["bbox"], origin=CoordOrigin.BOTTOMLEFT
                        ),
                    )
                    caption_obj = doc.add_text(
                        label=DocItemLabel.CAPTION, text=cap_text, prov=prov
                    )
                    caption_refs.append(caption_obj.get_ref())
            # The GLM grid repeats spanned cells; flatten to unique cells.
            table_cells_glm = _flatten_table_grid(obj["data"])
            table_cells = []
            for tbl_cell_glm in table_cells_glm:
                if tbl_cell_glm["bbox"] is not None:
                    bbox = BoundingBox.from_tuple(
                        tbl_cell_glm["bbox"], origin=CoordOrigin.BOTTOMLEFT
                    )
                else:
                    bbox = None
                is_col_header = False
                is_row_header = False
                is_row_section = False
                if tbl_cell_glm["type"] == "col_header":
                    is_col_header = True
                elif tbl_cell_glm["type"] == "row_header":
                    is_row_header = True
                elif tbl_cell_glm["type"] == "row_section":
                    is_row_section = True
                table_cells.append(
                    TableCell(
                        row_span=tbl_cell_glm["row-span"][1]
                        - tbl_cell_glm["row-span"][0],
                        col_span=tbl_cell_glm["col-span"][1]
                        - tbl_cell_glm["col-span"][0],
                        start_row_offset_idx=tbl_cell_glm["row-span"][0],
                        end_row_offset_idx=tbl_cell_glm["row-span"][1],
                        start_col_offset_idx=tbl_cell_glm["col-span"][0],
                        end_col_offset_idx=tbl_cell_glm["col-span"][1],
                        text=tbl_cell_glm["text"],
                        bbox=bbox,
                        column_header=is_col_header,
                        row_header=is_row_header,
                        row_section=is_row_section,
                    )
                )
            tbl_data = TableData(
                num_rows=obj.get("#-rows", 0),
                num_cols=obj.get("#-cols", 0),
                table_cells=table_cells,
            )
            prov = ProvenanceItem(
                page_no=pelem["page"],
                charspan=(0, 0),
                bbox=BoundingBox.from_tuple(
                    pelem["bbox"], origin=CoordOrigin.BOTTOMLEFT
                ),
            )
            tbl = doc.add_table(data=tbl_data, prov=prov, label=item_label)
            tbl.captions.extend(caption_refs)
        elif ptype in [DocItemLabel.FORM.value, DocItemLabel.KEY_VALUE_REGION.value]:
            # Forms and key-value regions become groups holding their children.
            label = DocItemLabel(ptype)
            group_label = GroupLabel.UNSPECIFIED
            if label == DocItemLabel.FORM:
                group_label = GroupLabel.FORM_AREA
            elif label == DocItemLabel.KEY_VALUE_REGION:
                group_label = GroupLabel.KEY_VALUE_AREA
            container_el = doc.add_group(label=group_label)
            _add_child_elements(container_el, doc, obj, pelem)
        elif "text" in obj:
            text = obj["text"][span_i:span_j]
            type_label = pelem["type"]
            name_label = pelem["name"]
            # Optionally relabel paragraphs using high-confidence semantic props.
            if update_name_label and len(props) > 0 and type_label == "paragraph":
                prop = props[
                    (props["type"] == "semantic") & (props["subj_path"] == iref)
                ]
                if len(prop) == 1 and prop.iloc[0]["confidence"] > 0.85:
                    name_label = prop.iloc[0]["label"]
            prov = ProvenanceItem(
                page_no=pelem["page"],
                charspan=(0, len(text)),
                bbox=BoundingBox.from_tuple(
                    pelem["bbox"], origin=CoordOrigin.BOTTOMLEFT
                ),
            )
            label = DocItemLabel(name_label)
            if label == DocItemLabel.LIST_ITEM:
                if current_list is None:
                    current_list = doc.add_group(label=GroupLabel.LIST, name="list")
                # TODO: Infer if this is a numbered or a bullet list item
                doc.add_list_item(
                    text=text, enumerated=False, prov=prov, parent=current_list
                )
            elif label == DocItemLabel.SECTION_HEADER:
                current_list = None
                doc.add_heading(text=text, prov=prov)
            elif label == DocItemLabel.CODE:
                current_list = None
                doc.add_code(text=text, prov=prov)
            elif label == DocItemLabel.FORMULA:
                current_list = None
                doc.add_text(label=DocItemLabel.FORMULA, text="", orig=text, prov=prov)
            elif label in [DocItemLabel.PAGE_HEADER, DocItemLabel.PAGE_FOOTER]:
                current_list = None
                # Headers/footers belong to the furniture content layer.
                doc.add_text(
                    label=DocItemLabel(name_label),
                    text=text,
                    prov=prov,
                    content_layer=ContentLayer.FURNITURE,
                )
            else:
                current_list = None
                doc.add_text(label=DocItemLabel(name_label), text=text, prov=prov)
    return doc
def _add_child_elements(container_el, doc, obj, pelem):
    """Attach the children listed in obj["payload"] to *container_el*.

    Each child dict supplies a label, a bbox (converted to bottom-left
    origin for the element's page) and text cells, which are joined into a
    single text fragment.
    """
    payload = obj.get("payload")
    if payload is None:
        return
    page_no = pelem["page"]
    page_height = doc.pages[page_no].size.height
    for child in payload.get("children", []):
        c_label = DocItemLabel(child["label"])
        c_bbox = BoundingBox.model_validate(child["bbox"]).to_bottom_left_origin(
            page_height
        )
        # Join non-empty cell texts; \x02 control chars are rendered as "-".
        fragments = [
            cell["text"].replace("\x02", "-").strip()
            for cell in child["cells"]
            if len(cell["text"].strip()) > 0
        ]
        c_text = " ".join(fragments)
        c_prov = ProvenanceItem(
            page_no=page_no, charspan=(0, len(c_text)), bbox=c_bbox
        )
        if c_label == DocItemLabel.LIST_ITEM:
            # TODO: Infer if this is a numbered or a bullet list item
            doc.add_list_item(parent=container_el, text=c_text, prov=c_prov)
        elif c_label == DocItemLabel.SECTION_HEADER:
            doc.add_heading(parent=container_el, text=c_text, prov=c_prov)
        else:
            doc.add_text(
                parent=container_el, label=c_label, text=c_text, prov=c_prov
            )
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/utils/ocr_utils.py | docling/utils/ocr_utils.py | from typing import Optional, Tuple
from docling_core.types.doc import BoundingBox, CoordOrigin
from docling_core.types.doc.page import BoundingRectangle
from docling.utils.orientation import CLIPPED_ORIENTATIONS, rotate_bounding_box
def map_tesseract_script(script: str) -> str:
    """Map a Tesseract-detected script name onto the traineddata name to load.

    Katakana/Hiragana map to the Japanese model, Han to simplified Chinese
    ("HanS") and Korean to Hangul; anything else passes through unchanged.
    """
    aliases = {
        "Katakana": "Japanese",
        "Hiragana": "Japanese",
        "Han": "HanS",
        "Korean": "Hangul",
    }
    return aliases.get(script, script)
def parse_tesseract_orientation(orientation: str) -> int:
    """Convert a Tesseract orientation string into a counterclockwise angle.

    Tesseract orientation is [0, 90, 180, 270] clockwise, bounding rectangle
    angles are [0, 360[ counterclockwise.

    Raises:
        ValueError: If the value is not one of the clipped orientations.
    """
    degrees_cw = int(orientation)
    if degrees_cw not in CLIPPED_ORIENTATIONS:
        msg = (
            f"invalid tesseract document orientation {orientation}, "
            f"expected orientation: {sorted(CLIPPED_ORIENTATIONS)}"
        )
        raise ValueError(msg)
    # Negate to flip the rotation direction, then wrap into [0, 360).
    return -degrees_cw % 360
def tesseract_box_to_bounding_rectangle(
    bbox: BoundingBox,
    *,
    original_offset: Optional[BoundingBox] = None,
    scale: float,
    orientation: int,
    im_size: Tuple[int, int],
) -> BoundingRectangle:
    """Convert a Tesseract box into a top-left-origin BoundingRectangle.

    The box (top, left, height, width in top-left coordinates) is rotated
    for the detected page *orientation*, scaled back down by *scale* (the
    factor the OCR image was upscaled by), then shifted by *original_offset*
    when OCR ran on a cropped region of the page.

    Args:
        bbox: Tesseract box, top-left coordinates.
        original_offset: Offset of the OCR crop within the full page; must
            use the TOPLEFT coordinate origin when given.
        scale: Upscaling factor applied to the page image before OCR.
        orientation: Page rotation in degrees (one of the clipped values).
        im_size: (width, height) of the scaled OCR image.

    Raises:
        ValueError: If *original_offset* does not use the TOPLEFT origin.
    """
    # box is in the top, left, height, width format, top left coordinates
    rect = rotate_bounding_box(bbox, angle=orientation, im_size=im_size)
    rect = BoundingRectangle(
        r_x0=rect.r_x0 / scale,
        r_y0=rect.r_y0 / scale,
        r_x1=rect.r_x1 / scale,
        r_y1=rect.r_y1 / scale,
        r_x2=rect.r_x2 / scale,
        r_y2=rect.r_y2 / scale,
        r_x3=rect.r_x3 / scale,
        r_y3=rect.r_y3 / scale,
        coord_origin=CoordOrigin.TOPLEFT,
    )
    if original_offset is not None:
        if original_offset.coord_origin is not CoordOrigin.TOPLEFT:
            msg = f"expected coordinate origin to be {CoordOrigin.TOPLEFT.value}"
            raise ValueError(msg)
        # Shift back into full-page coordinates. (A redundant second
        # `is not None` check inside this branch was removed.)
        rect.r_x0 += original_offset.l
        rect.r_x1 += original_offset.l
        rect.r_x2 += original_offset.l
        rect.r_x3 += original_offset.l
        rect.r_y0 += original_offset.t
        rect.r_y1 += original_offset.t
        rect.r_y2 += original_offset.t
        rect.r_y3 += original_offset.t
    return rect
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/utils/utils.py | docling/utils/utils.py | import hashlib
from io import BytesIO
from itertools import islice
from pathlib import Path
from typing import List, Union
import requests
from tqdm import tqdm
def chunkify(iterator, chunk_size):
    """Yield successive chunks of chunk_size from the iterable.

    Args:
        iterator: Any iterable (list, tuple, generator, ...).
        chunk_size: Maximum number of elements per yielded chunk.

    Yields:
        Lists of up to *chunk_size* consecutive elements.
    """
    # Normalize to an iterator so islice() consumes it incrementally.
    # The previous version only converted lists: passing any other
    # non-iterator sequence (e.g. a tuple) made each islice() restart
    # from the beginning, producing wrong chunks.
    iterator = iter(iterator)
    for first in iterator:  # Take the first element from the iterator
        yield [first, *list(islice(iterator, chunk_size - 1))]
def create_file_hash(path_or_stream: Union[BytesIO, Path]) -> str:
    """Create a stable page_hash of the path_or_stream of a file"""
    digest = hashlib.sha256(usedforsecurity=False)

    def _consume(stream) -> None:
        # Stream the content in fixed-size chunks to keep memory bounded.
        for chunk in iter(lambda: stream.read(65536), b""):
            digest.update(chunk)

    if isinstance(path_or_stream, Path):
        with path_or_stream.open("rb") as fh:
            _consume(fh)
    elif isinstance(path_or_stream, BytesIO):
        _consume(path_or_stream)
    return digest.hexdigest()
def create_hash(string: str):
    """Return the hex SHA-256 digest of *string* (UTF-8 encoded)."""
    return hashlib.sha256(string.encode("utf-8"), usedforsecurity=False).hexdigest()
def download_url_with_progress(url: str, progress: bool = False) -> BytesIO:
    """Stream *url* into an in-memory buffer, optionally showing a progress bar.

    Args:
        url: The URL to fetch; redirects are followed.
        progress: When True, render a tqdm progress bar.

    Returns:
        A BytesIO positioned at the start of the downloaded content.
    """
    out = BytesIO()
    with requests.get(url, stream=True, allow_redirects=True) as response:
        expected_size = int(response.headers.get("content-length", 0))
        bar = tqdm(
            total=expected_size,
            unit="B",
            unit_scale=True,
            unit_divisor=1024,
            disable=(not progress),
        )
        # 10 KiB chunks keep memory flat while updating the bar smoothly.
        for chunk in response.iter_content(10 * 1024):
            out.write(chunk)
            bar.update(len(chunk))
        bar.close()

    out.seek(0)
    return out
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/utils/model_downloader.py | docling/utils/model_downloader.py | import logging
from pathlib import Path
from typing import Optional
from docling.datamodel.layout_model_specs import DOCLING_LAYOUT_V2
from docling.datamodel.pipeline_options import (
LayoutOptions,
granite_picture_description,
smolvlm_picture_description,
)
from docling.datamodel.settings import settings
from docling.datamodel.vlm_model_specs import (
GRANITEDOCLING_MLX,
GRANITEDOCLING_TRANSFORMERS,
SMOLDOCLING_MLX,
SMOLDOCLING_TRANSFORMERS,
)
from docling.models.code_formula_model import CodeFormulaModel
from docling.models.document_picture_classifier import DocumentPictureClassifier
from docling.models.easyocr_model import EasyOcrModel
from docling.models.layout_model import LayoutModel
from docling.models.picture_description_vlm_model import PictureDescriptionVlmModel
from docling.models.rapid_ocr_model import RapidOcrModel
from docling.models.table_structure_model import TableStructureModel
from docling.models.utils.hf_model_download import download_hf_model
_log = logging.getLogger(__name__)
def download_models(
    output_dir: Optional[Path] = None,
    *,
    force: bool = False,
    progress: bool = False,
    with_layout: bool = True,
    with_tableformer: bool = True,
    with_code_formula: bool = True,
    with_picture_classifier: bool = True,
    with_smolvlm: bool = False,
    with_granitedocling: bool = False,
    with_granitedocling_mlx: bool = False,
    with_smoldocling: bool = False,
    with_smoldocling_mlx: bool = False,
    with_granite_vision: bool = False,
    with_rapidocr: bool = True,
    with_easyocr: bool = False,
):
    """Download the model artifacts used by the docling pipelines.

    Each ``with_*`` flag toggles one model family. By default only the core
    models (layout, tableformer, code formula, picture classifier, rapidocr)
    are fetched; the VLM families are opt-in.

    Args:
        output_dir: Target folder; defaults to ``settings.cache_dir / "models"``.
        force: Re-download even if files already exist locally.
        progress: Show a download progress bar.

    Returns:
        The directory the models were downloaded into.
    """
    if output_dir is None:
        output_dir = settings.cache_dir / "models"

    # Make sure the folder exists
    output_dir.mkdir(exist_ok=True, parents=True)

    if with_layout:
        _log.info("Downloading layout model...")
        LayoutModel.download_models(
            local_dir=output_dir / LayoutOptions().model_spec.model_repo_folder,
            force=force,
            progress=progress,
        )

    if with_tableformer:
        _log.info("Downloading tableformer model...")
        TableStructureModel.download_models(
            local_dir=output_dir / TableStructureModel._model_repo_folder,
            force=force,
            progress=progress,
        )

    if with_picture_classifier:
        _log.info("Downloading picture classifier model...")
        DocumentPictureClassifier.download_models(
            local_dir=output_dir / DocumentPictureClassifier._model_repo_folder,
            force=force,
            progress=progress,
        )

    if with_code_formula:
        _log.info("Downloading code formula model...")
        CodeFormulaModel.download_models(
            local_dir=output_dir / CodeFormulaModel._model_repo_folder,
            force=force,
            progress=progress,
        )

    if with_smolvlm:
        _log.info("Downloading SmolVlm model...")
        download_hf_model(
            repo_id=smolvlm_picture_description.repo_id,
            local_dir=output_dir / smolvlm_picture_description.repo_cache_folder,
            force=force,
            progress=progress,
        )

    if with_granitedocling:
        _log.info("Downloading GraniteDocling model...")
        download_hf_model(
            repo_id=GRANITEDOCLING_TRANSFORMERS.repo_id,
            local_dir=output_dir / GRANITEDOCLING_TRANSFORMERS.repo_cache_folder,
            force=force,
            progress=progress,
        )

    if with_granitedocling_mlx:
        _log.info("Downloading GraniteDocling MLX model...")
        download_hf_model(
            repo_id=GRANITEDOCLING_MLX.repo_id,
            local_dir=output_dir / GRANITEDOCLING_MLX.repo_cache_folder,
            force=force,
            progress=progress,
        )

    if with_smoldocling:
        _log.info("Downloading SmolDocling model...")
        download_hf_model(
            repo_id=SMOLDOCLING_TRANSFORMERS.repo_id,
            local_dir=output_dir / SMOLDOCLING_TRANSFORMERS.repo_cache_folder,
            force=force,
            progress=progress,
        )

    if with_smoldocling_mlx:
        _log.info("Downloading SmolDocling MLX model...")
        download_hf_model(
            repo_id=SMOLDOCLING_MLX.repo_id,
            local_dir=output_dir / SMOLDOCLING_MLX.repo_cache_folder,
            force=force,
            progress=progress,
        )

    if with_granite_vision:
        _log.info("Downloading Granite Vision model...")
        download_hf_model(
            repo_id=granite_picture_description.repo_id,
            local_dir=output_dir / granite_picture_description.repo_cache_folder,
            force=force,
            progress=progress,
        )

    if with_rapidocr:
        # RapidOCR ships weights for both inference backends.
        for backend in ("torch", "onnxruntime"):
            _log.info(f"Downloading rapidocr {backend} models...")
            RapidOcrModel.download_models(
                backend=backend,
                local_dir=output_dir / RapidOcrModel._model_repo_folder,
                force=force,
                progress=progress,
            )

    if with_easyocr:
        _log.info("Downloading easyocr models...")
        EasyOcrModel.download_models(
            local_dir=output_dir / EasyOcrModel._model_repo_folder,
            force=force,
            progress=progress,
        )

    return output_dir
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/utils/__init__.py | docling/utils/__init__.py | python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false | |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/utils/locks.py | docling/utils/locks.py | import threading
pypdfium2_lock = threading.Lock()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/utils/profiling.py | docling/utils/profiling.py | import time
from datetime import datetime
from enum import Enum
from typing import TYPE_CHECKING, List
import numpy as np
from pydantic import BaseModel
from docling.datamodel.settings import settings
if TYPE_CHECKING:
from docling.datamodel.document import ConversionResult
class ProfilingScope(str, Enum):
    """Granularity a ProfilingItem's timings refer to."""

    PAGE = "page"
    DOCUMENT = "document"
class ProfilingItem(BaseModel):
    """Collected timing samples for one pipeline stage (see TimeRecorder)."""

    # Whether the samples are per-page or per-document.
    scope: ProfilingScope
    # Number of completed measurements.
    count: int = 0
    # Elapsed wall-clock seconds, one entry per measurement.
    times: List[float] = []
    # Start time of each measurement (naive datetimes from datetime.utcnow()).
    start_timestamps: List[datetime] = []

    def avg(self) -> float:
        """Arithmetic mean of the samples (np.average; same result as mean())."""
        return np.average(self.times)  # type: ignore

    def std(self) -> float:
        """Standard deviation of the samples."""
        return np.std(self.times)  # type: ignore

    def mean(self) -> float:
        """Arithmetic mean of the samples."""
        return np.mean(self.times)  # type: ignore

    def percentile(self, perc: float) -> float:
        """The *perc*-th percentile of the samples."""
        return np.percentile(self.times, perc)  # type: ignore
class TimeRecorder:
    """Context manager that records elapsed wall-clock time into
    ``conv_res.timings[key]``.

    All work is gated on ``settings.debug.profile_pipeline_timings``; when
    profiling is disabled the manager is a no-op.
    """

    def __init__(
        self,
        conv_res: "ConversionResult",
        key: str,
        scope: ProfilingScope = ProfilingScope.PAGE,
    ):
        if settings.debug.profile_pipeline_timings:
            # Create the timing bucket on first use of this key.
            if key not in conv_res.timings.keys():
                conv_res.timings[key] = ProfilingItem(scope=scope)
        self.conv_res = conv_res
        self.key = key

    def __enter__(self):
        if settings.debug.profile_pipeline_timings:
            # monotonic() is immune to wall-clock adjustments.
            self.start = time.monotonic()
            self.conv_res.timings[self.key].start_timestamps.append(datetime.utcnow())
        return self

    def __exit__(self, *args):
        # Records the sample even when the body raised an exception.
        if settings.debug.profile_pipeline_timings:
            elapsed = time.monotonic() - self.start
            self.conv_res.timings[self.key].times.append(elapsed)
            self.conv_res.timings[self.key].count += 1
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/utils/orientation.py | docling/utils/orientation.py | from typing import Tuple
from docling_core.types.doc import BoundingBox, CoordOrigin
from docling_core.types.doc.page import BoundingRectangle
CLIPPED_ORIENTATIONS = [0, 90, 180, 270]
def rotate_bounding_box(
    bbox: BoundingBox, angle: int, im_size: Tuple[int, int]
) -> BoundingRectangle:
    """Rotate *bbox* by *angle* degrees (one of 0/90/180/270) within an image.

    Args:
        bbox: Axis-aligned box; converted to top-left origin internally.
        angle: Rotation in degrees; values outside the clipped set raise.
        im_size: (width, height) of the image the box lives in.

    Returns:
        A BoundingRectangle in TOPLEFT coordinates whose r_0 corner starts
        at the rectangle's bottom-left, other corners counterclockwise.

    Raises:
        ValueError: If *angle* is not one of the clipped orientations.
    """
    # The box is left top width height in TOPLEFT coordinates
    # Bounding rectangle start with r_0 at the bottom left whatever the
    # coordinate system. Then other corners are found rotating counterclockwise
    bbox = bbox.to_top_left_origin(im_size[1])
    left, top, width, height = bbox.l, bbox.t, bbox.width, bbox.height
    im_w, im_h = im_size
    angle = angle % 360
    if angle == 0:
        # No rotation: the rectangle is just the box itself.
        return BoundingRectangle.from_bounding_box(bbox)
    elif angle == 90:
        # 90°: corners computed relative to the image width.
        r_x0 = top + height
        r_y0 = im_w - left
        r_x1 = r_x0
        r_y1 = r_y0 - width
        r_x2 = r_x1 - height
        r_y2 = r_y1
        r_x3 = r_x2
        r_y3 = r_y0
    elif angle == 180:
        # 180°: both axes are mirrored.
        r_x0 = im_w - left
        r_y0 = im_h - (top + height)
        r_x1 = r_x0 - width
        r_y1 = r_y0
        r_x2 = r_x1
        r_y2 = r_y1 + height
        r_x3 = r_x0
        r_y3 = r_y2
    elif angle == 270:
        # 270°: corners computed relative to the image height.
        r_x0 = im_h - (top + height)
        r_y0 = left
        r_x1 = r_x0
        r_y1 = r_y0 + width
        r_x2 = r_x1 + height
        r_y2 = r_y1
        r_x3 = r_x2
        r_y3 = r_y0
    else:
        msg = (
            f"invalid orientation {angle}, expected values in:"
            f" {sorted(CLIPPED_ORIENTATIONS)}"
        )
        raise ValueError(msg)
    rectangle = BoundingRectangle(
        r_x0=r_x0,
        r_y0=r_y0,
        r_x1=r_x1,
        r_y1=r_y1,
        r_x2=r_x2,
        r_y2=r_y2,
        r_x3=r_x3,
        r_y3=r_y3,
        coord_origin=CoordOrigin.TOPLEFT,
    )
    return rectangle
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/utils/visualization.py | docling/utils/visualization.py | from docling_core.types.doc import DocItemLabel
from PIL import Image, ImageDraw, ImageFont
from PIL.ImageFont import FreeTypeFont
from docling.datamodel.base_models import Cluster
def draw_clusters(
    image: Image.Image, clusters: list[Cluster], scale_x: float, scale_y: float
) -> None:
    """
    Draw clusters on an image.

    Each cluster (and its children) is rendered as a filled rectangle in the
    label's color over its translucent text cells, with a
    "LABEL (confidence)" caption. Coordinates are scaled by (scale_x, scale_y).
    """
    draw = ImageDraw.Draw(image, "RGBA")
    # Create a smaller font for the labels
    font: ImageFont.ImageFont | FreeTypeFont
    try:
        font = ImageFont.truetype("arial.ttf", 12)
    except OSError:
        # Fallback to default font if arial is not available
        font = ImageFont.load_default()
    for c_tl in clusters:
        all_clusters = [c_tl, *c_tl.children]
        for c in all_clusters:
            # Draw cells first (underneath)
            cell_color = (0, 0, 0, 40)  # Transparent black for cells
            for tc in c.cells:
                cx0, cy0, cx1, cy1 = tc.rect.to_bounding_box().as_tuple()
                cx0 *= scale_x
                cx1 *= scale_x
                cy0 *= scale_y  # BUGFIX: y-coordinate was scaled by scale_x
                cy1 *= scale_y

                draw.rectangle(
                    [(cx0, cy0), (cx1, cy1)],
                    outline=None,
                    fill=cell_color,
                )
            # Draw cluster rectangle
            x0, y0, x1, y1 = c.bbox.as_tuple()
            x0 *= scale_x
            x1 *= scale_x
            y0 *= scale_y  # BUGFIX: y-coordinate was scaled by scale_x
            y1 *= scale_y

            # Normalize corner ordering so PIL gets (min, max) pairs.
            if y1 <= y0:
                y1, y0 = y0, y1
            if x1 <= x0:
                x1, x0 = x0, x1

            cluster_fill_color = (*list(DocItemLabel.get_color(c.label)), 70)
            cluster_outline_color = (
                *list(DocItemLabel.get_color(c.label)),
                255,
            )
            draw.rectangle(
                [(x0, y0), (x1, y1)],
                outline=cluster_outline_color,
                fill=cluster_fill_color,
            )
            # Add label name and confidence
            label_text = f"{c.label.name} ({c.confidence:.2f})"
            # Create semi-transparent background for text
            text_bbox = draw.textbbox((x0, y0), label_text, font=font)
            text_bg_padding = 2
            draw.rectangle(
                [
                    (
                        text_bbox[0] - text_bg_padding,
                        text_bbox[1] - text_bg_padding,
                    ),
                    (
                        text_bbox[2] + text_bg_padding,
                        text_bbox[3] + text_bg_padding,
                    ),
                ],
                fill=(255, 255, 255, 180),  # Semi-transparent white
            )
            # Draw text
            draw.text(
                (x0, y0),
                label_text,
                fill=(0, 0, 0, 255),  # Solid black
                font=font,
            )
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/image_backend.py | docling/backend/image_backend.py | import logging
from io import BytesIO
from pathlib import Path
from typing import Iterable, List, Optional, Union
from docling_core.types.doc import BoundingBox, CoordOrigin
from docling_core.types.doc.page import (
BoundingRectangle,
PdfPageBoundaryType,
PdfPageGeometry,
SegmentedPdfPage,
TextCell,
)
from PIL import Image
from docling.backend.abstract_backend import AbstractDocumentBackend
from docling.backend.pdf_backend import PdfDocumentBackend, PdfPageBackend
from docling.datamodel.backend_options import PdfBackendOptions
from docling.datamodel.base_models import InputFormat, Size
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
class _ImagePageBackend(PdfPageBackend):
    """Page backend backed by a single PIL image: no text, whole page is bitmap."""

    def __init__(self, image: Image.Image):
        self._image: Optional[Image.Image] = image
        self.valid: bool = self._image is not None

    def is_valid(self) -> bool:
        return self.valid

    def get_text_in_rect(self, bbox: BoundingBox) -> str:
        # No text extraction from raw images without OCR
        return ""

    def get_segmented_page(self) -> SegmentedPdfPage:
        # Return empty segmented page with proper dimensions for raw images
        assert self._image is not None
        page_size = self.get_size()

        # Full-page geometry: every PdfPageGeometry box is the page itself.
        bbox = BoundingBox(
            l=0.0,
            t=0.0,
            r=float(page_size.width),
            b=float(page_size.height),
            coord_origin=CoordOrigin.BOTTOMLEFT,
        )
        dimension = PdfPageGeometry(
            angle=0.0,
            rect=BoundingRectangle.from_bounding_box(bbox),
            boundary_type=PdfPageBoundaryType.CROP_BOX,
            art_bbox=bbox,
            bleed_bbox=bbox,
            crop_bbox=bbox,
            media_bbox=bbox,
            trim_bbox=bbox,
        )

        return SegmentedPdfPage(
            dimension=dimension,
            char_cells=[],
            word_cells=[],
            textline_cells=[],
            has_chars=False,
            has_words=False,
            has_lines=False,
        )

    def get_text_cells(self) -> Iterable[TextCell]:
        # No text cells on raw images
        return []

    def get_bitmap_rects(self, scale: float = 1) -> Iterable[BoundingBox]:
        # For raw images, the entire page is a bitmap
        assert self._image is not None
        page_size = self.get_size()
        full_page_bbox = BoundingBox(
            l=0.0,
            t=0.0,
            r=float(page_size.width),
            b=float(page_size.height),
            coord_origin=CoordOrigin.TOPLEFT,
        )
        if scale != 1:
            full_page_bbox = full_page_bbox.scaled(scale=scale)
        yield full_page_bbox

    def get_page_image(
        self, scale: float = 1, cropbox: Optional[BoundingBox] = None
    ) -> Image.Image:
        """Return the page image, optionally cropped (TOPLEFT coords) and scaled."""
        assert self._image is not None
        img = self._image
        if cropbox is not None:
            # Expected cropbox comes in TOPLEFT coords in our pipeline
            if cropbox.coord_origin != CoordOrigin.TOPLEFT:
                # Convert to TOPLEFT relative to current image height
                cropbox = cropbox.to_top_left_origin(img.height)
            left, top, right, bottom = cropbox.as_tuple()
            # Clamp to the image bounds before cropping.
            left = max(0, round(left))
            top = max(0, round(top))
            right = min(img.width, round(right))
            bottom = min(img.height, round(bottom))
            img = img.crop((left, top, right, bottom))
        if scale != 1:
            # Keep at least 1px per side after scaling.
            new_w = max(1, round(img.width * scale))
            new_h = max(1, round(img.height * scale))
            img = img.resize((new_w, new_h))
        return img

    def get_size(self) -> Size:
        assert self._image is not None
        return Size(width=self._image.width, height=self._image.height)

    def unload(self):
        # Help GC and free memory
        self._image = None
class ImageDocumentBackend(PdfDocumentBackend):
    """Image-native backend that bypasses pypdfium2.

    Notes:
    - Subclasses PdfDocumentBackend to satisfy pipeline type checks.
    - Intentionally avoids calling PdfDocumentBackend.__init__ to skip
      the image→PDF conversion and any pypdfium2 usage.
    - Handles multi-page TIFF by extracting frames eagerly to separate
      Image objects to keep thread-safety when pages process in parallel.
    """

    def __init__(
        self,
        in_doc: InputDocument,
        path_or_stream: Union[BytesIO, Path],
        options: PdfBackendOptions = PdfBackendOptions(),
    ):
        # Bypass PdfDocumentBackend.__init__ to avoid image→PDF conversion
        AbstractDocumentBackend.__init__(self, in_doc, path_or_stream, options)
        self.options: PdfBackendOptions = options

        if self.input_format not in {InputFormat.IMAGE}:
            raise RuntimeError(
                f"Incompatible file format {self.input_format} was passed to ImageDocumentBackend."
            )

        # Load frames eagerly for thread-safety across pages
        self._frames: List[Image.Image] = []
        try:
            img = Image.open(self.path_or_stream)  # type: ignore[arg-type]
            # Handle multi-frame and single-frame images
            # - multiframe formats: TIFF, GIF, ICO
            # - singleframe formats: JPEG (.jpg, .jpeg), PNG (.png), BMP, WEBP (unless animated), HEIC
            frame_count = getattr(img, "n_frames", 1)
            if frame_count > 1:
                # Copy each frame out: seek() mutates the shared Image object.
                for i in range(frame_count):
                    img.seek(i)
                    self._frames.append(img.copy().convert("RGB"))
            else:
                self._frames.append(img.convert("RGB"))
        except Exception as e:
            raise RuntimeError(f"Could not load image for document {self.file}") from e

    def is_valid(self) -> bool:
        # Valid as long as at least one frame was decoded.
        return len(self._frames) > 0

    def page_count(self) -> int:
        return len(self._frames)

    def load_page(self, page_no: int) -> _ImagePageBackend:
        """Return a page backend for the given 0-based frame index."""
        if not (0 <= page_no < len(self._frames)):
            raise IndexError(f"Page index out of range: {page_no}")
        return _ImagePageBackend(self._frames[page_no])

    @classmethod
    def supported_formats(cls) -> set[InputFormat]:
        # Only IMAGE here; PDF handling remains in PDF-oriented backends
        return {InputFormat.IMAGE}

    @classmethod
    def supports_pagination(cls) -> bool:
        return True

    def unload(self):
        super().unload()
        self._frames = []
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/docling_parse_v4_backend.py | docling/backend/docling_parse_v4_backend.py | import logging
from collections.abc import Iterable
from io import BytesIO
from pathlib import Path
from typing import TYPE_CHECKING, Optional, Union
import pypdfium2 as pdfium
from docling_core.types.doc import BoundingBox, CoordOrigin
from docling_core.types.doc.page import SegmentedPdfPage, TextCell
from docling_parse.pdf_parser import DoclingPdfParser, PdfDocument
from PIL import Image
from pypdfium2 import PdfPage
from docling.backend.pdf_backend import PdfDocumentBackend, PdfPageBackend
from docling.datamodel.backend_options import PdfBackendOptions
from docling.datamodel.base_models import Size
from docling.utils.locks import pypdfium2_lock
if TYPE_CHECKING:
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
class DoclingParseV4PageBackend(PdfPageBackend):
    """Page backend combining docling-parse (text) and pypdfium2 (rendering).

    Parsing is lazy: the SegmentedPdfPage is built on first access via
    _ensure_parsed(). Rendering goes through pypdfium2 under a global lock.
    """

    def __init__(
        self,
        *,
        dp_doc: PdfDocument,
        page_obj: PdfPage,
        page_no: int,
        create_words: bool = True,
        create_textlines: bool = True,
        keep_chars: bool = False,
        keep_lines: bool = False,
        keep_images: bool = True,
    ):
        self._ppage = page_obj
        self._dp_doc = dp_doc
        self._page_no = page_no
        self._create_words = create_words
        self._create_textlines = create_textlines
        self._keep_chars = keep_chars
        self._keep_lines = keep_lines
        self._keep_images = keep_images
        # Parsed page, populated lazily by _ensure_parsed().
        self._dpage: Optional[SegmentedPdfPage] = None
        self._unloaded = False
        self.valid = (self._ppage is not None) and (self._dp_doc is not None)

    def _ensure_parsed(self) -> None:
        """Parse the page with docling-parse on first use (no-op afterwards)."""
        if self._dpage is not None:
            return
        # docling-parse pages are 1-based, hence page_no + 1.
        seg_page = self._dp_doc.get_page(
            self._page_no + 1,
            keep_chars=self._keep_chars,
            keep_lines=self._keep_lines,
            keep_bitmaps=self._keep_images,
            create_words=self._create_words,
            create_textlines=self._create_textlines,
            enforce_same_font=True,
        )

        # In Docling, all TextCell instances are expected with top-left origin.
        # (to_top_left_origin mutates the cells in place; return values unused.)
        [
            tc.to_top_left_origin(seg_page.dimension.height)
            for tc in seg_page.textline_cells
        ]
        [tc.to_top_left_origin(seg_page.dimension.height) for tc in seg_page.char_cells]
        [tc.to_top_left_origin(seg_page.dimension.height) for tc in seg_page.word_cells]

        self._dpage = seg_page

    def is_valid(self) -> bool:
        return self.valid

    def get_text_in_rect(self, bbox: BoundingBox) -> str:
        """Concatenate text of line cells that overlap *bbox* by more than half."""
        self._ensure_parsed()
        assert self._dpage is not None

        # Find intersecting cells on the page
        text_piece = ""
        page_size = self.get_size()

        scale = (
            1  # FIX - Replace with param in get_text_in_rect across backends (optional)
        )

        for i, cell in enumerate(self._dpage.textline_cells):
            cell_bbox = (
                cell.rect.to_bounding_box()
                .to_top_left_origin(page_height=page_size.height)
                .scaled(scale)
            )

            overlap_frac = cell_bbox.intersection_over_self(bbox)

            if overlap_frac > 0.5:
                if len(text_piece) > 0:
                    text_piece += " "
                text_piece += cell.text

        return text_piece

    def get_segmented_page(self) -> Optional[SegmentedPdfPage]:
        self._ensure_parsed()
        return self._dpage

    def get_text_cells(self) -> Iterable[TextCell]:
        self._ensure_parsed()
        assert self._dpage is not None
        return self._dpage.textline_cells

    def get_bitmap_rects(self, scale: float = 1) -> Iterable[BoundingBox]:
        """Yield scaled bounding boxes of the page's bitmap resources."""
        self._ensure_parsed()
        assert self._dpage is not None
        AREA_THRESHOLD = 0  # 32 * 32

        images = self._dpage.bitmap_resources

        for img in images:
            cropbox = img.rect.to_bounding_box().to_top_left_origin(
                self.get_size().height
            )

            if cropbox.area() > AREA_THRESHOLD:
                cropbox = cropbox.scaled(scale=scale)

                yield cropbox

    def get_page_image(
        self, scale: float = 1, cropbox: Optional[BoundingBox] = None
    ) -> Image.Image:
        """Render the page (or *cropbox* region) to a PIL image at *scale*."""
        page_size = self.get_size()

        if not cropbox:
            cropbox = BoundingBox(
                l=0,
                r=page_size.width,
                t=0,
                b=page_size.height,
                coord_origin=CoordOrigin.TOPLEFT,
            )
            padbox = BoundingBox(
                l=0, r=0, t=0, b=0, coord_origin=CoordOrigin.BOTTOMLEFT
            )
        else:
            # pypdfium2 crop is expressed as padding from each page edge.
            padbox = cropbox.to_bottom_left_origin(page_size.height).model_copy()
            padbox.r = page_size.width - padbox.r
            padbox.t = page_size.height - padbox.t

        with pypdfium2_lock:
            image = (
                self._ppage.render(
                    scale=scale * 1.5,
                    rotation=0,  # no additional rotation
                    crop=padbox.as_tuple(),
                )
                .to_pil()
                .resize(
                    size=(round(cropbox.width * scale), round(cropbox.height * scale))
                )
            )  # We resize the image from 1.5x the given scale to make it sharper.

        return image

    def get_size(self) -> Size:
        with pypdfium2_lock:
            return Size(width=self._ppage.get_width(), height=self._ppage.get_height())

        # TODO: Take width and height from docling-parse.
        # return Size(
        #     width=self._dpage.dimension.width,
        #     height=self._dpage.dimension.height,
        # )

    def unload(self):
        """Release the parsed page and references; safe to call more than once."""
        if not self._unloaded and self._dp_doc is not None:
            # docling-parse unloads a 1-based, end-exclusive page range.
            self._dp_doc.unload_pages((self._page_no + 1, self._page_no + 2))
            self._unloaded = True

        self._ppage = None
        self._dpage = None
        self._dp_doc = None
class DoclingParseV4DocumentBackend(PdfDocumentBackend):
    """Document backend opening the PDF with both pypdfium2 and docling-parse.

    pypdfium2 is used for rendering and page sizes, docling-parse for text
    extraction; both are opened on the same path/stream (and password).
    """

    def __init__(
        self,
        in_doc: "InputDocument",
        path_or_stream: Union[BytesIO, Path],
        options: PdfBackendOptions = PdfBackendOptions(),
    ):
        super().__init__(in_doc, path_or_stream, options)

        password = (
            self.options.password.get_secret_value() if self.options.password else None
        )
        with pypdfium2_lock:
            self._pdoc = pdfium.PdfDocument(self.path_or_stream, password=password)
            self.parser = DoclingPdfParser(loglevel="fatal")
            self.dp_doc: PdfDocument = self.parser.load(
                path_or_stream=self.path_or_stream, password=password
            )
        success = self.dp_doc is not None

        if not success:
            raise RuntimeError(
                f"docling-parse v4 could not load document {self.document_hash}."
            )

    def page_count(self) -> int:
        # return len(self._pdoc)  # To be replaced with docling-parse API

        # Cross-check both libraries; docling-parse's count is authoritative.
        len_1 = len(self._pdoc)
        len_2 = self.dp_doc.number_of_pages()

        if len_1 != len_2:
            _log.error(f"Inconsistent number of pages: {len_1}!={len_2}")

        return len_2

    def load_page(
        self, page_no: int, create_words: bool = True, create_textlines: bool = True
    ) -> DoclingParseV4PageBackend:
        """Return a lazy page backend for the given 0-based page index."""
        with pypdfium2_lock:
            ppage = self._pdoc[page_no]
        return DoclingParseV4PageBackend(
            dp_doc=self.dp_doc,
            page_obj=ppage,
            page_no=page_no,
            create_words=create_words,
            create_textlines=create_textlines,
        )

    def is_valid(self) -> bool:
        return self.page_count() > 0

    def unload(self):
        super().unload()

        # Unload docling-parse document first
        if self.dp_doc is not None:
            self.dp_doc.unload()
            self.dp_doc = None

        # Then close pypdfium2 document with proper locking
        if self._pdoc is not None:
            with pypdfium2_lock:
                try:
                    self._pdoc.close()
                except Exception:
                    # Ignore cleanup errors
                    pass
            self._pdoc = None
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/md_backend.py | docling/backend/md_backend.py | import logging
import re
import warnings
from copy import deepcopy
from enum import Enum
from html import unescape
from io import BytesIO
from pathlib import Path
from typing import Literal, Optional, Union, cast
import marko
import marko.element
import marko.inline
from docling_core.types.doc import (
ContentLayer,
DocItem,
DocItemLabel,
DoclingDocument,
DocumentOrigin,
ListItem,
NodeItem,
TableCell,
TableData,
TextItem,
)
from docling_core.types.doc.document import Formatting
from marko import Markdown
from pydantic import AnyUrl, BaseModel, Field, TypeAdapter
from typing_extensions import Annotated, override
from docling.backend.abstract_backend import (
DeclarativeDocumentBackend,
)
from docling.backend.html_backend import HTMLDocumentBackend
from docling.datamodel.backend_options import (
HTMLBackendOptions,
MarkdownBackendOptions,
)
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
_MARKER_BODY = "DOCLING_DOC_MD_HTML_EXPORT"
_START_MARKER = f"#_#_{_MARKER_BODY}_START_#_#"
_STOP_MARKER = f"#_#_{_MARKER_BODY}_STOP_#_#"
class _PendingCreationType(str, Enum):
    """Kind of item whose creation is deferred until its text is known.

    (The previous docstring, "CoordOrigin.", was a copy-paste leftover.)
    """

    HEADING = "heading"
    LIST_ITEM = "list_item"
class _HeadingCreationPayload(BaseModel):
    """Deferred-creation payload for a heading at the given level."""

    kind: Literal["heading"] = "heading"
    level: int
class _ListItemCreationPayload(BaseModel):
    """Deferred-creation payload for a (possibly enumerated) list item."""

    kind: Literal["list_item"] = "list_item"
    enumerated: bool
_CreationPayload = Annotated[
Union[
_HeadingCreationPayload,
_ListItemCreationPayload,
],
Field(discriminator="kind"),
]
class MarkdownDocumentBackend(DeclarativeDocumentBackend):
def _shorten_underscore_sequences(self, markdown_text: str, max_length: int = 10):
# This regex will match any sequence of underscores
pattern = r"_+"
def replace_match(match):
underscore_sequence = match.group(
0
) # Get the full match (sequence of underscores)
# Shorten the sequence if it exceeds max_length
if len(underscore_sequence) > max_length:
return "_" * max_length
else:
return underscore_sequence # Leave it unchanged if it is shorter or equal to max_length
# Use re.sub to replace long underscore sequences
shortened_text = re.sub(pattern, replace_match, markdown_text)
if len(shortened_text) != len(markdown_text):
warnings.warn("Detected potentially incorrect Markdown, correcting...")
return shortened_text
    @override
    def __init__(
        self,
        in_doc: InputDocument,
        path_or_stream: Union[BytesIO, Path],
        options: MarkdownBackendOptions = MarkdownBackendOptions(),
    ):
        """Load the Markdown source into ``self.markdown`` and init parser state.

        Raises:
            RuntimeError: If the stream/file cannot be read or decoded.
        """
        super().__init__(in_doc, path_or_stream, options)

        _log.debug("Starting MarkdownDocumentBackend...")

        # Markdown file:
        self.path_or_stream = path_or_stream
        self.valid = True
        self.markdown = ""  # To store original Markdown string

        # Table-parsing state used while walking the AST.
        self.in_table = False
        self.md_table_buffer: list[str] = []
        self._html_blocks: int = 0

        try:
            if isinstance(self.path_or_stream, BytesIO):
                text_stream = self.path_or_stream.getvalue().decode("utf-8")
                # remove invalid sequences
                # very long sequences of underscores will lead to unnecessary long processing times.
                # In any proper Markdown files, underscores have to be escaped,
                # otherwise they represent emphasis (bold or italic)
                self.markdown = self._shorten_underscore_sequences(text_stream)
            if isinstance(self.path_or_stream, Path):
                with open(self.path_or_stream, encoding="utf-8") as f:
                    md_content = f.read()
                    # remove invalid sequences
                    # very long sequences of underscores will lead to unnecessary long processing times.
                    # In any proper Markdown files, underscores have to be escaped,
                    # otherwise they represent emphasis (bold or italic)
                    self.markdown = self._shorten_underscore_sequences(md_content)
            self.valid = True

            _log.debug(self.markdown)
        except Exception as e:
            raise RuntimeError(
                f"Could not initialize MD backend for file with hash {self.document_hash}."
            ) from e
        return
def _close_table(self, doc: DoclingDocument):
if self.in_table:
_log.debug("=== TABLE START ===")
for md_table_row in self.md_table_buffer:
_log.debug(md_table_row)
_log.debug("=== TABLE END ===")
tcells: list[TableCell] = []
result_table = []
for n, md_table_row in enumerate(self.md_table_buffer):
data = []
if n == 0:
header = [t.strip() for t in md_table_row.split("|")[1:-1]]
for value in header:
data.append(value)
result_table.append(data)
if n > 1:
values = [t.strip() for t in md_table_row.split("|")[1:-1]]
for value in values:
data.append(value)
result_table.append(data)
for trow_ind, trow in enumerate(result_table):
for tcol_ind, cellval in enumerate(trow):
row_span = (
1 # currently supporting just simple tables (without spans)
)
col_span = (
1 # currently supporting just simple tables (without spans)
)
icell = TableCell(
text=cellval.strip(),
row_span=row_span,
col_span=col_span,
start_row_offset_idx=trow_ind,
end_row_offset_idx=trow_ind + row_span,
start_col_offset_idx=tcol_ind,
end_col_offset_idx=tcol_ind + col_span,
column_header=trow_ind == 0,
row_header=False,
)
tcells.append(icell)
num_rows = len(result_table)
num_cols = len(result_table[0])
self.in_table = False
self.md_table_buffer = [] # clean table markdown buffer
# Initialize Docling TableData
table_data = TableData(
num_rows=num_rows, num_cols=num_cols, table_cells=tcells
)
# Populate
for tcell in tcells:
table_data.table_cells.append(tcell)
if len(tcells) > 0:
doc.add_table(data=table_data)
return
def _create_list_item(
self,
doc: DoclingDocument,
parent_item: Optional[NodeItem],
text: str,
enumerated: bool,
formatting: Optional[Formatting] = None,
hyperlink: Optional[Union[AnyUrl, Path]] = None,
):
item = doc.add_list_item(
text=text,
enumerated=enumerated,
parent=parent_item,
formatting=formatting,
hyperlink=hyperlink,
)
return item
def _create_heading_item(
self,
doc: DoclingDocument,
parent_item: Optional[NodeItem],
text: str,
level: int,
formatting: Optional[Formatting] = None,
hyperlink: Optional[Union[AnyUrl, Path]] = None,
):
if level == 1:
item = doc.add_title(
text=text,
parent=parent_item,
formatting=formatting,
hyperlink=hyperlink,
)
else:
item = doc.add_heading(
text=text,
level=level - 1,
parent=parent_item,
formatting=formatting,
hyperlink=hyperlink,
)
return item
def _iterate_elements( # noqa: C901
self,
*,
element: marko.element.Element,
depth: int,
doc: DoclingDocument,
visited: set[marko.element.Element],
creation_stack: list[
_CreationPayload
], # stack for lazy item creation triggered deep in marko's AST (on RawText)
list_ordered_flag_by_ref: dict[str, bool],
list_last_item_by_ref: dict[str, ListItem],
parent_item: Optional[NodeItem] = None,
formatting: Optional[Formatting] = None,
hyperlink: Optional[Union[AnyUrl, Path]] = None,
):
if element in visited:
return
# Iterates over all elements in the AST
# Check for different element types and process relevant details
if (
isinstance(element, marko.block.Heading)
or isinstance(element, marko.block.SetextHeading)
) and len(element.children) > 0:
self._close_table(doc)
_log.debug(
f" - Heading level {element.level}, content: {element.children[0].children}" # type: ignore
)
if len(element.children) > 1: # inline group will be created further down
parent_item = self._create_heading_item(
doc=doc,
parent_item=parent_item,
text="",
level=element.level,
formatting=formatting,
hyperlink=hyperlink,
)
else:
creation_stack.append(_HeadingCreationPayload(level=element.level))
elif isinstance(element, marko.block.List):
has_non_empty_list_items = False
for child in element.children:
if isinstance(child, marko.block.ListItem) and len(child.children) > 0:
has_non_empty_list_items = True
break
self._close_table(doc)
_log.debug(f" - List {'ordered' if element.ordered else 'unordered'}")
if has_non_empty_list_items:
parent_item = doc.add_list_group(name="list", parent=parent_item)
list_ordered_flag_by_ref[parent_item.self_ref] = element.ordered
elif (
isinstance(element, marko.block.ListItem)
and len(element.children) > 0
and isinstance((child := element.children[0]), marko.block.Paragraph)
and len(child.children) > 0
):
self._close_table(doc)
_log.debug(" - List item")
enumerated = (
list_ordered_flag_by_ref.get(parent_item.self_ref, False)
if parent_item
else False
)
non_list_children: list[marko.element.Element] = [
item
for item in child.children
if not isinstance(item, marko.block.ListItem)
]
if len(non_list_children) > 1: # inline group will be created further down
parent_ref: Optional[str] = (
parent_item.self_ref if parent_item else None
)
parent_item = self._create_list_item(
doc=doc,
parent_item=parent_item,
text="",
enumerated=enumerated,
formatting=formatting,
hyperlink=hyperlink,
)
if parent_ref:
list_last_item_by_ref[parent_ref] = cast(ListItem, parent_item)
else:
creation_stack.append(_ListItemCreationPayload(enumerated=enumerated))
elif isinstance(element, marko.inline.Image):
self._close_table(doc)
_log.debug(f" - Image with alt: {element.title}, url: {element.dest}")
fig_caption: Optional[TextItem] = None
if element.title is not None and element.title != "":
title = unescape(element.title)
fig_caption = doc.add_text(
label=DocItemLabel.CAPTION,
text=title,
formatting=formatting,
hyperlink=hyperlink,
)
doc.add_picture(parent=parent_item, caption=fig_caption)
elif isinstance(element, marko.inline.Emphasis):
_log.debug(f" - Emphasis: {element.children}")
formatting = deepcopy(formatting) if formatting else Formatting()
formatting.italic = True
elif isinstance(element, marko.inline.StrongEmphasis):
_log.debug(f" - StrongEmphasis: {element.children}")
formatting = deepcopy(formatting) if formatting else Formatting()
formatting.bold = True
elif isinstance(element, marko.inline.Link):
_log.debug(f" - Link: {element.children}")
hyperlink = TypeAdapter(Optional[Union[AnyUrl, Path]]).validate_python(
element.dest
)
elif isinstance(element, (marko.inline.RawText, marko.inline.Literal)):
_log.debug(f" - RawText/Literal: {element.children}")
snippet_text = (
element.children.strip() if isinstance(element.children, str) else ""
)
snippet_text = unescape(snippet_text)
# Detect start of the table:
if "|" in snippet_text or self.in_table:
# most likely part of the markdown table
self.in_table = True
if len(self.md_table_buffer) > 0:
self.md_table_buffer[len(self.md_table_buffer) - 1] += snippet_text
else:
self.md_table_buffer.append(snippet_text)
elif snippet_text:
self._close_table(doc)
if creation_stack:
while len(creation_stack) > 0:
to_create = creation_stack.pop()
if isinstance(to_create, _ListItemCreationPayload):
enumerated = (
list_ordered_flag_by_ref.get(
parent_item.self_ref, False
)
if parent_item
else False
)
parent_ref = parent_item.self_ref if parent_item else None
parent_item = self._create_list_item(
doc=doc,
parent_item=parent_item,
text=snippet_text,
enumerated=enumerated,
formatting=formatting,
hyperlink=hyperlink,
)
if parent_ref:
list_last_item_by_ref[parent_ref] = cast(
ListItem, parent_item
)
elif isinstance(to_create, _HeadingCreationPayload):
# not keeping as parent_item as logic for correctly tracking
# that not implemented yet (section components not captured
# as heading children in marko)
self._create_heading_item(
doc=doc,
parent_item=parent_item,
text=snippet_text,
level=to_create.level,
formatting=formatting,
hyperlink=hyperlink,
)
else:
doc.add_text(
label=DocItemLabel.TEXT,
parent=parent_item,
text=snippet_text,
formatting=formatting,
hyperlink=hyperlink,
)
elif isinstance(element, marko.inline.CodeSpan):
self._close_table(doc)
_log.debug(f" - Code Span: {element.children}")
snippet_text = str(element.children).strip()
doc.add_code(
parent=parent_item,
text=snippet_text,
formatting=formatting,
hyperlink=hyperlink,
)
elif (
isinstance(element, (marko.block.CodeBlock, marko.block.FencedCode))
and len(element.children) > 0
and isinstance((child := element.children[0]), marko.inline.RawText)
and len(snippet_text := (child.children.strip())) > 0
):
self._close_table(doc)
_log.debug(f" - Code Block: {element.children}")
doc.add_code(
parent=parent_item,
text=snippet_text,
formatting=formatting,
hyperlink=hyperlink,
)
elif isinstance(element, marko.inline.LineBreak):
if self.in_table:
_log.debug("Line break in a table")
self.md_table_buffer.append("")
elif isinstance(element, marko.block.HTMLBlock):
self._html_blocks += 1
self._close_table(doc)
_log.debug(f"HTML Block: {element}")
if (
len(element.body) > 0
): # If Marko doesn't return any content for HTML block, skip it
html_block = element.body.strip()
# wrap in markers to enable post-processing in convert()
text_to_add = f"{_START_MARKER}{html_block}{_STOP_MARKER}"
doc.add_code(
parent=parent_item,
text=text_to_add,
formatting=formatting,
hyperlink=hyperlink,
)
else:
if not isinstance(element, str):
self._close_table(doc)
_log.debug(f"Some other element: {element}")
if (
isinstance(element, (marko.block.Paragraph, marko.block.Heading))
and len(element.children) > 1
):
parent_item = doc.add_inline_group(parent=parent_item)
processed_block_types = (
marko.block.CodeBlock,
marko.block.FencedCode,
marko.inline.RawText,
)
# Iterate through the element's children (if any)
if hasattr(element, "children") and not isinstance(
element, processed_block_types
):
for child in element.children:
if (
isinstance(element, marko.block.ListItem)
and isinstance(child, marko.block.List)
and parent_item
and list_last_item_by_ref.get(parent_item.self_ref, None)
):
_log.debug(
f"walking into new List hanging from item of parent list {parent_item.self_ref}"
)
parent_item = list_last_item_by_ref[parent_item.self_ref]
self._iterate_elements(
element=child,
depth=depth + 1,
doc=doc,
visited=visited,
creation_stack=creation_stack,
list_ordered_flag_by_ref=list_ordered_flag_by_ref,
list_last_item_by_ref=list_last_item_by_ref,
parent_item=parent_item,
formatting=formatting,
hyperlink=hyperlink,
)
def is_valid(self) -> bool:
return self.valid
def unload(self):
if isinstance(self.path_or_stream, BytesIO):
self.path_or_stream.close()
self.path_or_stream = None
@classmethod
def supports_pagination(cls) -> bool:
return False
@classmethod
def supported_formats(cls) -> set[InputFormat]:
return {InputFormat.MD}
def convert(self) -> DoclingDocument:
_log.debug("converting Markdown...")
origin = DocumentOrigin(
filename=self.file.name or "file",
mimetype="text/markdown",
binary_hash=self.document_hash,
)
doc = DoclingDocument(name=self.file.stem or "file", origin=origin)
if self.is_valid():
# Parse the markdown into an abstract syntax tree (AST)
marko_parser = Markdown()
parsed_ast = marko_parser.parse(self.markdown)
# Start iterating from the root of the AST
self._iterate_elements(
element=parsed_ast,
depth=0,
doc=doc,
parent_item=None,
visited=set(),
creation_stack=[],
list_ordered_flag_by_ref={},
list_last_item_by_ref={},
)
self._close_table(doc=doc) # handle any last hanging table
# if HTML blocks were detected, export to HTML and delegate to HTML backend
if self._html_blocks > 0:
# export to HTML
html_backend_cls = HTMLDocumentBackend
html_str = doc.export_to_html()
def _restore_original_html(txt, regex):
_txt, count = re.subn(regex, "", txt)
if count != self._html_blocks:
raise RuntimeError(
"An internal error has occurred during Markdown conversion."
)
return _txt
# restore original HTML by removing previously added markers
for regex in [
rf"<pre>\s*<code>\s*{_START_MARKER}",
rf"{_STOP_MARKER}\s*</code>\s*</pre>",
]:
html_str = _restore_original_html(txt=html_str, regex=regex)
self._html_blocks = 0
# delegate to HTML backend
stream = BytesIO(bytes(html_str, encoding="utf-8"))
md_options = cast(MarkdownBackendOptions, self.options)
html_options = HTMLBackendOptions(
enable_local_fetch=md_options.enable_local_fetch,
enable_remote_fetch=md_options.enable_remote_fetch,
fetch_images=md_options.fetch_images,
source_uri=md_options.source_uri,
infer_furniture=False,
add_title=False,
)
in_doc = InputDocument(
path_or_stream=stream,
format=InputFormat.HTML,
backend=html_backend_cls,
filename=self.file.name,
backend_options=html_options,
)
html_backend_obj = html_backend_cls(
in_doc=in_doc,
path_or_stream=stream,
options=html_options,
)
doc = html_backend_obj.convert()
else:
raise RuntimeError(
f"Cannot convert md with {self.document_hash} because the backend failed to init."
)
return doc
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/msexcel_backend.py | docling/backend/msexcel_backend.py | import logging
from io import BytesIO
from pathlib import Path
from typing import Annotated, Any, Optional, Union, cast
from docling_core.types.doc import (
BoundingBox,
ContentLayer,
CoordOrigin,
DocItem,
DocItemLabel,
DoclingDocument,
DocumentOrigin,
GroupLabel,
ImageRef,
ProvenanceItem,
Size,
TableCell,
TableData,
)
from openpyxl import load_workbook
from openpyxl.chartsheet.chartsheet import Chartsheet
from openpyxl.drawing.image import Image
from openpyxl.drawing.spreadsheet_drawing import TwoCellAnchor
from openpyxl.worksheet.worksheet import Worksheet
from PIL import Image as PILImage
from pydantic import BaseModel, Field, NonNegativeInt, PositiveInt
from pydantic.dataclasses import dataclass
from typing_extensions import override
from docling.backend.abstract_backend import (
DeclarativeDocumentBackend,
PaginatedDocumentBackend,
)
from docling.datamodel.backend_options import MsExcelBackendOptions
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
@dataclass
class DataRegion:
"""Represents the bounding rectangle of non-empty cells in a worksheet."""
min_row: Annotated[
PositiveInt, Field(description="Smallest row index (1-based index).")
]
max_row: Annotated[
PositiveInt, Field(description="Largest row index (1-based index).")
]
min_col: Annotated[
PositiveInt, Field(description="Smallest column index (1-based index).")
]
max_col: Annotated[
PositiveInt, Field(description="Largest column index (1-based index).")
]
def width(self) -> PositiveInt:
"""Number of columns in the data region."""
return self.max_col - self.min_col + 1
def height(self) -> PositiveInt:
"""Number of rows in the data region."""
return self.max_row - self.min_row + 1
class ExcelCell(BaseModel):
"""Represents an Excel cell.
Attributes:
row: The row number of the cell.
col: The column number of the cell.
text: The text content of the cell.
row_span: The number of rows the cell spans.
col_span: The number of columns the cell spans.
"""
row: int
col: int
text: str
row_span: int
col_span: int
class ExcelTable(BaseModel):
"""Represents an Excel table on a worksheet.
Attributes:
anchor: The column and row indices of the upper-left cell of the table
(0-based index).
num_rows: The number of rows in the table.
num_cols: The number of columns in the table.
data: The data in the table, represented as a list of ExcelCell objects.
"""
anchor: tuple[NonNegativeInt, NonNegativeInt]
num_rows: int
num_cols: int
data: list[ExcelCell]
class MsExcelDocumentBackend(DeclarativeDocumentBackend, PaginatedDocumentBackend):
"""Backend for parsing Excel workbooks.
The backend converts an Excel workbook into a DoclingDocument object.
Each worksheet is converted into a separate page.
The following elements are parsed:
- Cell contents, parsed as tables. If two groups of cells are disconnected
between each other, they will be parsed as two different tables.
- Images, parsed as PictureItem objects.
The DoclingDocument tables and pictures have their provenance information, including
the position in their original Excel worksheet. The position is represented by a
bounding box object with the cell indices as units (0-based index). The size of this
bounding box is the number of columns and rows that the table or picture spans.
"""
@override
def __init__(
self,
in_doc: "InputDocument",
path_or_stream: Union[BytesIO, Path],
options: MsExcelBackendOptions = MsExcelBackendOptions(),
) -> None:
"""Initialize the MsExcelDocumentBackend object.
Parameters:
in_doc: The input document object.
path_or_stream: The path or stream to the Excel file.
options: Backend options for Excel parsing.
Raises:
RuntimeError: An error occurred parsing the file.
"""
super().__init__(in_doc, path_or_stream, options)
# Initialise the parents for the hierarchy
self.max_levels = 10
self.parents: dict[int, Any] = {}
for i in range(-1, self.max_levels):
self.parents[i] = None
self.workbook = None
try:
if isinstance(self.path_or_stream, BytesIO):
self.workbook = load_workbook(
filename=self.path_or_stream, data_only=True
)
elif isinstance(self.path_or_stream, Path):
self.workbook = load_workbook(
filename=str(self.path_or_stream), data_only=True
)
self.valid = self.workbook is not None
except Exception as e:
self.valid = False
raise RuntimeError(
f"MsExcelDocumentBackend could not load document with hash {self.document_hash}"
) from e
@override
def is_valid(self) -> bool:
_log.debug(f"valid: {self.valid}")
return self.valid
@classmethod
@override
def supports_pagination(cls) -> bool:
return True
@override
def page_count(self) -> int:
if self.is_valid() and self.workbook:
return len(self.workbook.sheetnames)
else:
return 0
@classmethod
@override
def supported_formats(cls) -> set[InputFormat]:
return {InputFormat.XLSX}
@override
def convert(self) -> DoclingDocument:
"""Parse the Excel workbook into a DoclingDocument object.
Raises:
RuntimeError: Unable to run the conversion since the backend object failed to
initialize.
Returns:
The DoclingDocument object representing the Excel workbook.
"""
origin = DocumentOrigin(
filename=self.file.name or "file.xlsx",
mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
binary_hash=self.document_hash,
)
doc = DoclingDocument(name=self.file.stem or "file.xlsx", origin=origin)
if self.is_valid():
doc = self._convert_workbook(doc)
else:
raise RuntimeError(
f"Cannot convert doc with {self.document_hash} because the backend failed to init."
)
return doc
def _convert_workbook(self, doc: DoclingDocument) -> DoclingDocument:
"""Parse the Excel workbook and attach its structure to a DoclingDocument.
Args:
doc: A DoclingDocument object.
Returns:
A DoclingDocument object with the parsed items.
"""
if self.workbook is not None:
# Iterate over all sheets
for idx, name in enumerate(self.workbook.sheetnames):
_log.info(f"Processing sheet {idx}: {name}")
sheet = self.workbook[name]
page_no = idx + 1
# do not rely on sheet.max_column, sheet.max_row if there are images
page = doc.add_page(page_no=page_no, size=Size(width=0, height=0))
self.parents[0] = doc.add_group(
parent=None,
label=GroupLabel.SECTION,
name=f"sheet: {name}",
content_layer=self._get_sheet_content_layer(sheet),
)
doc = self._convert_sheet(doc, sheet)
width, height = self._find_page_size(doc, page_no)
page.size = Size(width=width, height=height)
else:
_log.error("Workbook is not initialized.")
return doc
def _convert_sheet(
self, doc: DoclingDocument, sheet: Union[Worksheet, Chartsheet]
) -> DoclingDocument:
"""Parse an Excel worksheet and attach its structure to a DoclingDocument
Args:
doc: The DoclingDocument to be updated.
sheet: The Excel worksheet to be parsed.
Returns:
The updated DoclingDocument.
"""
if isinstance(sheet, Worksheet):
doc = self._find_tables_in_sheet(doc, sheet)
doc = self._find_images_in_sheet(doc, sheet)
# TODO: parse charts in sheet
return doc
def _find_tables_in_sheet(
self, doc: DoclingDocument, sheet: Worksheet
) -> DoclingDocument:
"""Find all tables in an Excel sheet and attach them to a DoclingDocument.
Args:
doc: The DoclingDocument to be updated.
sheet: The Excel worksheet to be parsed.
Returns:
The updated DoclingDocument.
"""
if self.workbook is not None:
content_layer = self._get_sheet_content_layer(sheet)
tables = self._find_data_tables(sheet)
treat_singleton_as_text = (
isinstance(self.options, MsExcelBackendOptions)
and self.options.treat_singleton_as_text
)
for excel_table in tables:
origin_col = excel_table.anchor[0]
origin_row = excel_table.anchor[1]
num_rows = excel_table.num_rows
num_cols = excel_table.num_cols
if (
treat_singleton_as_text
and num_rows == 1
and num_cols == 1
and excel_table.data
):
page_no = self.workbook.index(sheet) + 1
doc.add_text(
text=excel_table.data[0].text,
label=DocItemLabel.TEXT,
parent=self.parents[0],
prov=ProvenanceItem(
page_no=page_no,
charspan=(0, 0),
bbox=BoundingBox.from_tuple(
(
origin_col,
origin_row,
origin_col + num_cols,
origin_row + num_rows,
),
origin=CoordOrigin.TOPLEFT,
),
),
content_layer=content_layer,
)
else:
table_data = TableData(
num_rows=num_rows,
num_cols=num_cols,
table_cells=[],
)
for excel_cell in excel_table.data:
cell = TableCell(
text=excel_cell.text,
row_span=excel_cell.row_span,
col_span=excel_cell.col_span,
start_row_offset_idx=excel_cell.row,
end_row_offset_idx=excel_cell.row + excel_cell.row_span,
start_col_offset_idx=excel_cell.col,
end_col_offset_idx=excel_cell.col + excel_cell.col_span,
column_header=excel_cell.row == 0,
row_header=False,
)
table_data.table_cells.append(cell)
page_no = self.workbook.index(sheet) + 1
doc.add_table(
data=table_data,
parent=self.parents[0],
prov=ProvenanceItem(
page_no=page_no,
charspan=(0, 0),
bbox=BoundingBox.from_tuple(
(
origin_col,
origin_row,
origin_col + num_cols,
origin_row + num_rows,
),
origin=CoordOrigin.TOPLEFT,
),
),
content_layer=content_layer,
)
return doc
def _find_true_data_bounds(self, sheet: Worksheet) -> DataRegion:
"""Find the true data boundaries (min/max rows and columns) in a worksheet.
This function scans all cells to find the smallest rectangular region that contains
all non-empty cells or merged cell ranges. It returns the minimal and maximal
row/column indices that bound the actual data region.
Args:
sheet: The worksheet to analyze.
Returns:
A data region representing the smallest rectangle that covers all data and merged cells.
If the sheet is empty, returns (1, 1, 1, 1) by default.
"""
min_row, min_col = None, None
max_row, max_col = 0, 0
for cell in sheet._cells.values():
if cell.value is not None:
r, c = cell.row, cell.column
min_row = r if min_row is None else min(min_row, r)
min_col = c if min_col is None else min(min_col, c)
max_row = max(max_row, r)
max_col = max(max_col, c)
# Expand bounds to include merged cells
for merged in sheet.merged_cells.ranges:
min_row = (
merged.min_row if min_row is None else min(min_row, merged.min_row)
)
min_col = (
merged.min_col if min_col is None else min(min_col, merged.min_col)
)
max_row = max(max_row, merged.max_row)
max_col = max(max_col, merged.max_col)
# If no data found, default to (1, 1, 1, 1)
if min_row is None or min_col is None:
min_row = min_col = max_row = max_col = 1
return DataRegion(min_row, max_row, min_col, max_col)
def _find_data_tables(self, sheet: Worksheet) -> list[ExcelTable]:
"""Find all compact rectangular data tables in an Excel worksheet.
Args:
sheet: The Excel worksheet to be parsed.
Returns:
A list of ExcelTable objects representing the data tables.
"""
bounds: DataRegion = self._find_true_data_bounds(
sheet
) # The true data boundaries
tables: list[ExcelTable] = [] # List to store found tables
visited: set[tuple[int, int]] = set() # Track already visited cells
# Limit scan to actual data bounds
for ri, row in enumerate(
sheet.iter_rows(
min_row=bounds.min_row,
max_row=bounds.max_row,
min_col=bounds.min_col,
max_col=bounds.max_col,
values_only=False,
),
start=bounds.min_row - 1,
):
for rj, cell in enumerate(row, start=bounds.min_col - 1):
if cell.value is None or (ri, rj) in visited:
continue
# If the cell starts a new table, find its bounds
table_bounds, visited_cells = self._find_table_bounds(
sheet, ri, rj, bounds.max_row, bounds.max_col
)
visited.update(visited_cells) # Mark these cells as visited
tables.append(table_bounds)
return tables
def _find_table_bounds(
self,
sheet: Worksheet,
start_row: int,
start_col: int,
max_row: int,
max_col: int,
) -> tuple[ExcelTable, set[tuple[int, int]]]:
"""Determine the bounds of a compact rectangular table.
Args:
sheet: The Excel worksheet to be parsed.
start_row: The row number of the starting cell.
start_col: The column number of the starting cell.
max_row: Maximum row boundary from true data bounds.
max_col: Maximum column boundary from true data bounds.
Returns:
A tuple with an Excel table and a set of cell coordinates.
"""
_log.debug("find_table_bounds")
table_max_row = self._find_table_bottom(sheet, start_row, start_col, max_row)
table_max_col = self._find_table_right(sheet, start_row, start_col, max_col)
# Collect the data within the bounds
data = []
visited_cells: set[tuple[int, int]] = set()
for ri, row in enumerate(
sheet.iter_rows(
min_row=start_row + 1, # start_row is 0-based but iter_rows is 1-based
max_row=table_max_row + 1,
min_col=start_col + 1,
max_col=table_max_col + 1,
values_only=False,
),
start_row,
):
for rj, cell in enumerate(row, start_col):
# Check if the cell belongs to a merged range
row_span = 1
col_span = 1
for merged_range in sheet.merged_cells.ranges:
if (
merged_range.min_row <= ri + 1
and ri + 1 <= merged_range.max_row
and merged_range.min_col <= rj + 1
and rj + 1 <= merged_range.max_col
):
row_span = merged_range.max_row - merged_range.min_row + 1
col_span = merged_range.max_col - merged_range.min_col + 1
break
if (ri, rj) not in visited_cells:
data.append(
ExcelCell(
row=ri - start_row,
col=rj - start_col,
text=str(cell.value),
row_span=row_span,
col_span=col_span,
)
)
# Mark all cells in the span as visited
for span_row in range(ri, ri + row_span):
for span_col in range(rj, rj + col_span):
visited_cells.add((span_row, span_col))
return (
ExcelTable(
anchor=(start_col, start_row),
num_rows=table_max_row + 1 - start_row,
num_cols=table_max_col + 1 - start_col,
data=data,
),
visited_cells,
)
def _find_table_bottom(
self, sheet: Worksheet, start_row: int, start_col: int, max_row: int
) -> int:
"""Find the bottom boundary of a table.
Args:
sheet: The Excel worksheet to be parsed.
start_row: The starting row of the table.
start_col: The starting column of the table.
max_row: Maximum row boundary from true data bounds.
Returns:
The row index representing the bottom boundary of the table.
"""
table_max_row: int = start_row
for ri, (cell,) in enumerate(
sheet.iter_rows(
min_row=start_row + 2,
max_row=max_row,
min_col=start_col + 1,
max_col=start_col + 1,
values_only=False,
),
start_row + 1,
):
# Check if the cell is part of a merged range
merged_range = next(
(mr for mr in sheet.merged_cells.ranges if cell.coordinate in mr),
None,
)
if cell.value is None and not merged_range:
break # Stop if the cell is empty and not merged
# Expand table_max_row to include the merged range if applicable
if merged_range:
table_max_row = max(table_max_row, merged_range.max_row - 1)
else:
table_max_row = ri
return table_max_row
def _find_table_right(
self, sheet: Worksheet, start_row: int, start_col: int, max_col: int
) -> int:
"""Find the right boundary of a table.
Args:
sheet: The Excel worksheet to be parsed.
start_row: The starting row of the table.
start_col: The starting column of the table.
max_col: The actual max column of the table.
Returns:
The column index representing the right boundary of the table."
"""
table_max_col: int = start_col
for rj, (cell,) in enumerate(
sheet.iter_cols(
min_row=start_row + 1,
max_row=start_row + 1,
min_col=start_col + 2,
max_col=max_col,
values_only=False,
),
start_col + 1,
):
# Check if the cell is part of a merged range
merged_range = next(
(mr for mr in sheet.merged_cells.ranges if cell.coordinate in mr),
None,
)
if cell.value is None and not merged_range:
break # Stop if the cell is empty and not merged
# Expand table_max_col to include the merged range if applicable
if merged_range:
table_max_col = max(table_max_col, merged_range.max_col - 1)
else:
table_max_col = rj
return table_max_col
def _find_images_in_sheet(
self, doc: DoclingDocument, sheet: Worksheet
) -> DoclingDocument:
"""Find images in the Excel sheet and attach them to the DoclingDocument.
Args:
doc: The DoclingDocument to be updated.
sheet: The Excel worksheet to be parsed.
Returns:
The updated DoclingDocument.
"""
if self.workbook is not None:
content_layer = self._get_sheet_content_layer(sheet)
# Iterate over byte images in the sheet
for item in sheet._images: # type: ignore[attr-defined]
try:
image: Image = cast(Image, item)
pil_image = PILImage.open(image.ref) # type: ignore[arg-type]
page_no = self.workbook.index(sheet) + 1
anchor = (0, 0, 0, 0)
if isinstance(image.anchor, TwoCellAnchor):
anchor = (
image.anchor._from.col,
image.anchor._from.row,
image.anchor.to.col + 1,
image.anchor.to.row + 1,
)
doc.add_picture(
parent=self.parents[0],
image=ImageRef.from_pil(image=pil_image, dpi=72),
caption=None,
prov=ProvenanceItem(
page_no=page_no,
charspan=(0, 0),
bbox=BoundingBox.from_tuple(
anchor, origin=CoordOrigin.TOPLEFT
),
),
content_layer=content_layer,
)
except Exception:
_log.error("could not extract the image from excel sheets")
return doc
@staticmethod
def _find_page_size(
doc: DoclingDocument, page_no: PositiveInt
) -> tuple[float, float]:
left: float = -1.0
top: float = -1.0
right: float = -1.0
bottom: float = -1.0
for item, _ in doc.iterate_items(traverse_pictures=True, page_no=page_no):
if not isinstance(item, DocItem):
continue
for provenance in item.prov:
bbox = provenance.bbox
left = min(left, bbox.l) if left != -1 else bbox.l
right = max(right, bbox.r) if right != -1 else bbox.r
top = min(top, bbox.t) if top != -1 else bbox.t
bottom = max(bottom, bbox.b) if bottom != -1 else bbox.b
return (right - left, bottom - top)
@staticmethod
def _get_sheet_content_layer(sheet: Worksheet) -> Optional[ContentLayer]:
return (
None
if sheet.sheet_state == Worksheet.SHEETSTATE_VISIBLE
else ContentLayer.INVISIBLE
)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/abstract_backend.py | docling/backend/abstract_backend.py | from abc import ABC, abstractmethod
from io import BytesIO
from pathlib import Path
from typing import TYPE_CHECKING, Union
from docling_core.types.doc import DoclingDocument
from docling.datamodel.backend_options import (
BackendOptions,
BaseBackendOptions,
DeclarativeBackendOptions,
)
if TYPE_CHECKING:
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
class AbstractDocumentBackend(ABC):
@abstractmethod
def __init__(
self,
in_doc: "InputDocument",
path_or_stream: Union[BytesIO, Path],
options: BaseBackendOptions = BaseBackendOptions(),
):
self.file = in_doc.file
self.path_or_stream = path_or_stream
self.document_hash = in_doc.document_hash
self.input_format = in_doc.format
self.options = options
@abstractmethod
def is_valid(self) -> bool:
pass
@classmethod
@abstractmethod
def supports_pagination(cls) -> bool:
pass
def unload(self):
if isinstance(self.path_or_stream, BytesIO):
self.path_or_stream.close()
self.path_or_stream = None
@classmethod
@abstractmethod
def supported_formats(cls) -> set["InputFormat"]:
pass
class PaginatedDocumentBackend(AbstractDocumentBackend):
"""DeclarativeDocumentBackend.
A declarative document backend is a backend that can transform to DoclingDocument
straight without a recognition pipeline.
"""
@abstractmethod
def page_count(self) -> int:
pass
class DeclarativeDocumentBackend(AbstractDocumentBackend):
"""DeclarativeDocumentBackend.
A declarative document backend is a backend that can transform to DoclingDocument
straight without a recognition pipeline.
"""
@abstractmethod
def __init__(
self,
in_doc: "InputDocument",
path_or_stream: Union[BytesIO, Path],
options: BackendOptions = DeclarativeBackendOptions(),
) -> None:
super().__init__(in_doc, path_or_stream, options)
@abstractmethod
def convert(self) -> DoclingDocument:
pass
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/docling_parse_backend.py | docling/backend/docling_parse_backend.py | import logging
import random
from collections.abc import Iterable
from io import BytesIO
from pathlib import Path
from typing import List, Optional, Union
import pypdfium2 as pdfium
from docling_core.types.doc import BoundingBox, CoordOrigin, Size
from docling_core.types.doc.page import (
BoundingRectangle,
SegmentedPdfPage,
TextCell,
)
from docling_parse.pdf_parsers import pdf_parser_v1
from PIL import Image, ImageDraw
from pypdfium2 import PdfPage
from docling.backend.pdf_backend import PdfDocumentBackend, PdfPageBackend
from docling.backend.pypdfium2_backend import get_pdf_page_geometry
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
class DoclingParsePageBackend(PdfPageBackend):
    """Page backend backed by docling-parse v1 plus a pypdfium2 page object.

    The parser supplies text cells in its own coordinate space; all getters
    rescale them to the pypdfium2 page size before returning.
    """

    def __init__(
        self, parser: pdf_parser_v1, document_hash: str, page_no: int, page_obj: PdfPage
    ):
        self._ppage = page_obj

        parsed_page = parser.parse_pdf_from_key_on_page(document_hash, page_no)
        self.valid = "pages" in parsed_page
        if self.valid:
            self._dpage = parsed_page["pages"][0]
        else:
            _log.info(
                f"An error occurred when loading page {page_no} of document {document_hash}."
            )

    def is_valid(self) -> bool:
        """Whether docling-parse produced usable data for this page."""
        return self.valid

    def _compute_text_cells(self) -> List[TextCell]:
        """Compute text cells from docling-parse data."""
        cells: List[TextCell] = []

        if not self.valid:
            return cells

        page_size = self.get_size()
        parser_width = self._dpage["width"]
        parser_height = self._dpage["height"]

        # Iterate the cells directly instead of indexing by range(len(...)).
        for cell_index, cell in enumerate(self._dpage["cells"]):
            x0, y0, x1, y1 = cell["box"]["device"]

            # Normalize the rectangle so x0<=x1 and y0<=y1.
            if x1 < x0:
                x0, x1 = x1, x0
            if y1 < y0:
                y0, y1 = y1, y0

            text_piece = cell["content"]["rnormalized"]
            cells.append(
                TextCell(
                    index=cell_index,
                    text=text_piece,
                    orig=text_piece,
                    from_ocr=False,
                    rect=BoundingRectangle.from_bounding_box(
                        BoundingBox(
                            # Rescale from parser coordinates to page coordinates.
                            l=x0 * page_size.width / parser_width,
                            b=y0 * page_size.height / parser_height,
                            r=x1 * page_size.width / parser_width,
                            t=y1 * page_size.height / parser_height,
                            coord_origin=CoordOrigin.BOTTOMLEFT,
                        )
                    ).to_top_left_origin(page_size.height),
                )
            )

        return cells

    def get_text_in_rect(self, bbox: BoundingBox) -> str:
        """Concatenate (space-separated) the text of cells mostly inside *bbox*."""
        if not self.valid:
            return ""
        # Find intersecting cells on the page
        pieces: List[str] = []
        page_size = self.get_size()
        parser_width = self._dpage["width"]
        parser_height = self._dpage["height"]

        scale = (
            1  # FIX - Replace with param in get_text_in_rect across backends (optional)
        )

        for cell in self._dpage["cells"]:
            x0, y0, x1, y1 = cell["box"]["device"]
            cell_bbox = BoundingBox(
                l=x0 * scale * page_size.width / parser_width,
                b=y0 * scale * page_size.height / parser_height,
                r=x1 * scale * page_size.width / parser_width,
                t=y1 * scale * page_size.height / parser_height,
                coord_origin=CoordOrigin.BOTTOMLEFT,
            ).to_top_left_origin(page_height=page_size.height * scale)

            # Keep cells whose area lies mostly (>50%) inside the query box.
            if cell_bbox.intersection_over_self(bbox) > 0.5:
                pieces.append(cell["content"]["rnormalized"])

        # join() instead of repeated string concatenation.
        return " ".join(pieces)

    def get_segmented_page(self) -> Optional[SegmentedPdfPage]:
        """Build a SegmentedPdfPage (textlines only) or None if the page is invalid."""
        if not self.valid:
            return None

        text_cells = self._compute_text_cells()

        # Get the PDF page geometry from pypdfium2
        dimension = get_pdf_page_geometry(self._ppage)

        # Create SegmentedPdfPage
        return SegmentedPdfPage(
            dimension=dimension,
            textline_cells=text_cells,
            char_cells=[],
            word_cells=[],
            has_lines=len(text_cells) > 0,
            has_words=False,
            has_chars=False,
        )

    def get_text_cells(self) -> Iterable[TextCell]:
        """Return the page's textline cells."""
        return self._compute_text_cells()

    def get_bitmap_rects(self, scale: float = 1) -> Iterable[BoundingBox]:
        """Yield (scaled) top-left-origin bounding boxes of page bitmaps."""
        AREA_THRESHOLD = 0  # 32 * 32
        for bitmap in self._dpage["images"]:
            cropbox = BoundingBox.from_tuple(
                bitmap["box"], origin=CoordOrigin.BOTTOMLEFT
            ).to_top_left_origin(self.get_size().height)

            if cropbox.area() > AREA_THRESHOLD:
                cropbox = cropbox.scaled(scale=scale)
                yield cropbox

    def get_page_image(
        self, scale: float = 1, cropbox: Optional[BoundingBox] = None
    ) -> Image.Image:
        """Render the page (or the *cropbox* region) at *scale* as a PIL image."""
        page_size = self.get_size()

        if not cropbox:
            cropbox = BoundingBox(
                l=0,
                r=page_size.width,
                t=0,
                b=page_size.height,
                coord_origin=CoordOrigin.TOPLEFT,
            )
            padbox = BoundingBox(
                l=0, r=0, t=0, b=0, coord_origin=CoordOrigin.BOTTOMLEFT
            )
        else:
            # pdfium expects the crop as padding from each edge, bottom-left origin.
            padbox = cropbox.to_bottom_left_origin(page_size.height).model_copy()
            padbox.r = page_size.width - padbox.r
            padbox.t = page_size.height - padbox.t

        image = (
            self._ppage.render(
                scale=scale * 1.5,
                rotation=0,  # no additional rotation
                crop=padbox.as_tuple(),
            )
            .to_pil()
            .resize(size=(round(cropbox.width * scale), round(cropbox.height * scale)))
        )  # We resize the image from 1.5x the given scale to make it sharper.

        return image

    def get_size(self) -> Size:
        """Page size in PDF points, per pypdfium2."""
        return Size(width=self._ppage.get_width(), height=self._ppage.get_height())

    def unload(self):
        """Drop references to the pdfium page and the parsed data."""
        self._ppage = None
        self._dpage = None
class DoclingParseDocumentBackend(PdfDocumentBackend):
    """Document backend pairing pypdfium2 (rendering/geometry) with the
    docling-parse v1 parser (text extraction), keyed by the document hash."""
    def __init__(self, in_doc: "InputDocument", path_or_stream: Union[BytesIO, Path]):
        super().__init__(in_doc, path_or_stream)
        self._pdoc = pdfium.PdfDocument(self.path_or_stream)
        self.parser = pdf_parser_v1()
        # Register the document with the parser under its hash, from either
        # an in-memory stream or a filesystem path.
        success = False
        if isinstance(self.path_or_stream, BytesIO):
            success = self.parser.load_document_from_bytesio(
                self.document_hash, self.path_or_stream
            )
        elif isinstance(self.path_or_stream, Path):
            success = self.parser.load_document(
                self.document_hash, str(self.path_or_stream)
            )
        if not success:
            raise RuntimeError(
                f"docling-parse could not load document with hash {self.document_hash}."
            )
    def page_count(self) -> int:
        # Page count is taken from pypdfium2 for now.
        return len(self._pdoc)  # To be replaced with docling-parse API
    def load_page(self, page_no: int) -> DoclingParsePageBackend:
        """Create a page backend for page *page_no*."""
        return DoclingParsePageBackend(
            self.parser, self.document_hash, page_no, self._pdoc[page_no]
        )
    def is_valid(self) -> bool:
        return self.page_count() > 0
    def unload(self):
        """Release parser state and close the pdfium document."""
        super().unload()
        self.parser.unload_document(self.document_hash)
        self._pdoc.close()
        self._pdoc = None
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/docling_parse_v2_backend.py | docling/backend/docling_parse_v2_backend.py | import logging
import random
from collections.abc import Iterable
from io import BytesIO
from pathlib import Path
from typing import TYPE_CHECKING, List, Optional, Union
import pypdfium2 as pdfium
from docling_core.types.doc import BoundingBox, CoordOrigin
from docling_core.types.doc.page import (
BoundingRectangle,
PdfPageBoundaryType,
PdfPageGeometry,
SegmentedPdfPage,
TextCell,
)
from docling_parse.pdf_parsers import pdf_parser_v2
from PIL import Image, ImageDraw
from pypdfium2 import PdfPage
from docling.backend.pdf_backend import PdfDocumentBackend, PdfPageBackend
from docling.backend.pypdfium2_backend import get_pdf_page_geometry
from docling.datamodel.base_models import Size
from docling.utils.locks import pypdfium2_lock
if TYPE_CHECKING:
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
class DoclingParseV2PageBackend(PdfPageBackend):
    """Page backend backed by docling-parse v2 plus a pypdfium2 page object.

    v2 exposes cells/images as column-oriented tables (a header list plus data
    rows); all getters rescale parser coordinates to the pypdfium2 page size.
    """

    def __init__(
        self, parser: pdf_parser_v2, document_hash: str, page_no: int, page_obj: PdfPage
    ):
        self._ppage = page_obj

        parsed_page = parser.parse_pdf_from_key_on_page(document_hash, page_no)
        # A single-page parse must return exactly one page dict.
        self.valid = "pages" in parsed_page and len(parsed_page["pages"]) == 1
        if self.valid:
            self._dpage = parsed_page["pages"][0]
        else:
            _log.info(
                f"An error occurred when loading page {page_no} of document {document_hash}."
            )

    def is_valid(self) -> bool:
        """Whether docling-parse produced usable data for this page."""
        return self.valid

    def _compute_text_cells(self) -> List[TextCell]:
        """Compute text cells from docling-parse v2 data."""
        cells: List[TextCell] = []

        if not self.valid:
            return cells

        page_size = self.get_size()

        parser_width = self._dpage["sanitized"]["dimension"]["width"]
        parser_height = self._dpage["sanitized"]["dimension"]["height"]

        cells_data = self._dpage["sanitized"]["cells"]["data"]
        cells_header = self._dpage["sanitized"]["cells"]["header"]

        # Hoist the O(n) header lookups out of the per-cell loop.
        i_x0 = cells_header.index("x0")
        i_y0 = cells_header.index("y0")
        i_x1 = cells_header.index("x1")
        i_y1 = cells_header.index("y1")
        i_text = cells_header.index("text")

        for cell_index, cell_data in enumerate(cells_data):
            x0 = cell_data[i_x0]
            y0 = cell_data[i_y0]
            x1 = cell_data[i_x1]
            y1 = cell_data[i_y1]

            # Normalize the rectangle so x0<=x1 and y0<=y1.
            if x1 < x0:
                x0, x1 = x1, x0
            if y1 < y0:
                y0, y1 = y1, y0

            text_piece = cell_data[i_text]
            cells.append(
                TextCell(
                    index=cell_index,
                    text=text_piece,
                    orig=text_piece,
                    from_ocr=False,
                    rect=BoundingRectangle.from_bounding_box(
                        BoundingBox(
                            # Rescale parser coordinates to page coordinates.
                            l=x0 * page_size.width / parser_width,
                            b=y0 * page_size.height / parser_height,
                            r=x1 * page_size.width / parser_width,
                            t=y1 * page_size.height / parser_height,
                            coord_origin=CoordOrigin.BOTTOMLEFT,
                        )
                    ).to_top_left_origin(page_size.height),
                )
            )

        return cells

    def get_text_in_rect(self, bbox: BoundingBox) -> str:
        """Concatenate (space-separated) the text of cells mostly inside *bbox*."""
        if not self.valid:
            return ""
        # Find intersecting cells on the page
        pieces: List[str] = []
        page_size = self.get_size()

        parser_width = self._dpage["sanitized"]["dimension"]["width"]
        parser_height = self._dpage["sanitized"]["dimension"]["height"]

        scale = (
            1  # FIX - Replace with param in get_text_in_rect across backends (optional)
        )

        cells_data = self._dpage["sanitized"]["cells"]["data"]
        cells_header = self._dpage["sanitized"]["cells"]["header"]

        # Hoist the O(n) header lookups out of the per-cell loop.
        i_x0 = cells_header.index("x0")
        i_y0 = cells_header.index("y0")
        i_x1 = cells_header.index("x1")
        i_y1 = cells_header.index("y1")
        i_text = cells_header.index("text")

        for cell_data in cells_data:
            x0 = cell_data[i_x0]
            y0 = cell_data[i_y0]
            x1 = cell_data[i_x1]
            y1 = cell_data[i_y1]

            cell_bbox = BoundingBox(
                l=x0 * scale * page_size.width / parser_width,
                b=y0 * scale * page_size.height / parser_height,
                r=x1 * scale * page_size.width / parser_width,
                t=y1 * scale * page_size.height / parser_height,
                coord_origin=CoordOrigin.BOTTOMLEFT,
            ).to_top_left_origin(page_height=page_size.height * scale)

            # Keep cells whose area lies mostly (>50%) inside the query box.
            if cell_bbox.intersection_over_self(bbox) > 0.5:
                pieces.append(cell_data[i_text])

        # join() instead of repeated string concatenation.
        return " ".join(pieces)

    def get_segmented_page(self) -> Optional[SegmentedPdfPage]:
        """Build a SegmentedPdfPage (textlines only) or None if the page is invalid."""
        if not self.valid:
            return None

        text_cells = self._compute_text_cells()

        # Get the PDF page geometry from pypdfium2
        dimension = get_pdf_page_geometry(self._ppage)

        # Create SegmentedPdfPage
        return SegmentedPdfPage(
            dimension=dimension,
            textline_cells=text_cells,
            char_cells=[],
            word_cells=[],
            # NOTE(review): the v1 backend passes has_lines= here — confirm
            # which keyword matches the SegmentedPdfPage schema.
            has_textlines=len(text_cells) > 0,
            has_words=False,
            has_chars=False,
        )

    def get_text_cells(self) -> Iterable[TextCell]:
        """Return the page's textline cells."""
        return self._compute_text_cells()

    def get_bitmap_rects(self, scale: float = 1) -> Iterable[BoundingBox]:
        """Yield (scaled) top-left-origin bounding boxes of page bitmaps."""
        AREA_THRESHOLD = 0  # 32 * 32

        images = self._dpage["sanitized"]["images"]["data"]
        images_header = self._dpage["sanitized"]["images"]["header"]

        # Hoist the O(n) header lookups out of the per-image loop.
        i_x0 = images_header.index("x0")
        i_y0 = images_header.index("y0")
        i_x1 = images_header.index("x1")
        i_y1 = images_header.index("y1")

        for row in images:
            x0 = row[i_x0]
            y0 = row[i_y0]
            x1 = row[i_x1]
            y1 = row[i_y1]

            cropbox = BoundingBox.from_tuple(
                (x0, y0, x1, y1), origin=CoordOrigin.BOTTOMLEFT
            ).to_top_left_origin(self.get_size().height)

            if cropbox.area() > AREA_THRESHOLD:
                cropbox = cropbox.scaled(scale=scale)
                yield cropbox

    def get_page_image(
        self, scale: float = 1, cropbox: Optional[BoundingBox] = None
    ) -> Image.Image:
        """Render the page (or the *cropbox* region) at *scale* as a PIL image."""
        page_size = self.get_size()

        if not cropbox:
            cropbox = BoundingBox(
                l=0,
                r=page_size.width,
                t=0,
                b=page_size.height,
                coord_origin=CoordOrigin.TOPLEFT,
            )
            padbox = BoundingBox(
                l=0, r=0, t=0, b=0, coord_origin=CoordOrigin.BOTTOMLEFT
            )
        else:
            # pdfium expects the crop as padding from each edge, bottom-left origin.
            padbox = cropbox.to_bottom_left_origin(page_size.height).model_copy()
            padbox.r = page_size.width - padbox.r
            padbox.t = page_size.height - padbox.t

        with pypdfium2_lock:
            image = (
                self._ppage.render(
                    scale=scale * 1.5,
                    rotation=0,  # no additional rotation
                    crop=padbox.as_tuple(),
                )
                .to_pil()
                .resize(
                    size=(round(cropbox.width * scale), round(cropbox.height * scale))
                )
            )  # We resize the image from 1.5x the given scale to make it sharper.

        return image

    def get_size(self) -> Size:
        """Page size in PDF points, read under the shared pypdfium2 lock."""
        with pypdfium2_lock:
            return Size(width=self._ppage.get_width(), height=self._ppage.get_height())

    def unload(self):
        """Drop references to the pdfium page and the parsed data."""
        self._ppage = None
        self._dpage = None
class DoclingParseV2DocumentBackend(PdfDocumentBackend):
    """Document backend pairing pypdfium2 (rendering/geometry) with the
    docling-parse v2 parser (text extraction), keyed by the document hash.
    pypdfium2 access is serialized through the shared pypdfium2_lock."""
    def __init__(self, in_doc: "InputDocument", path_or_stream: Union[BytesIO, Path]):
        super().__init__(in_doc, path_or_stream)
        with pypdfium2_lock:
            self._pdoc = pdfium.PdfDocument(self.path_or_stream)
        self.parser = pdf_parser_v2("fatal")
        # Register the document with the parser under its hash, from either
        # an in-memory stream or a filesystem path.
        success = False
        if isinstance(self.path_or_stream, BytesIO):
            success = self.parser.load_document_from_bytesio(
                self.document_hash, self.path_or_stream
            )
        elif isinstance(self.path_or_stream, Path):
            success = self.parser.load_document(
                self.document_hash, str(self.path_or_stream)
            )
        if not success:
            raise RuntimeError(
                f"docling-parse v2 could not load document {self.document_hash}."
            )
    def page_count(self) -> int:
        # return len(self._pdoc)  # To be replaced with docling-parse API
        # Cross-check pypdfium2's count against the parser's; the parser's
        # count wins, and a mismatch is logged as an error.
        len_1 = len(self._pdoc)
        len_2 = self.parser.number_of_pages(self.document_hash)
        if len_1 != len_2:
            _log.error(f"Inconsistent number of pages: {len_1}!={len_2}")
        return len_2
    def load_page(self, page_no: int) -> DoclingParseV2PageBackend:
        """Create a page backend for page *page_no*."""
        with pypdfium2_lock:
            return DoclingParseV2PageBackend(
                self.parser, self.document_hash, page_no, self._pdoc[page_no]
            )
    def is_valid(self) -> bool:
        return self.page_count() > 0
    def unload(self):
        """Release parser state and close the pdfium document."""
        super().unload()
        self.parser.unload_document(self.document_hash)
        with pypdfium2_lock:
            self._pdoc.close()
            self._pdoc = None
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/msword_backend.py | docling/backend/msword_backend.py | import logging
import re
from copy import deepcopy
from io import BytesIO
from pathlib import Path
from typing import Any, Callable, Final, Optional, Union
from docling_core.types.doc import (
ContentLayer,
DocItemLabel,
DoclingDocument,
DocumentOrigin,
GroupLabel,
ImageRef,
ListGroup,
NodeItem,
RefItem,
RichTableCell,
TableCell,
TableData,
TableItem,
)
from docling_core.types.doc.document import Formatting, Script
from docx import Document
from docx.document import Document as DocxDocument
from docx.oxml.table import CT_Tc
from docx.oxml.xmlchemy import BaseOxmlElement
from docx.styles.style import ParagraphStyle
from docx.table import Table, _Cell
from docx.text.hyperlink import Hyperlink
from docx.text.paragraph import Paragraph
from docx.text.run import Run
from lxml import etree
from PIL import Image, UnidentifiedImageError
from pydantic import AnyUrl
from typing_extensions import override
from docling.backend.abstract_backend import DeclarativeDocumentBackend
from docling.backend.docx.drawingml.utils import (
get_docx_to_pdf_converter,
get_pil_from_dml_docx,
)
from docling.backend.docx.latex.omml import oMath2Latex
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
class MsWordDocumentBackend(DeclarativeDocumentBackend):
_BLIP_NAMESPACES: Final = {
"a": "http://schemas.openxmlformats.org/drawingml/2006/main",
"r": "http://schemas.openxmlformats.org/officeDocument/2006/relationships",
"w": "http://schemas.openxmlformats.org/wordprocessingml/2006/main",
"wp": "http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing",
"mc": "http://schemas.openxmlformats.org/markup-compatibility/2006",
"v": "urn:schemas-microsoft-com:vml",
"wps": "http://schemas.microsoft.com/office/word/2010/wordprocessingShape",
"w10": "urn:schemas-microsoft-com:office:word",
"a14": "http://schemas.microsoft.com/office/drawing/2010/main",
}
    @override
    def __init__(
        self, in_doc: "InputDocument", path_or_stream: Union[BytesIO, Path]
    ) -> None:
        """Load the DOCX and initialize parsing state: hierarchy parents,
        list counters, paragraph history, and lazy DrawingML converter flags."""
        super().__init__(in_doc, path_or_stream)
        # Fully-qualified attribute name for w:val, used when reading numbering.
        self.XML_KEY = (
            "{http://schemas.openxmlformats.org/wordprocessingml/2006/main}val"
        )
        self.xml_namespaces = {
            "w": "http://schemas.microsoft.com/office/word/2003/wordml"
        }
        # Pre-compiled XPath for inline images (a:blip elements).
        self.blip_xpath_expr = etree.XPath(
            ".//a:blip", namespaces=MsWordDocumentBackend._BLIP_NAMESPACES
        )
        # self.initialise(path_or_stream)
        # Word file:
        self.path_or_stream: Union[BytesIO, Path] = path_or_stream
        self.valid: bool = False
        # Initialise the parents for the hierarchy
        self.max_levels: int = 10
        self.level_at_new_list: Optional[int] = None
        self.parents: dict[int, Optional[NodeItem]] = {}
        self.numbered_headers: dict[int, int] = {}
        self.equation_bookends: str = "<eq>{EQ}</eq>"
        # Track processed textbox elements to avoid duplication
        self.processed_textbox_elements: list[int] = []
        # DrawingML -> PDF converter is resolved lazily on first use.
        self.docx_to_pdf_converter: Optional[Callable] = None
        self.docx_to_pdf_converter_init = False
        self.display_drawingml_warning = True
        for i in range(-1, self.max_levels):
            self.parents[i] = None
        self.level = 0
        self.listIter = 0
        # Track list counters per numId and ilvl
        self.list_counters: dict[tuple[int, int], int] = {}
        # Set starting content layer
        self.content_layer = ContentLayer.BODY
        # Rolling history of paragraph context, one slot per processed paragraph.
        self.history: dict[str, Any] = {
            "names": [None],
            "levels": [None],
            "numids": [None],
            "indents": [None],
        }
        self.docx_obj = self.load_msword_file(
            path_or_stream=self.path_or_stream, document_hash=self.document_hash
        )
        if self.docx_obj:
            self.valid = True
@override
def is_valid(self) -> bool:
return self.valid
@classmethod
@override
def supports_pagination(cls) -> bool:
return False
@override
def unload(self):
if isinstance(self.path_or_stream, BytesIO):
self.path_or_stream.close()
self.path_or_stream = None
@classmethod
@override
def supported_formats(cls) -> set[InputFormat]:
return {InputFormat.DOCX}
    @override
    def convert(self) -> DoclingDocument:
        """Parses the DOCX into a structured document model.
        Returns:
            The parsed document.
        Raises:
            RuntimeError: if the backend failed to initialize (invalid file).
        """
        origin = DocumentOrigin(
            filename=self.file.name or "file",
            mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
            binary_hash=self.document_hash,
        )
        doc = DoclingDocument(name=self.file.stem or "file", origin=origin)
        if self.is_valid():
            assert self.docx_obj is not None
            # Walk the document body linearly, then attach headers/footers.
            doc, _ = self._walk_linear(self.docx_obj.element.body, doc)
            self._add_header_footer(self.docx_obj, doc)
            return doc
        else:
            raise RuntimeError(
                f"Cannot convert doc with {self.document_hash} because the backend failed to init."
            )
    @staticmethod
    def load_msword_file(
        path_or_stream: Union[BytesIO, Path], document_hash: str
    ) -> Optional[DocxDocument]:
        """Open the file with python-docx.
        Returns None when *path_or_stream* is neither BytesIO nor Path;
        raises RuntimeError (chained) when python-docx fails to open it.
        """
        try:
            if isinstance(path_or_stream, BytesIO):
                return Document(path_or_stream)
            elif isinstance(path_or_stream, Path):
                return Document(str(path_or_stream))
            else:
                return None
        except Exception as e:
            raise RuntimeError(
                f"MsWordDocumentBackend could not load document with hash {document_hash}"
            ) from e
def _update_history(
self,
name: str,
level: Optional[int],
numid: Optional[int],
ilevel: Optional[int],
):
self.history["names"].append(name)
self.history["levels"].append(level)
self.history["numids"].append(numid)
self.history["indents"].append(ilevel)
    def _prev_name(self) -> Optional[str]:
        # Name recorded by the most recent history snapshot.
        return self.history["names"][-1]
    def _prev_level(self) -> Optional[int]:
        # Level recorded by the most recent history snapshot.
        return self.history["levels"][-1]
    def _prev_numid(self) -> Optional[int]:
        # numId recorded by the most recent history snapshot.
        return self.history["numids"][-1]
    def _prev_indent(self) -> Optional[int]:
        # Indent level (ilvl) recorded by the most recent history snapshot.
        return self.history["indents"][-1]
def _get_level(self) -> int:
"""Return the first None index."""
for k, v in self.parents.items():
if k >= 0 and v is None:
return k
return 0
    def _walk_linear(
        self,
        body: BaseOxmlElement,
        doc: DoclingDocument,
        # parent:
    ) -> tuple[DoclingDocument, list[RefItem]]:
        """Walk the children of *body* linearly, dispatching tables, inline
        images, DrawingML, textboxes, sdt containers and paragraphs to their
        handlers. Returns the document and the refs of every item added."""
        added_elements = []
        for element in body:
            tag_name = etree.QName(element).localname
            # Check for Inline Images (blip elements)
            drawing_blip = self.blip_xpath_expr(element)
            drawingml_els = element.findall(
                ".//w:drawing", namespaces=MsWordDocumentBackend._BLIP_NAMESPACES
            )
            # Check for textbox content - check multiple textbox formats
            # Only process if the element hasn't been processed before
            element_id = id(element)
            if element_id not in self.processed_textbox_elements:
                # Modern Word textboxes
                txbx_xpath = etree.XPath(
                    ".//w:txbxContent|.//v:textbox//w:p",
                    namespaces=MsWordDocumentBackend._BLIP_NAMESPACES,
                )
                textbox_elements = txbx_xpath(element)
                # No modern textboxes found, check for alternate/legacy textbox formats
                if not textbox_elements and tag_name in ["drawing", "pict"]:
                    # Additional checks for textboxes in DrawingML and VML formats
                    alt_txbx_xpath = etree.XPath(
                        ".//wps:txbx//w:p|.//w10:wrap//w:p|.//a:p//a:t",
                        namespaces=MsWordDocumentBackend._BLIP_NAMESPACES,
                    )
                    textbox_elements = alt_txbx_xpath(element)
                    # Check for shape text that's not in a standard textbox
                    if not textbox_elements:
                        shape_text_xpath = etree.XPath(
                            ".//a:bodyPr/ancestor::*//a:t|.//a:txBody//a:t",
                            namespaces=MsWordDocumentBackend._BLIP_NAMESPACES,
                        )
                        shape_text_elements = shape_text_xpath(element)
                        if shape_text_elements:
                            # Create custom text elements from shape text
                            text_content = " ".join(
                                [t.text for t in shape_text_elements if t.text]
                            )
                            if text_content.strip():
                                _log.debug(f"Found shape text: {text_content[:50]}...")
                                # Create a paragraph-like element to process with standard handler
                                level = self._get_level()
                                shape_group = doc.add_group(
                                    label=GroupLabel.SECTION,
                                    parent=self.parents[level - 1],
                                    name="shape-text",
                                    content_layer=self.content_layer,
                                )
                                added_elements.append(shape_group.get_ref())
                                doc.add_text(
                                    label=DocItemLabel.TEXT,
                                    parent=shape_group,
                                    text=text_content,
                                    content_layer=self.content_layer,
                                )
                if textbox_elements:
                    # Mark the parent element as processed
                    self.processed_textbox_elements.append(element_id)
                    # Also mark all found textbox elements as processed
                    for tb_element in textbox_elements:
                        self.processed_textbox_elements.append(id(tb_element))
                    _log.debug(
                        f"Found textbox content with {len(textbox_elements)} elements"
                    )
                    tbc = self._handle_textbox_content(textbox_elements, doc)
                    added_elements.extend(tbc)
            # Check for Tables
            if tag_name == "tbl":
                try:
                    t = self._handle_tables(element, doc)
                    added_elements.extend(t)
                except Exception:
                    # Best-effort: a malformed table is skipped, not fatal.
                    _log.debug("could not parse a table, broken docx table")
            # Check for Image
            elif drawing_blip:
                pics = self._handle_pictures(drawing_blip, doc)
                added_elements.extend(pics)
                # Check for Text after the Image
                if (
                    tag_name == "p"
                    and element.find(
                        ".//w:t", namespaces=MsWordDocumentBackend._BLIP_NAMESPACES
                    )
                    is not None
                ):
                    te1 = self._handle_text_elements(element, doc)
                    added_elements.extend(te1)
            # Check for DrawingML elements
            elif drawingml_els:
                # Resolve the DOCX->PDF converter lazily, only once per document.
                if (
                    self.docx_to_pdf_converter is None
                    and self.docx_to_pdf_converter_init is False
                ):
                    self.docx_to_pdf_converter = get_docx_to_pdf_converter()
                    self.docx_to_pdf_converter_init = True
                if self.docx_to_pdf_converter is None:
                    # Warn at most once per document about the missing converter.
                    if self.display_drawingml_warning:
                        if self.docx_to_pdf_converter is None:
                            _log.warning(
                                "Found DrawingML elements in document, but no DOCX to PDF converters. "
                                "If you want these exported, make sure you have "
                                "LibreOffice binary in PATH or specify its path with DOCLING_LIBREOFFICE_CMD."
                            )
                        self.display_drawingml_warning = False
                else:
                    self._handle_drawingml(doc=doc, drawingml_els=drawingml_els)
            # Check for the sdt containers, like table of contents
            elif tag_name == "sdt":
                sdt_content = element.find(
                    ".//w:sdtContent", namespaces=MsWordDocumentBackend._BLIP_NAMESPACES
                )
                if sdt_content is not None:
                    # Iterate paragraphs, runs, or text inside <w:sdtContent>.
                    paragraphs = sdt_content.findall(
                        ".//w:p", namespaces=MsWordDocumentBackend._BLIP_NAMESPACES
                    )
                    for p in paragraphs:
                        te = self._handle_text_elements(p, doc)
                        added_elements.extend(te)
            # Check for Text
            elif tag_name == "p":
                # "tcPr", "sectPr"
                te = self._handle_text_elements(element, doc)
                added_elements.extend(te)
            else:
                _log.debug(f"Ignoring element in DOCX with tag: {tag_name}")
        return doc, added_elements
def _str_to_int(
self, s: Optional[str], default: Optional[int] = 0
) -> Optional[int]:
if s is None:
return None
try:
return int(s)
except ValueError:
return default
def _split_text_and_number(self, input_string: str) -> list[str]:
match = re.match(r"(\D+)(\d+)$|^(\d+)(\D+)", input_string)
if match:
parts = list(filter(None, match.groups()))
return parts
else:
return [input_string]
    def _get_numId_and_ilvl(
        self, paragraph: Paragraph
    ) -> tuple[Optional[int], Optional[int]]:
        """Return (numId, ilvl) from the paragraph's w:numPr, or (None, None)
        when the paragraph is not part of a list."""
        # Access the XML element of the paragraph
        numPr = paragraph._element.find(
            ".//w:numPr", namespaces=paragraph._element.nsmap
        )
        if numPr is not None:
            # Get the numId element and extract the value
            numId_elem = numPr.find("w:numId", namespaces=paragraph._element.nsmap)
            ilvl_elem = numPr.find("w:ilvl", namespaces=paragraph._element.nsmap)
            numId = numId_elem.get(self.XML_KEY) if numId_elem is not None else None
            ilvl = ilvl_elem.get(self.XML_KEY) if ilvl_elem is not None else None
            return self._str_to_int(numId, None), self._str_to_int(ilvl, None)
        return None, None  # If the paragraph is not part of a list
def _get_list_counter(self, numid: int, ilvl: int) -> int:
"""Get and increment the counter for a specific numId and ilvl combination."""
key = (numid, ilvl)
if key not in self.list_counters:
self.list_counters[key] = 0
self.list_counters[key] += 1
return self.list_counters[key]
def _reset_list_counters_for_new_sequence(self, numid: int):
"""Reset counters when starting a new numbering sequence."""
# Reset all counters for this numid
keys_to_reset = [key for key in self.list_counters.keys() if key[0] == numid]
for key in keys_to_reset:
self.list_counters[key] = 0
    def _is_numbered_list(self, numId: int, ilvl: int) -> bool:
        """Check if a list is numbered based on its numFmt value.
        Walks the package's numbering part: numId -> abstractNumId -> lvl[ilvl]
        -> numFmt, returning True for numeric formats and False otherwise
        (including any lookup failure)."""
        try:
            # Access the numbering part of the document
            if not hasattr(self.docx_obj, "part") or not hasattr(
                self.docx_obj.part, "package"
            ):
                return False
            numbering_part = None
            # Find the numbering part
            for part in self.docx_obj.part.package.parts:
                if "numbering" in part.partname:
                    numbering_part = part
                    break
            if numbering_part is None:
                return False
            # Parse the numbering XML
            numbering_root = numbering_part.element
            namespaces = {
                "w": "http://schemas.openxmlformats.org/wordprocessingml/2006/main"
            }
            # Find the numbering definition with the given numId
            num_xpath = f".//w:num[@w:numId='{numId}']"
            num_element = numbering_root.find(num_xpath, namespaces=namespaces)
            if num_element is None:
                return False
            # Get the abstractNumId from the num element
            abstract_num_id_elem = num_element.find(
                ".//w:abstractNumId", namespaces=namespaces
            )
            if abstract_num_id_elem is None:
                return False
            abstract_num_id = abstract_num_id_elem.get(
                "{http://schemas.openxmlformats.org/wordprocessingml/2006/main}val"
            )
            if abstract_num_id is None:
                return False
            # Find the abstract numbering definition
            abstract_num_xpath = (
                f".//w:abstractNum[@w:abstractNumId='{abstract_num_id}']"
            )
            abstract_num_element = numbering_root.find(
                abstract_num_xpath, namespaces=namespaces
            )
            if abstract_num_element is None:
                return False
            # Find the level definition for the given ilvl
            lvl_xpath = f".//w:lvl[@w:ilvl='{ilvl}']"
            lvl_element = abstract_num_element.find(lvl_xpath, namespaces=namespaces)
            if lvl_element is None:
                return False
            # Get the numFmt element
            num_fmt_element = lvl_element.find(".//w:numFmt", namespaces=namespaces)
            if num_fmt_element is None:
                return False
            num_fmt = num_fmt_element.get(
                "{http://schemas.openxmlformats.org/wordprocessingml/2006/main}val"
            )
            # Numbered formats include: decimal, lowerRoman, upperRoman, lowerLetter, upperLetter
            # Bullet formats include: bullet
            numbered_formats = {
                "decimal",
                "lowerRoman",
                "upperRoman",
                "lowerLetter",
                "upperLetter",
                "decimalZero",
            }
            return num_fmt in numbered_formats
        except Exception as e:
            _log.debug(f"Error determining if list is numbered: {e}")
            return False
    def _get_heading_and_level(self, style_label: str) -> tuple[str, Optional[int]]:
        """Map a style label like 'Heading1' / '1Heading' to ('Heading', level);
        anything else is returned unchanged with level None."""
        parts = self._split_text_and_number(style_label)
        if len(parts) == 2:
            # NOTE(review): sort() orders the digit part before the alpha part
            # ("1" < "H"), and both orderings below are then checked — confirm
            # the sort is intentional rather than relying on original order.
            parts.sort()
            label_str: str = ""
            label_level: Optional[int] = 0
            if parts[0].strip().lower() == "heading":
                label_str = "Heading"
                label_level = self._str_to_int(parts[1], None)
            if parts[1].strip().lower() == "heading":
                label_str = "Heading"
                label_level = self._str_to_int(parts[0], None)
            return label_str, label_level
        return style_label, None
    def _get_label_and_level(self, paragraph: Paragraph) -> tuple[str, Optional[int]]:
        """Derive a (label, level) pair from the paragraph style, resolving
        'heading'-like styles (directly or via the base style) to ('Heading', n).
        Falls back to ('Normal', None) when no style id is available."""
        if paragraph.style is None:
            return "Normal", None
        label: str = paragraph.style.style_id
        name: str = paragraph.style.name or ""
        base_style_label: Optional[str] = None
        base_style_name: Optional[str] = None
        if isinstance(
            base_style := getattr(paragraph.style, "base_style", None), ParagraphStyle
        ):
            base_style_label = base_style.style_id
            base_style_name = base_style.name
        if not label:
            return "Normal", None
        # "name:level" style ids carry the level explicitly.
        if ":" in label:
            parts = label.split(":")
            if len(parts) == 2:
                return parts[0], self._str_to_int(parts[1], None)
        if "heading" in label.lower():
            return self._get_heading_and_level(label)
        if "heading" in name.lower():
            return self._get_heading_and_level(name)
        if base_style_label and "heading" in base_style_label.lower():
            return self._get_heading_and_level(base_style_label)
        if base_style_name and "heading" in base_style_name.lower():
            return self._get_heading_and_level(base_style_name)
        return label, None
@classmethod
def _get_format_from_run(cls, run: Run) -> Optional[Formatting]:
# The .bold and .italic properties are booleans, but .underline can be an enum
# like WD_UNDERLINE.THICK (value 6), so we need to convert it to a boolean
is_bold = run.bold or False
is_italic = run.italic or False
is_strikethrough = run.font.strike or False
# Convert any non-None underline value to True
is_underline = bool(run.underline is not None and run.underline)
is_sub = run.font.subscript or False
is_sup = run.font.superscript or False
script = Script.SUB if is_sub else Script.SUPER if is_sup else Script.BASELINE
return Formatting(
bold=is_bold,
italic=is_italic,
underline=is_underline,
strikethrough=is_strikethrough,
script=script,
)
    def _get_paragraph_elements(self, paragraph: Paragraph):
        """
        Extract paragraph elements along with their formatting and hyperlink.
        Returns a list of (text, Formatting | None, hyperlink | None) tuples,
        grouping consecutive runs that share the same formatting.
        """
        # for now retain empty paragraphs for backwards compatibility:
        if paragraph.text.strip() == "":
            return [("", None, None)]
        paragraph_elements: list[
            tuple[str, Optional[Formatting], Optional[Union[AnyUrl, Path]]]
        ] = []
        group_text = ""
        previous_format = None
        # Iterate over the runs of the paragraph and group them by format
        for c in paragraph.iter_inner_content():
            if isinstance(c, Hyperlink):
                text = c.text
                hyperlink = Path(c.address)
                # Use the first run's formatting as the hyperlink's formatting.
                format = (
                    self._get_format_from_run(c.runs[0])
                    if c.runs and len(c.runs) > 0
                    else None
                )
            elif isinstance(c, Run):
                text = c.text
                hyperlink = None
                format = self._get_format_from_run(c)
            else:
                continue
            if (len(text.strip()) and format != previous_format) or (
                hyperlink is not None
            ):
                # If the style changes for a non empty text, add the previous group
                if len(group_text.strip()) > 0:
                    paragraph_elements.append(
                        (group_text.strip(), previous_format, None)
                    )
                    group_text = ""
                # If there is a hyperlink, add it immediately
                if hyperlink is not None:
                    paragraph_elements.append((text.strip(), format, hyperlink))
                    text = ""
                else:
                    previous_format = format
            group_text += text
        # Format the last group
        # NOTE(review): this flush uses `format` (from the last run) rather than
        # `previous_format`; they coincide on the common path — confirm for the
        # hyperlink-last edge case.
        if len(group_text.strip()) > 0:
            paragraph_elements.append((group_text.strip(), format, None))
        return paragraph_elements
    def _get_paragraph_position(self, paragraph_element):
        """Extract vertical position information from paragraph element.
        Tries, in order: sibling index among w:p elements, explicit position
        attributes, transform translation, anchor hints, VML style top, and
        finally the XML source line number as a proxy for document order."""
        # First try to directly get the index from w:p element that has an order-related attribute
        if (
            hasattr(paragraph_element, "getparent")
            and paragraph_element.getparent() is not None
        ):
            parent = paragraph_element.getparent()
            # Get all paragraph siblings
            paragraphs = [
                p for p in parent.getchildren() if etree.QName(p).localname == "p"
            ]
            # Find index of current paragraph within its siblings
            try:
                paragraph_index = paragraphs.index(paragraph_element)
                return paragraph_index  # Use index as position for consistent ordering
            except ValueError:
                pass
        # Look for position hints in element attributes and ancestor elements
        for elem in (*[paragraph_element], *paragraph_element.iterancestors()):
            # Check for direct position attributes
            for attr_name in ["y", "top", "positionY", "y-position", "position"]:
                value = elem.get(attr_name)
                if value:
                    try:
                        # Remove any non-numeric characters (like 'pt', 'px', etc.)
                        clean_value = re.sub(r"[^0-9.]", "", value)
                        if clean_value:
                            return float(clean_value)
                    except (ValueError, TypeError):
                        pass
            # Check for position in transform attribute
            transform = elem.get("transform")
            if transform:
                # Extract translation component from transform matrix
                match = re.search(r"translate\([^,]+,\s*([0-9.]+)", transform)
                if match:
                    try:
                        return float(match.group(1))
                    except ValueError:
                        pass
            # Check for anchors or relative position indicators in Word format
            # 'dist' attributes can indicate relative positioning
            for attr_name in ["distT", "distB", "anchor", "relativeFrom"]:
                if elem.get(attr_name) is not None:
                    return elem.sourceline  # Use the XML source line number as fallback
        # For VML shapes, look for specific attributes
        for ns_uri in paragraph_element.nsmap.values():
            if "vml" in ns_uri:
                # Try to extract position from style attribute
                style = paragraph_element.get("style")
                if style:
                    match = re.search(r"top:([0-9.]+)pt", style)
                    if match:
                        try:
                            return float(match.group(1))
                        except ValueError:
                            pass
        # If no better position indicator found, use XML source line number as proxy for order
        return (
            paragraph_element.sourceline
            if hasattr(paragraph_element, "sourceline")
            else None
        )
def _collect_textbox_paragraphs(self, textbox_elements):
"""Collect and organize paragraphs from textbox elements."""
processed_paragraphs = []
container_paragraphs = {}
for element in textbox_elements:
element_id = id(element)
# Skip if we've already processed this exact element
if element_id in processed_paragraphs:
continue
tag_name = etree.QName(element).localname
processed_paragraphs.append(element_id)
# Handle paragraphs directly found (VML textboxes)
if tag_name == "p":
# Find the containing textbox or shape element
container_id = None
for ancestor in element.iterancestors():
if any(ns in ancestor.tag for ns in ["textbox", "shape", "txbx"]):
container_id = id(ancestor)
break
if container_id not in container_paragraphs:
container_paragraphs[container_id] = []
container_paragraphs[container_id].append(
(element, self._get_paragraph_position(element))
)
# Handle txbxContent elements (Word DrawingML textboxes)
elif tag_name == "txbxContent":
paragraphs = element.findall(".//w:p", namespaces=element.nsmap)
container_id = id(element)
if container_id not in container_paragraphs:
container_paragraphs[container_id] = []
for p in paragraphs:
p_id = id(p)
if p_id not in processed_paragraphs:
processed_paragraphs.append(p_id)
container_paragraphs[container_id].append(
(p, self._get_paragraph_position(p))
)
else:
# Try to extract any paragraphs from unknown elements
paragraphs = element.findall(".//w:p", namespaces=element.nsmap)
container_id = id(element)
if container_id not in container_paragraphs:
container_paragraphs[container_id] = []
for p in paragraphs:
p_id = id(p)
if p_id not in processed_paragraphs:
processed_paragraphs.append(p_id)
container_paragraphs[container_id].append(
(p, self._get_paragraph_position(p))
)
return container_paragraphs
    def _handle_textbox_content(
        self,
        textbox_elements: list,
        doc: DoclingDocument,
    ) -> list[RefItem]:
        """Process textbox content and add it to the document structure.

        Creates a "textbox" section group under the current parent, emits all
        paragraphs found in *textbox_elements* (sorted by estimated vertical
        position within each container, de-duplicated by content+position),
        and returns the references of every item added.
        """
        elem_ref: list[RefItem] = []
        level = self._get_level()
        # Create a textbox group to contain all text from the textbox
        textbox_group = doc.add_group(
            label=GroupLabel.SECTION,
            parent=self.parents[level - 1],
            name="textbox",
            content_layer=self.content_layer,
        )
        elem_ref.append(textbox_group.get_ref())
        # Set this as the current parent to ensure textbox content
        # is properly nested in document structure
        original_parent = self.parents[level]
        self.parents[level] = textbox_group
        # Collect and organize paragraphs
        container_paragraphs = self._collect_textbox_paragraphs(textbox_elements)
        # Process all paragraphs
        all_paragraphs = []
        # Sort paragraphs within each container, then process containers
        for paragraphs in container_paragraphs.values():
            # Sort by vertical position within each container; entries with an
            # unknown (None) position sort last.
            sorted_container_paragraphs = sorted(
                paragraphs,
                key=lambda x: (
                    x[1] is None,
                    x[1] if x[1] is not None else float("inf"),
                ),
            )
            # Add the sorted paragraphs to our processing list
            all_paragraphs.extend(sorted_container_paragraphs)
        # Track processed paragraphs to avoid duplicates (same content and position)
        processed_paragraphs = set()
        # Process all the paragraphs
        for p, position in all_paragraphs:
            # Create paragraph object to get text content
            paragraph = Paragraph(p, self.docx_obj)
            text_content = paragraph.text
            # Create a unique identifier based on content and position
            paragraph_id = (text_content, position)
            # Skip if this paragraph (same content and position) was already processed
            if paragraph_id in processed_paragraphs:
                _log.debug(
                    f"Skipping duplicate paragraph: content='{text_content[:50]}...', position={position}"
                )
                continue
            # Mark this paragraph as processed
            processed_paragraphs.add(paragraph_id)
            elem_ref.extend(self._handle_text_elements(p, doc))
        # Restore original parent
        self.parents[level] = original_parent
        return elem_ref
def _handle_equations_in_text(self, element, text):
only_texts = []
only_equations = []
texts_and_equations = []
for subt in element.iter():
tag_name = etree.QName(subt).localname
if tag_name == "t" and "math" not in subt.tag:
if isinstance(subt.text, str):
only_texts.append(subt.text)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | true |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/noop_backend.py | docling/backend/noop_backend.py | import logging
from io import BytesIO
from pathlib import Path
from typing import Set, Union
from docling.backend.abstract_backend import AbstractDocumentBackend
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
class NoOpBackend(AbstractDocumentBackend):
    """
    A no-op backend that only validates input existence.
    Used e.g. for audio files where actual processing is handled by the ASR pipeline.
    """

    def __init__(self, in_doc: "InputDocument", path_or_stream: Union[BytesIO, Path]):
        """Record whether *path_or_stream* is a non-empty stream or an existing file."""
        super().__init__(in_doc, path_or_stream)
        _log.debug(f"NoOpBackend initialized for: {path_or_stream}")
        # Existence / non-emptiness is the only check this backend performs;
        # any failure during validation simply marks the input as invalid.
        try:
            source = self.path_or_stream
            if isinstance(source, BytesIO):
                # A stream is valid when it carries at least one byte.
                stream_len = len(source.getvalue())
                self.valid = stream_len > 0
                _log.debug(f"BytesIO stream length: {stream_len}")
            elif isinstance(source, Path):
                # A path is valid when the file exists on disk.
                self.valid = source.exists()
                _log.debug(f"File exists: {self.valid}")
            else:
                self.valid = False
        except Exception as e:
            _log.error(f"NoOpBackend validation failed: {e}")
            self.valid = False

    def is_valid(self) -> bool:
        """Return the validity flag computed at construction time."""
        return self.valid

    @classmethod
    def supports_pagination(cls) -> bool:
        """NoOp inputs are never paginated."""
        return False

    @classmethod
    def supported_formats(cls) -> Set[InputFormat]:
        """Accept every input format; actual handling happens downstream."""
        return set(InputFormat)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/html_backend.py | docling/backend/html_backend.py | import base64
import logging
import os
import re
import warnings
from contextlib import contextmanager
from copy import deepcopy
from io import BytesIO
from pathlib import Path
from typing import Final, Optional, Union, cast
from urllib.parse import urljoin, urlparse
import requests
from bs4 import BeautifulSoup, NavigableString, PageElement, Tag
from bs4.element import PreformattedString
from docling_core.types.doc import (
DocItem,
DocItemLabel,
DoclingDocument,
DocumentOrigin,
GroupItem,
GroupLabel,
PictureItem,
RefItem,
RichTableCell,
TableCell,
TableData,
TableItem,
TextItem,
)
from docling_core.types.doc.document import ContentLayer, Formatting, ImageRef, Script
from PIL import Image, UnidentifiedImageError
from pydantic import AnyUrl, BaseModel, ValidationError
from typing_extensions import override
from docling.backend.abstract_backend import (
DeclarativeDocumentBackend,
)
from docling.datamodel.backend_options import HTMLBackendOptions
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
from docling.exceptions import OperationNotAllowed
_log = logging.getLogger(__name__)
# Fallback raster dimensions (pixels) for images whose size cannot be read.
DEFAULT_IMAGE_WIDTH = 128
DEFAULT_IMAGE_HEIGHT = 128
# Tags that initiate distinct Docling items
_BLOCK_TAGS: Final = {
    "address",
    "details",
    "figure",
    "footer",
    "img",
    "h1",
    "h2",
    "h3",
    "h4",
    "h5",
    "h6",
    "ol",
    "p",
    "pre",
    "summary",
    "table",
    "ul",
}
# Block-level elements that should not appear inside <p>; finding one there
# forces the paragraph to be closed (browser-like error recovery).
_PARA_BREAKERS = {
    "address",
    "article",
    "aside",
    "blockquote",
    "div",
    "dl",
    "fieldset",
    "figcaption",
    "figure",
    "footer",
    "form",
    "h1",
    "h2",
    "h3",
    "h4",
    "h5",
    "h6",
    "header",
    "hr",
    "main",
    "nav",
    "ol",
    "ul",
    "li",
    "p",  # <p> inside <p> also forces closing
    "pre",
    "section",
    "table",
    "thead",
    "tbody",
    "tfoot",
    "tr",
    "td",
}
# Inline tags whose content is emitted as code items.
_CODE_TAG_SET: Final = {"code", "kbd", "samp"}
# Maps inline formatting tags to the Formatting keyword arguments they imply.
# Code tags map to an empty dict: they set the `code` flag instead.
_FORMAT_TAG_MAP: Final = {
    "b": {"bold": True},
    "strong": {"bold": True},
    "i": {"italic": True},
    "em": {"italic": True},
    "var": {"italic": True},
    # "mark",
    # "small",
    "s": {"strikethrough": True},
    "del": {"strikethrough": True},
    "u": {"underline": True},
    "ins": {"underline": True},
    "sub": {"script": Script.SUB},
    "sup": {"script": Script.SUPER},
    **{k: {} for k in _CODE_TAG_SET},
}
class _Context(BaseModel):
    """Mutable per-conversion parsing state, reset at the start of convert()."""

    # Whether the list group with a given self-ref is ordered (<ol>) or not (<ul>).
    list_ordered_flag_by_ref: dict[str, bool] = {}
    # Starting index per ordered-list group (the <ol start="..."> attribute).
    list_start_by_ref: dict[str, int] = {}
class AnnotatedText(BaseModel):
    """A text fragment together with its inline annotations."""

    text: str
    # Target of an enclosing <a href=...>, if any.
    hyperlink: Union[AnyUrl, Path, None] = None
    # Bold/italic/underline/etc. accumulated from enclosing format tags.
    formatting: Union[Formatting, None] = None
    # True when the fragment sits inside <code>/<kbd>/<samp>.
    code: bool = False
class AnnotatedTextList(list):
    """A list of ``AnnotatedText`` fragments with merge/split helpers."""

    def to_single_text_element(self) -> AnnotatedText:
        """Collapse all fragments into a single ``AnnotatedText``.

        Texts are stripped and joined with single spaces. On conflicting
        formatting or hyperlinks, the first non-None value wins and a warning
        is logged. The ``code`` flag is sticky: True if any fragment had it.
        """
        current_h = None
        current_text = ""
        current_f = None
        current_code = False
        for at in self:
            t = at.text
            h = at.hyperlink
            f = at.formatting
            c = at.code
            current_text += t.strip() + " "
            if f is not None and current_f is None:
                current_f = f
            elif f is not None and current_f is not None and f != current_f:
                _log.warning(
                    f"Clashing formatting: '{f}' and '{current_f}'! Chose '{current_f}'"
                )
            if h is not None and current_h is None:
                current_h = h
            elif h is not None and current_h is not None and h != current_h:
                _log.warning(
                    f"Clashing hyperlinks: '{h}' and '{current_h}'! Chose '{current_h}'"
                )
            current_code = c if c else current_code
        return AnnotatedText(
            text=current_text.strip(),
            hyperlink=current_h,
            formatting=current_f,
            code=current_code,
        )

    def simplify_text_elements(self) -> "AnnotatedTextList":
        """Merge consecutive fragments that share identical annotations.

        Adjacent fragments with the same hyperlink, formatting and code flag
        are concatenated; a space separator is inserted unless either side is
        whitespace-only. Returns a new list (or ``self`` when empty).
        """
        simplified = AnnotatedTextList()
        if not self:
            return self
        text = self[0].text
        hyperlink = self[0].hyperlink
        formatting = self[0].formatting
        code = self[0].code
        last_elm = text
        for i in range(1, len(self)):
            if (
                hyperlink == self[i].hyperlink
                and formatting == self[i].formatting
                and code == self[i].code
            ):
                # No separator when either side is blank, so artificial
                # newline fragments do not introduce stray spaces.
                sep = " "
                if not self[i].text.strip() or not last_elm.strip():
                    sep = ""
                text += sep + self[i].text
                last_elm = self[i].text
            else:
                simplified.append(
                    AnnotatedText(
                        text=text, hyperlink=hyperlink, formatting=formatting, code=code
                    )
                )
                text = self[i].text
                last_elm = text
                hyperlink = self[i].hyperlink
                formatting = self[i].formatting
                code = self[i].code
        # Flush the trailing run (skipped when its accumulated text is empty).
        if text:
            simplified.append(
                AnnotatedText(
                    text=text, hyperlink=hyperlink, formatting=formatting, code=code
                )
            )
        return simplified

    def split_by_newline(self):
        """Split into a list of ``AnnotatedTextList`` chunks at newlines.

        Each newline inside a fragment's text closes the current chunk;
        annotations are preserved on the split pieces via deep copies.
        """
        super_list = []
        active_annotated_text_list = AnnotatedTextList()
        for el in self:
            sub_texts = el.text.split("\n")
            if len(sub_texts) == 1:
                active_annotated_text_list.append(el)
            else:
                for text in sub_texts:
                    sub_el = deepcopy(el)
                    sub_el.text = text
                    active_annotated_text_list.append(sub_el)
                    super_list.append(active_annotated_text_list)
                    active_annotated_text_list = AnnotatedTextList()
        if active_annotated_text_list:
            super_list.append(active_annotated_text_list)
        return super_list
class HTMLDocumentBackend(DeclarativeDocumentBackend):
    """Declarative backend that converts HTML into a DoclingDocument."""

    @override
    def __init__(
        self,
        in_doc: InputDocument,
        path_or_stream: Union[BytesIO, Path],
        # NOTE(review): mutable default argument — this single
        # HTMLBackendOptions instance is shared by all calls that omit
        # `options`. Harmless if never mutated; confirm.
        options: HTMLBackendOptions = HTMLBackendOptions(),
    ):
        """Parse the HTML bytes into a BeautifulSoup tree.

        Raises:
            RuntimeError: if the input cannot be read or parsed.
        """
        super().__init__(in_doc, path_or_stream, options)
        self.options: HTMLBackendOptions
        self.soup: Optional[BeautifulSoup] = None
        self.path_or_stream: Union[BytesIO, Path] = path_or_stream
        # Base URI/path used to resolve relative links and image locations.
        self.base_path: Optional[str] = str(options.source_uri)
        # Initialize the parents for the hierarchy
        self.max_levels = 10
        self.level = 0
        self.parents: dict[int, Optional[Union[DocItem, GroupItem]]] = {}
        self.ctx = _Context()
        for i in range(self.max_levels):
            self.parents[i] = None
        # Active hyperlink / formatting state while walking the DOM.
        self.hyperlink: Union[AnyUrl, Path, None] = None
        self.format_tags: list[str] = []
        try:
            raw = (
                path_or_stream.getvalue()
                if isinstance(path_or_stream, BytesIO)
                else Path(path_or_stream).read_bytes()
            )
            self.soup = BeautifulSoup(raw, "html.parser")
        except Exception as e:
            raise RuntimeError(
                "Could not initialize HTML backend for file with "
                f"hash {self.document_hash}."
            ) from e
@override
def is_valid(self) -> bool:
return self.soup is not None
    @classmethod
    @override
    def supports_pagination(cls) -> bool:
        """HTML has no native page concept."""
        return False
@override
def unload(self):
if isinstance(self.path_or_stream, BytesIO):
self.path_or_stream.close()
self.path_or_stream = None
    @classmethod
    @override
    def supported_formats(cls) -> set[InputFormat]:
        """This backend handles HTML input only."""
        return {InputFormat.HTML}
    @override
    def convert(self) -> DoclingDocument:
        """Convert the parsed HTML soup into a DoclingDocument.

        Pre-processing mutates the soup in place: scripts/styles and hidden
        tags are removed, <br> is normalized to newline strings, and invalid
        flow content inside <p> is rewritten.

        Raises:
            RuntimeError: if the backend failed to parse the HTML.
        """
        _log.debug("Starting HTML conversion...")
        if not self.is_valid():
            raise RuntimeError("Invalid HTML document.")
        origin = DocumentOrigin(
            filename=self.file.name or "file",
            mimetype="text/html",
            binary_hash=self.document_hash,
        )
        doc = DoclingDocument(name=self.file.stem or "file", origin=origin)
        assert self.soup is not None
        # set the title as furniture, since it is part of the document metadata
        title = self.soup.title
        if title and self.options.add_title:
            title_text = title.get_text(separator=" ", strip=True)
            title_clean = HTMLDocumentBackend._clean_unicode(title_text)
            doc.add_title(
                text=title_clean,
                orig=title_text,
                content_layer=ContentLayer.FURNITURE,
            )
        # remove script and style tags
        for tag in self.soup(["script", "noscript", "style"]):
            tag.decompose()
        # remove any hidden tag
        for tag in self.soup(hidden=True):
            tag.decompose()
        # fix flow content that is not permitted inside <p>
        HTMLDocumentBackend._fix_invalid_paragraph_structure(self.soup)
        content = self.soup.body or self.soup
        # normalize <br> tags
        for br in content("br"):
            br.replace_with(NavigableString("\n"))
        # set default content layer
        # Furniture before the first heading rule, except for headers in tables
        header = None
        # Find all headers first
        all_headers = content.find_all(["h1", "h2", "h3", "h4", "h5", "h6"])
        # Keep only those that do NOT have a <table> in a parent chain
        clean_headers = [h for h in all_headers if not h.find_parent("table")]
        # Pick the first header from the remaining
        if len(clean_headers):
            header = clean_headers[0]
        # Set starting content layer: everything before the first non-table
        # heading is furniture, unless furniture inference is disabled.
        self.content_layer = (
            ContentLayer.BODY
            if (not self.options.infer_furniture) or (header is None)
            else ContentLayer.FURNITURE
        )
        # reset context
        self.ctx = _Context()
        self._walk(content, doc)
        return doc
    @staticmethod
    def _fix_invalid_paragraph_structure(soup: BeautifulSoup) -> None:
        """Rewrite <p> elements that contain block-level breakers.

        This function emulates browser logic when other block-level elements
        are found inside a <p> element.

        When a <p> is open and a block-level breaker (e.g., h1-h6, div, table)
        appears, automatically close the <p>, emit it, then emit the breaker,
        and if needed open a new <p> for trailing text.

        Args:
            soup: The HTML document. The DOM may be rewritten.
        """

        # Both helpers close over `current_p` / `new_nodes`, which are rebound
        # for every offending <p> in the loop below.
        def _start_para():
            # Open a fresh <p> placeholder if none is currently open.
            nonlocal current_p
            if current_p is None:
                current_p = soup.new_tag("p")
                new_nodes.append(current_p)

        def _flush_para_if_empty():
            nonlocal current_p
            if current_p is not None and not current_p.get_text(strip=True):
                # remove empty paragraph placeholder
                if current_p in new_nodes:
                    new_nodes.remove(current_p)
                current_p = None

        # Only <p> elements that actually contain a breaker need rewriting.
        paragraphs = soup.select(f"p:has({','.join(tag for tag in _PARA_BREAKERS)})")
        for p in paragraphs:
            parent = p.parent
            if parent is None:
                continue
            new_nodes = []
            current_p = None
            for node in list(p.contents):
                if isinstance(node, NavigableString):
                    text = str(node)
                    node.extract()
                    if text.strip():
                        _start_para()
                        if current_p is not None:
                            current_p.append(NavigableString(text))
                    # skip whitespace-only text
                    continue
                if isinstance(node, Tag):
                    node.extract()
                    if node.name in _PARA_BREAKERS:
                        # Breaker: close the open <p> (dropping it if empty)
                        # and emit the breaker as a sibling.
                        _flush_para_if_empty()
                        new_nodes.append(node)
                        continue
                    else:
                        # Inline content after a breaker reopens a new <p>.
                        _start_para()
                        if current_p is not None:
                            current_p.append(node)
                        continue
            _flush_para_if_empty()
            # Replace the original <p> with the rewritten node sequence,
            # preserving its position among its siblings.
            siblings = list(parent.children)
            try:
                idx = siblings.index(p)
            except ValueError:
                # p might have been removed
                continue
            p.extract()
            for n in reversed(new_nodes):
                parent.insert(idx, n)
@staticmethod
def _is_remote_url(value: str) -> bool:
parsed = urlparse(value)
return parsed.scheme in {"http", "https", "ftp", "s3", "gs"}
def _resolve_relative_path(self, loc: str) -> str:
abs_loc = loc
if self.base_path:
if loc.startswith("//"):
# Protocol-relative URL - default to https
abs_loc = "https:" + loc
elif not loc.startswith(("http://", "https://", "data:", "file://")):
if HTMLDocumentBackend._is_remote_url(self.base_path): # remote fetch
abs_loc = urljoin(self.base_path, loc)
elif self.base_path: # local fetch
# For local files, resolve relative to the HTML file location
abs_loc = str(Path(self.base_path).parent / loc)
_log.debug(f"Resolved location {loc} to {abs_loc}")
return abs_loc
    @staticmethod
    def group_cell_elements(
        group_name: str,
        doc: DoclingDocument,
        provs_in_cell: list[RefItem],
        docling_table: TableItem,
    ) -> RefItem:
        """Re-parent the items of one table cell under a new group.

        Creates an unspecified group named *group_name* under *docling_table*
        and moves every item referenced in *provs_in_cell* into it, detaching
        each item from its previous parent.

        Returns:
            The reference of the newly created group.
        """
        group_element = doc.add_group(
            label=GroupLabel.UNSPECIFIED,
            name=group_name,
            parent=docling_table,
        )
        for prov in provs_in_cell:
            group_element.children.append(prov)
            pr_item = prov.resolve(doc)
            item_parent = pr_item.parent.resolve(doc)
            # Detach from the old parent before re-pointing the item.
            if pr_item.get_ref() in item_parent.children:
                item_parent.children.remove(pr_item.get_ref())
            pr_item.parent = group_element.get_ref()
        ref_for_rich_cell = group_element.get_ref()
        return ref_for_rich_cell
@staticmethod
def process_rich_table_cells(
provs_in_cell: list[RefItem],
group_name: str,
doc: DoclingDocument,
docling_table: TableItem,
) -> tuple[bool, Union[RefItem, None]]:
rich_table_cell = False
ref_for_rich_cell = None
if len(provs_in_cell) >= 1:
# Cell rich cell has multiple elements, we need to group them
rich_table_cell = True
ref_for_rich_cell = HTMLDocumentBackend.group_cell_elements(
group_name, doc, provs_in_cell, docling_table
)
return rich_table_cell, ref_for_rich_cell
def _is_rich_table_cell(self, table_cell: Tag) -> bool:
"""Determine whether an table cell should be parsed as a Docling RichTableCell.
A table cell can hold rich content and be parsed with a Docling RichTableCell.
However, this requires walking through the content elements and creating
Docling node items. If the cell holds only plain text, the parsing is simpler
and using a TableCell is prefered.
Args:
table_cell: The HTML tag representing a table cell.
Returns:
Whether the cell should be parsed as RichTableCell.
"""
is_rich: bool = True
children = table_cell.find_all(recursive=True) # all descendants of type Tag
if not children:
content = [
item
for item in table_cell.contents
if isinstance(item, NavigableString)
]
is_rich = len(content) > 1
else:
annotations = self._extract_text_and_hyperlink_recursively(
table_cell, find_parent_annotation=True
)
if not annotations:
is_rich = bool(item for item in children if item.name == "img")
elif len(annotations) == 1:
anno: AnnotatedText = annotations[0]
is_rich = bool(anno.formatting) or bool(anno.hyperlink) or anno.code
return is_rich
    def parse_table_data(
        self,
        element: Tag,
        doc: DoclingDocument,
        docling_table: TableItem,
        num_rows: int,
        num_cols: int,
    ) -> Optional[TableData]:
        """Fill *docling_table* with cells parsed from an HTML <table> tag.

        Handles row/col spans, header detection (<th>, rowspan-based row
        headers) and rich cells (cells with nested markup), whose content is
        walked recursively and grouped.

        NOTE(review): the returned TableData keeps an empty ``table_cells``
        list — cells are attached to *docling_table* via ``add_table_cell``.
        Confirm whether callers use the return value.
        """
        # Flatten <thead>/<tbody> wrappers so rows are direct children.
        for t in cast(list[Tag], element.find_all(["thead", "tbody"], recursive=False)):
            t.unwrap()
        _log.debug(f"The table has {num_rows} rows and {num_cols} cols.")
        # Occupancy grid used to place spanned cells into free columns.
        grid: list = [[None for _ in range(num_cols)] for _ in range(num_rows)]
        data = TableData(num_rows=num_rows, num_cols=num_cols, table_cells=[])
        # Iterate over the rows in the table
        start_row_span = 0
        row_idx = -1
        # We don't want this recursive to support nested tables
        for row in element("tr", recursive=False):
            if not isinstance(row, Tag):
                continue
            # For each row, find all the column cells (both <td> and <th>)
            # We don't want this recursive to support nested tables
            cells = row(["td", "th"], recursive=False)
            # Check if cell is in a column header or row header
            col_header = True
            row_header = True
            for html_cell in cells:
                if isinstance(html_cell, Tag):
                    _, row_span = HTMLDocumentBackend._get_cell_spans(html_cell)
                    if html_cell.name == "td":
                        col_header = False
                        row_header = False
                    elif row_span == 1:
                        row_header = False
            if not row_header:
                row_idx += 1
                start_row_span = 0
            else:
                start_row_span += 1
            # Extract the text content of each cell
            col_idx = 0
            for html_cell in cells:
                if not isinstance(html_cell, Tag):
                    continue
                # extract inline formulas
                for formula in html_cell("inline-formula"):
                    math_parts = formula.text.split("$$")
                    if len(math_parts) == 3:
                        math_formula = f"$${math_parts[1]}$$"
                        formula.replace_with(NavigableString(math_formula))
                provs_in_cell: list[RefItem] = []
                rich_table_cell = self._is_rich_table_cell(html_cell)
                if rich_table_cell:
                    # Parse table cell sub-tree for Rich Cells content:
                    with self._use_table_cell_context():
                        provs_in_cell = self._walk(html_cell, doc)
                    group_name = f"rich_cell_group_{len(doc.tables)}_{col_idx}_{start_row_span + row_idx}"
                    rich_table_cell, ref_for_rich_cell = (
                        HTMLDocumentBackend.process_rich_table_cells(
                            provs_in_cell, group_name, doc, docling_table
                        )
                    )
                # Extracting text
                text = HTMLDocumentBackend._clean_unicode(
                    self.get_text(html_cell).strip()
                )
                col_span, row_span = self._get_cell_spans(html_cell)
                if row_header:
                    row_span -= 1
                # Skip columns already claimed by earlier spanning cells.
                while (
                    col_idx < num_cols
                    and grid[row_idx + start_row_span][col_idx] is not None
                ):
                    col_idx += 1
                # Mark the spanned area of the grid as occupied by this text.
                for r in range(start_row_span, start_row_span + row_span):
                    for c in range(col_span):
                        if row_idx + r < num_rows and col_idx + c < num_cols:
                            grid[row_idx + r][col_idx + c] = text
                if rich_table_cell:
                    rich_cell = RichTableCell(
                        text=text,
                        row_span=row_span,
                        col_span=col_span,
                        start_row_offset_idx=start_row_span + row_idx,
                        end_row_offset_idx=start_row_span + row_idx + row_span,
                        start_col_offset_idx=col_idx,
                        end_col_offset_idx=col_idx + col_span,
                        column_header=col_header,
                        row_header=((not col_header) and html_cell.name == "th"),
                        ref=ref_for_rich_cell,  # points to an artificial group around children
                    )
                    doc.add_table_cell(table_item=docling_table, cell=rich_cell)
                else:
                    simple_cell = TableCell(
                        text=text,
                        row_span=row_span,
                        col_span=col_span,
                        start_row_offset_idx=start_row_span + row_idx,
                        end_row_offset_idx=start_row_span + row_idx + row_span,
                        start_col_offset_idx=col_idx,
                        end_col_offset_idx=col_idx + col_span,
                        column_header=col_header,
                        row_header=((not col_header) and html_cell.name == "th"),
                    )
                    doc.add_table_cell(table_item=docling_table, cell=simple_cell)
        return data
    def _walk(self, element: Tag, doc: DoclingDocument) -> list[RefItem]:
        """Parse an XML tag by recursively walking its content.

        While walking, the method buffers inline text across tags like <b> or <span>,
        emitting text nodes only at block boundaries.

        Args:
            element: The XML tag to parse.
            doc: The Docling document to be updated with the parsed content.

        Returns:
            References of all document items added while walking *element*.
        """
        added_refs: list[RefItem] = []
        buffer: AnnotatedTextList = AnnotatedTextList()

        def _flush_buffer() -> None:
            # Emit the buffered inline fragments as text/code items and clear
            # the buffer; called at every block boundary.
            if not buffer:
                return
            annotated_text_list: AnnotatedTextList = buffer.simplify_text_elements()
            parts = annotated_text_list.split_by_newline()
            buffer.clear()
            if not "".join([el.text for el in annotated_text_list]):
                return
            for annotated_text_list in parts:
                with self._use_inline_group(annotated_text_list, doc):
                    for annotated_text in annotated_text_list:
                        if annotated_text.text.strip():
                            seg_clean = HTMLDocumentBackend._clean_unicode(
                                annotated_text.text.strip()
                            )
                            if annotated_text.code:
                                docling_code2 = doc.add_code(
                                    parent=self.parents[self.level],
                                    text=seg_clean,
                                    content_layer=self.content_layer,
                                    formatting=annotated_text.formatting,
                                    hyperlink=annotated_text.hyperlink,
                                )
                                added_refs.append(docling_code2.get_ref())
                            else:
                                docling_text2 = doc.add_text(
                                    parent=self.parents[self.level],
                                    label=DocItemLabel.TEXT,
                                    text=seg_clean,
                                    content_layer=self.content_layer,
                                    formatting=annotated_text.formatting,
                                    hyperlink=annotated_text.hyperlink,
                                )
                                added_refs.append(docling_text2.get_ref())

        for node in element.contents:
            if isinstance(node, Tag):
                name = node.name.lower()
                if name == "img":
                    _flush_buffer()
                    im_ref3 = self._emit_image(node, doc)
                    if im_ref3:
                        added_refs.append(im_ref3)
                elif name in _FORMAT_TAG_MAP:
                    _flush_buffer()
                    with self._use_format([name]):
                        wk = self._walk(node, doc)
                    added_refs.extend(wk)
                elif name == "a":
                    with self._use_hyperlink(node):
                        wk2 = self._walk(node, doc)
                    added_refs.extend(wk2)
                elif name in _BLOCK_TAGS:
                    _flush_buffer()
                    blk = self._handle_block(node, doc)
                    added_refs.extend(blk)
                elif node.find(_BLOCK_TAGS):
                    # No direct block tag, but blocks nested deeper: recurse.
                    _flush_buffer()
                    wk3 = self._walk(node, doc)
                    added_refs.extend(wk3)
                else:
                    # Pure inline content: accumulate into the buffer.
                    buffer.extend(
                        self._extract_text_and_hyperlink_recursively(
                            node, find_parent_annotation=True, keep_newlines=True
                        )
                    )
            elif isinstance(node, NavigableString) and not isinstance(
                node, PreformattedString
            ):
                if str(node).strip("\n\r") == "":
                    # Whitespace-only run acts as a separator between blocks.
                    _flush_buffer()
                else:
                    buffer.extend(
                        self._extract_text_and_hyperlink_recursively(
                            node, find_parent_annotation=True, keep_newlines=True
                        )
                    )
        _flush_buffer()
        return added_refs
@staticmethod
def _collect_parent_format_tags(item: PageElement) -> list[str]:
tags = []
for format_tag in _FORMAT_TAG_MAP:
this_parent = item.parent
while this_parent is not None:
if this_parent.name == format_tag:
tags.append(format_tag)
break
this_parent = this_parent.parent
return tags
@property
def _formatting(self):
kwargs = {}
for t in self.format_tags:
kwargs.update(_FORMAT_TAG_MAP[t])
if not kwargs:
return None
return Formatting(**kwargs)
    def _extract_text_and_hyperlink_recursively(
        self,
        item: PageElement,
        ignore_list=False,
        find_parent_annotation=False,
        keep_newlines=False,
    ) -> AnnotatedTextList:
        """Flatten *item* into annotated text fragments.

        Args:
            item: Tag or string node to flatten.
            ignore_list: Skip <ul>/<ol> subtrees when True.
            find_parent_annotation: Also inherit hyperlink/formatting from
                ancestors of *item* before descending.
            keep_newlines: Emit a newline fragment for newline-only strings.
        """
        result: AnnotatedTextList = AnnotatedTextList()
        # If find_parent_annotation, make sure that we keep track of
        # any a- or formatting-tag that has been present in the
        # DOM-parents already.
        if find_parent_annotation:
            format_tags = self._collect_parent_format_tags(item)
            this_parent = item.parent
            while this_parent is not None:
                if this_parent.name == "a" and this_parent.get("href"):
                    with self._use_format(format_tags):
                        with self._use_hyperlink(this_parent):
                            # NOTE(review): keep_newlines is not forwarded on
                            # this path — confirm whether that is intended.
                            return self._extract_text_and_hyperlink_recursively(
                                item, ignore_list
                            )
                this_parent = this_parent.parent
        if isinstance(item, PreformattedString):
            # Comments / CDATA / doctype nodes contribute no text.
            return AnnotatedTextList()
        if isinstance(item, NavigableString):
            text = item.strip()
            code = any(code_tag in self.format_tags for code_tag in _CODE_TAG_SET)
            if text:
                return AnnotatedTextList(
                    [
                        AnnotatedText(
                            text=text,
                            hyperlink=self.hyperlink,
                            formatting=self._formatting,
                            code=code,
                        )
                    ]
                )
            if keep_newlines and item.strip("\n\r") == "":
                # Preserve pure-newline runs as explicit "\n" markers.
                return AnnotatedTextList(
                    [
                        AnnotatedText(
                            text="\n",
                            hyperlink=self.hyperlink,
                            formatting=self._formatting,
                            code=code,
                        )
                    ]
                )
            return AnnotatedTextList()
        tag = cast(Tag, item)
        if not ignore_list or (tag.name not in ["ul", "ol"]):
            for child in tag:
                if isinstance(child, Tag) and child.name in _FORMAT_TAG_MAP:
                    with self._use_format([child.name]):
                        result.extend(
                            self._extract_text_and_hyperlink_recursively(
                                child, ignore_list, keep_newlines=keep_newlines
                            )
                        )
                elif isinstance(child, Tag) and child.name == "a":
                    with self._use_hyperlink(child):
                        result.extend(
                            self._extract_text_and_hyperlink_recursively(
                                child, ignore_list, keep_newlines=keep_newlines
                            )
                        )
                else:
                    # Recursively get the child's text content
                    result.extend(
                        self._extract_text_and_hyperlink_recursively(
                            child, ignore_list, keep_newlines=keep_newlines
                        )
                    )
        return result
    @contextmanager
    def _use_hyperlink(self, tag: Tag):
        """Set ``self.hyperlink`` from *tag*'s href for the context's duration.

        The href is resolved against the base path; relative links that
        pydantic cannot validate as URLs are kept as ``Path`` objects. The
        previous hyperlink is restored on exit.
        """
        old_hyperlink: Union[AnyUrl, Path, None] = None
        new_hyperlink: Union[AnyUrl, Path, None] = None
        this_href = tag.get("href")
        if this_href is None:
            yield None
        else:
            if isinstance(this_href, str) and this_href:
                old_hyperlink = self.hyperlink
                this_href = self._resolve_relative_path(this_href)
                # ugly fix for relative links since pydantic does not support them.
                try:
                    new_hyperlink = AnyUrl(this_href)
                except ValidationError:
                    new_hyperlink = Path(this_href)
                self.hyperlink = new_hyperlink
            try:
                yield None
            finally:
                # Only restore when the hyperlink was actually replaced above.
                if new_hyperlink:
                    self.hyperlink = old_hyperlink
@contextmanager
def _use_format(self, tags: list[str]):
if not tags:
yield None
else:
self.format_tags.extend(tags)
try:
yield None
finally:
self.format_tags = self.format_tags[: -len(tags)]
    @contextmanager
    def _use_inline_group(
        self, annotated_text_list: AnnotatedTextList, doc: DoclingDocument
    ):
        """Create an inline group for annotated texts.

        Checks if annotated_text_list has more than one item and if so creates an inline
        group in which the text elements can then be generated. While the context manager
        is active the inline group is set as the current parent.

        Args:
            annotated_text_list (AnnotatedTextList): Annotated text
            doc (DoclingDocument): Currently used document
        """
        if len(annotated_text_list) > 1:
            inline_fmt = doc.add_group(
                label=GroupLabel.INLINE,
                parent=self.parents[self.level],
                content_layer=self.content_layer,
            )
            # Descend one level so emitted items nest under the inline group.
            self.parents[self.level + 1] = inline_fmt
            self.level += 1
            try:
                yield None
            finally:
                # Pop back to the previous parent even if emission failed.
                self.parents[self.level] = None
                self.level -= 1
        else:
            # A single fragment needs no grouping.
            yield None
@contextmanager
def _use_details(self, tag: Tag, doc: DoclingDocument):
"""Create a group with the content of a details tag.
While the context manager is active, the hierarchy level is set one
level higher as the cuurent parent.
Args:
tag: The details tag.
doc: Currently used document.
"""
self.parents[self.level + 1] = doc.add_group(
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | true |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/asciidoc_backend.py | docling/backend/asciidoc_backend.py | import logging
import re
from io import BytesIO
from pathlib import Path
from typing import Final, Union
from docling_core.types.doc import (
DocItemLabel,
DoclingDocument,
DocumentOrigin,
GroupItem,
GroupLabel,
ImageRef,
Size,
TableCell,
TableData,
)
from docling.backend.abstract_backend import DeclarativeDocumentBackend
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
# Fallback raster dimensions (pixels) for images without explicit size.
DEFAULT_IMAGE_WIDTH: Final = 128
DEFAULT_IMAGE_HEIGHT: Final = 128
class AsciiDocBackend(DeclarativeDocumentBackend):
    """Declarative backend that parses AsciiDoc sources line by line."""

    def __init__(self, in_doc: "InputDocument", path_or_stream: Union[BytesIO, Path]):
        """Load the AsciiDoc source into ``self.lines``.

        Raises:
            RuntimeError: if the stream/file cannot be read or decoded.
        """
        super().__init__(in_doc, path_or_stream)
        self.path_or_stream = path_or_stream
        try:
            if isinstance(self.path_or_stream, BytesIO):
                text_stream = self.path_or_stream.getvalue().decode("utf-8")
                # NOTE(review): split("\n") yields lines WITHOUT trailing
                # newlines, while the Path branch below keeps them
                # (readlines()). Confirm downstream parsing tolerates both.
                self.lines = text_stream.split("\n")
            if isinstance(self.path_or_stream, Path):
                with open(self.path_or_stream, encoding="utf-8") as f:
                    self.lines = f.readlines()
            self.valid = True
        except Exception as e:
            raise RuntimeError(
                f"Could not initialize AsciiDoc backend for file with hash {self.document_hash}."
            ) from e
        return
def is_valid(self) -> bool:
return self.valid
    @classmethod
    def supports_pagination(cls) -> bool:
        """AsciiDoc sources have no page structure."""
        return False
    def unload(self):
        """Nothing to release; the loaded lines are plain Python strings."""
        return
    @classmethod
    def supported_formats(cls) -> set[InputFormat]:
        """This backend handles AsciiDoc input only."""
        return {InputFormat.ASCIIDOC}
def convert(self) -> DoclingDocument:
"""
Parses the ASCII into a structured document model.
"""
origin = DocumentOrigin(
filename=self.file.name or "file",
mimetype="text/asciidoc",
binary_hash=self.document_hash,
)
doc = DoclingDocument(name=self.file.stem or "file", origin=origin)
doc = self._parse(doc)
return doc
def _parse(self, doc: DoclingDocument):
"""
Main function that orchestrates the parsing by yielding components:
title, section headers, text, lists, and tables.
"""
in_list = False
in_table = False
text_data: list[str] = []
table_data: list[str] = []
caption_data: list[str] = []
# parents: dict[int, Union[DocItem, GroupItem, None]] = {}
parents: dict[int, Union[GroupItem, None]] = {}
# indents: dict[int, Union[DocItem, GroupItem, None]] = {}
indents: dict[int, Union[GroupItem, None]] = {}
for i in range(10):
parents[i] = None
indents[i] = None
for line in self.lines:
# line = line.strip()
# Title
if self._is_title(line):
item = self._parse_title(line)
level = item["level"]
parents[level] = doc.add_text(
text=item["text"], label=DocItemLabel.TITLE
)
# Section headers
elif self._is_section_header(line):
item = self._parse_section_header(line)
level = item["level"]
parents[level] = doc.add_heading(
text=item["text"], level=item["level"], parent=parents[level - 1]
)
for k, v in parents.items():
if k > level:
parents[k] = None
# Lists
elif self._is_list_item(line):
_log.debug(f"line: {line}")
item = self._parse_list_item(line)
_log.debug(f"parsed list-item: {item}")
level = self._get_current_level(parents)
if not in_list:
in_list = True
parents[level + 1] = doc.add_group(
parent=parents[level], name="list", label=GroupLabel.LIST
)
indents[level + 1] = item["indent"]
elif in_list and item["indent"] > indents[level]:
parents[level + 1] = doc.add_group(
parent=parents[level], name="list", label=GroupLabel.LIST
)
indents[level + 1] = item["indent"]
elif in_list and item["indent"] < indents[level]:
# print(item["indent"], " => ", indents[level])
while item["indent"] < indents[level]:
# print(item["indent"], " => ", indents[level])
parents[level] = None
indents[level] = None
level -= 1
doc.add_list_item(
item["text"], parent=self._get_current_parent(parents)
)
elif in_list and not self._is_list_item(line):
in_list = False
level = self._get_current_level(parents)
parents[level] = None
# Tables
elif line.strip() == "|===" and not in_table: # start of table
in_table = True
elif self._is_table_line(line): # within a table
in_table = True
table_data.append(self._parse_table_line(line))
elif in_table and (
(not self._is_table_line(line)) or line.strip() == "|==="
): # end of table
caption = None
if len(caption_data) > 0:
caption = doc.add_text(
text=" ".join(caption_data), label=DocItemLabel.CAPTION
)
caption_data = []
data = self._populate_table_as_grid(table_data)
doc.add_table(
data=data, parent=self._get_current_parent(parents), caption=caption
)
in_table = False
table_data = []
# Picture
elif self._is_picture(line):
caption = None
if len(caption_data) > 0:
caption = doc.add_text(
text=" ".join(caption_data), label=DocItemLabel.CAPTION
)
caption_data = []
item = self._parse_picture(line)
size: Size
if "width" in item and "height" in item:
size = Size(width=int(item["width"]), height=int(item["height"]))
else:
size = Size(width=DEFAULT_IMAGE_WIDTH, height=DEFAULT_IMAGE_HEIGHT)
uri = None
if (
"uri" in item
and not item["uri"].startswith("http")
and item["uri"].startswith("//")
):
uri = "file:" + item["uri"]
elif (
"uri" in item
and not item["uri"].startswith("http")
and item["uri"].startswith("/")
):
uri = "file:/" + item["uri"]
elif "uri" in item and not item["uri"].startswith("http"):
uri = "file://" + item["uri"]
image = ImageRef(mimetype="image/png", size=size, dpi=70, uri=uri)
doc.add_picture(image=image, caption=caption)
# Caption
elif self._is_caption(line) and len(caption_data) == 0:
item = self._parse_caption(line)
caption_data.append(item["text"])
elif (
len(line.strip()) > 0 and len(caption_data) > 0
): # allow multiline captions
item = self._parse_text(line)
caption_data.append(item["text"])
# Plain text
elif len(line.strip()) == 0 and len(text_data) > 0:
doc.add_text(
text=" ".join(text_data),
label=DocItemLabel.PARAGRAPH,
parent=self._get_current_parent(parents),
)
text_data = []
elif len(line.strip()) > 0: # allow multiline texts
item = self._parse_text(line)
text_data.append(item["text"])
if len(text_data) > 0:
doc.add_text(
text=" ".join(text_data),
label=DocItemLabel.PARAGRAPH,
parent=self._get_current_parent(parents),
)
text_data = []
if in_table and len(table_data) > 0:
data = self._populate_table_as_grid(table_data)
doc.add_table(data=data, parent=self._get_current_parent(parents))
in_table = False
table_data = []
return doc
@staticmethod
def _get_current_level(parents):
for k, v in parents.items():
if v is None and k > 0:
return k - 1
return 0
@staticmethod
def _get_current_parent(parents):
for k, v in parents.items():
if v is None and k > 0:
return parents[k - 1]
return None
# ========= Title
@staticmethod
def _is_title(line):
return re.match(r"^= ", line)
@staticmethod
def _parse_title(line):
return {"type": "title", "text": line[2:].strip(), "level": 0}
# ========= Section headers
@staticmethod
def _is_section_header(line):
return re.match(r"^==+\s+", line)
@staticmethod
def _parse_section_header(line):
match = re.match(r"^(=+)\s+(.*)", line)
marker = match.group(1) # The list marker (e.g., "*", "-", "1.")
text = match.group(2) # The actual text of the list item
header_level = marker.count("=") # number of '=' represents level
return {
"type": "header",
"level": header_level - 1,
"text": text.strip(),
}
# ========= Lists
@staticmethod
def _is_list_item(line):
return re.match(r"^(\s)*(\*|-|\d+\.|\w+\.) ", line)
@staticmethod
def _parse_list_item(line):
"""Extract the item marker (number or bullet symbol) and the text of the item."""
match = re.match(r"^(\s*)(\*|-|\d+\.)\s+(.*)", line)
if match:
indent = match.group(1)
marker = match.group(2) # The list marker (e.g., "*", "-", "1.")
text = match.group(3) # The actual text of the list item
if marker == "*" or marker == "-":
return {
"type": "list_item",
"marker": marker,
"text": text.strip(),
"numbered": False,
"indent": 0 if indent is None else len(indent),
}
else:
return {
"type": "list_item",
"marker": marker,
"text": text.strip(),
"numbered": True,
"indent": 0 if indent is None else len(indent),
}
else:
# Fallback if no match
return {
"type": "list_item",
"marker": "-",
"text": line,
"numbered": False,
"indent": 0,
}
# ========= Tables
@staticmethod
def _is_table_line(line):
return re.match(r"^\|.*\|", line)
@staticmethod
def _parse_table_line(line):
# Split table cells and trim extra spaces
return [cell.strip() for cell in line.split("|") if cell.strip()]
@staticmethod
def _populate_table_as_grid(table_data):
num_rows = len(table_data)
# Adjust the table data into a grid format
num_cols = max(len(row) for row in table_data)
data = TableData(num_rows=num_rows, num_cols=num_cols, table_cells=[])
for row_idx, row in enumerate(table_data):
# Pad rows with empty strings to match column count
# grid.append(row + [''] * (max_cols - len(row)))
for col_idx, text in enumerate(row):
row_span = 1
col_span = 1
cell = TableCell(
text=text,
row_span=row_span,
col_span=col_span,
start_row_offset_idx=row_idx,
end_row_offset_idx=row_idx + row_span,
start_col_offset_idx=col_idx,
end_col_offset_idx=col_idx + col_span,
column_header=row_idx == 0,
row_header=False,
)
data.table_cells.append(cell)
return data
# ========= Pictures
@staticmethod
def _is_picture(line):
return re.match(r"^image::", line)
@staticmethod
def _parse_picture(line):
"""
Parse an image macro, extracting its path and attributes.
Syntax: image::path/to/image.png[Alt Text, width=200, height=150, align=center]
"""
mtch = re.match(r"^image::(.+)\[(.*)\]$", line)
if mtch:
picture_path = mtch.group(1).strip()
attributes = mtch.group(2).split(",")
picture_info = {"type": "picture", "uri": picture_path}
# Extract optional attributes (alt text, width, height, alignment)
if attributes:
picture_info["alt"] = attributes[0].strip() if attributes[0] else ""
for attr in attributes[1:]:
key, value = attr.split("=")
picture_info[key.strip()] = value.strip()
return picture_info
return {"type": "picture", "uri": line}
# ========= Captions
@staticmethod
def _is_caption(line):
return re.match(r"^\.(.+)", line)
@staticmethod
def _parse_caption(line):
mtch = re.match(r"^\.(.+)", line)
if mtch:
text = mtch.group(1)
return {"type": "caption", "text": text}
return {"type": "caption", "text": ""}
# ========= Plain text
@staticmethod
def _parse_text(line):
return {"type": "text", "text": line.strip()}
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/pdf_backend.py | docling/backend/pdf_backend.py | from abc import ABC, abstractmethod
from collections.abc import Iterable
from io import BytesIO
from pathlib import Path
from typing import Optional, Set, Union
from docling_core.types.doc import BoundingBox, Size
from docling_core.types.doc.page import SegmentedPdfPage, TextCell
from PIL import Image
from docling.backend.abstract_backend import PaginatedDocumentBackend
from docling.datamodel.backend_options import PdfBackendOptions
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
class PdfPageBackend(ABC):
@abstractmethod
def get_text_in_rect(self, bbox: BoundingBox) -> str:
pass
@abstractmethod
def get_segmented_page(self) -> Optional[SegmentedPdfPage]:
pass
@abstractmethod
def get_text_cells(self) -> Iterable[TextCell]:
pass
@abstractmethod
def get_bitmap_rects(self, float: int = 1) -> Iterable[BoundingBox]:
pass
@abstractmethod
def get_page_image(
self, scale: float = 1, cropbox: Optional[BoundingBox] = None
) -> Image.Image:
pass
@abstractmethod
def get_size(self) -> Size:
pass
@abstractmethod
def is_valid(self) -> bool:
pass
@abstractmethod
def unload(self):
pass
class PdfDocumentBackend(PaginatedDocumentBackend):
def __init__(
self,
in_doc: InputDocument,
path_or_stream: Union[BytesIO, Path],
options: PdfBackendOptions = PdfBackendOptions(),
):
super().__init__(in_doc, path_or_stream, options)
self.options: PdfBackendOptions
if self.input_format not in self.supported_formats():
raise RuntimeError(
f"Incompatible file format {self.input_format} was passed to a PdfDocumentBackend. Valid format are {','.join(self.supported_formats())}."
)
@abstractmethod
def load_page(self, page_no: int) -> PdfPageBackend:
pass
@abstractmethod
def page_count(self) -> int:
pass
@classmethod
def supported_formats(cls) -> Set[InputFormat]:
return {InputFormat.PDF}
@classmethod
def supports_pagination(cls) -> bool:
return True
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/__init__.py | docling/backend/__init__.py | python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false | |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/mets_gbs_backend.py | docling/backend/mets_gbs_backend.py | """Backend for GBS Google Books schema."""
import logging
import tarfile
from collections.abc import Iterable
from dataclasses import dataclass
from enum import Enum
from io import BytesIO
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, Union
from docling_core.types.doc import BoundingBox, CoordOrigin, Size
from docling_core.types.doc.page import (
BoundingRectangle,
PdfPageBoundaryType,
PdfPageGeometry,
SegmentedPdfPage,
TextCell,
)
from lxml import etree
from PIL import Image
from PIL.Image import Image as PILImage
from docling.backend.abstract_backend import PaginatedDocumentBackend
from docling.backend.pdf_backend import PdfDocumentBackend, PdfPageBackend
from docling.datamodel.base_models import InputFormat
if TYPE_CHECKING:
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
def _get_pdf_page_geometry(
size: Size,
) -> PdfPageGeometry:
boundary_type: PdfPageBoundaryType = PdfPageBoundaryType.CROP_BOX
bbox_tuple = (0, 0, size.width, size.height)
bbox = BoundingBox.from_tuple(bbox_tuple, CoordOrigin.TOPLEFT)
return PdfPageGeometry(
angle=0.0,
rect=BoundingRectangle.from_bounding_box(bbox),
boundary_type=boundary_type,
art_bbox=bbox,
bleed_bbox=bbox,
crop_bbox=bbox,
media_bbox=bbox,
trim_bbox=bbox,
)
class MetsGbsPageBackend(PdfPageBackend):
def __init__(self, parsed_page: SegmentedPdfPage, page_im: PILImage):
self._im = page_im
self._dpage = parsed_page
self.valid = parsed_page is not None
def is_valid(self) -> bool:
return self.valid
def get_text_in_rect(self, bbox: BoundingBox) -> str:
# Find intersecting cells on the page
text_piece = ""
page_size = self.get_size()
scale = (
1 # FIX - Replace with param in get_text_in_rect across backends (optional)
)
for i, cell in enumerate(self._dpage.textline_cells):
cell_bbox = (
cell.rect.to_bounding_box()
.to_top_left_origin(page_height=page_size.height)
.scaled(scale)
)
overlap_frac = cell_bbox.intersection_over_self(bbox)
if overlap_frac > 0.5:
if len(text_piece) > 0:
text_piece += " "
text_piece += cell.text
return text_piece
def get_segmented_page(self) -> Optional[SegmentedPdfPage]:
return self._dpage
def get_text_cells(self) -> Iterable[TextCell]:
return self._dpage.textline_cells
def get_bitmap_rects(self, scale: float = 1) -> Iterable[BoundingBox]:
AREA_THRESHOLD = 0 # 32 * 32
images = self._dpage.bitmap_resources
for img in images:
cropbox = img.rect.to_bounding_box().to_top_left_origin(
self.get_size().height
)
if cropbox.area() > AREA_THRESHOLD:
cropbox = cropbox.scaled(scale=scale)
yield cropbox
def get_page_image(
self, scale: float = 1, cropbox: Optional[BoundingBox] = None
) -> Image.Image:
page_size = self.get_size()
assert (
page_size.width == self._im.size[0] and page_size.height == self._im.size[1]
)
if not cropbox:
cropbox = BoundingBox(
l=0,
r=page_size.width,
t=0,
b=page_size.height,
coord_origin=CoordOrigin.TOPLEFT,
)
image = self._im.resize(
size=(round(page_size.width * scale), round(page_size.height * scale))
).crop(cropbox.scaled(scale=scale).as_tuple())
return image
def get_size(self) -> Size:
return Size(
width=self._dpage.dimension.width, height=self._dpage.dimension.height
)
def unload(self) -> None:
if hasattr(self, "_im"):
delattr(self, "_im")
if hasattr(self, "_dpage"):
delattr(self, "_dpage")
class _UseType(str, Enum):
IMAGE = "image"
OCR = "OCR"
COORD_OCR = "coordOCR"
@dataclass
class _FileInfo:
file_id: str
mimetype: str
path: str
use: _UseType
@dataclass
class _PageFiles:
image: Optional[_FileInfo] = None
ocr: Optional[_FileInfo] = None
coordOCR: Optional[_FileInfo] = None
def _extract_rect(title_str: str) -> Optional[BoundingRectangle]:
"""
Extracts bbox from title string like 'bbox 279 177 306 214;x_wconf 97'
"""
parts = title_str.split(";")
for part in parts:
part = part.strip()
if part.startswith("bbox "):
try:
coords = part.split()[1:]
rect = BoundingRectangle.from_bounding_box(
bbox=BoundingBox.from_tuple(
tuple(map(int, coords)), origin=CoordOrigin.TOPLEFT
)
)
return rect
except Exception:
return None
return None
def _extract_confidence(title_str) -> float:
"""Extracts x_wconf (OCR confidence) value from title string."""
for part in title_str.split(";"):
part = part.strip()
if part.startswith("x_wconf"):
try:
return float(part.split()[1]) / 100.0
except Exception:
return 1
return 1
class MetsGbsDocumentBackend(PdfDocumentBackend):
def __init__(self, in_doc: "InputDocument", path_or_stream: Union[BytesIO, Path]):
super().__init__(in_doc, path_or_stream)
self._tar: tarfile.TarFile = (
tarfile.open(name=self.path_or_stream, mode="r:gz")
if isinstance(self.path_or_stream, Path)
else tarfile.open(fileobj=self.path_or_stream, mode="r:gz")
)
self.root_mets: Optional[etree._Element] = None
self.page_map: Dict[int, _PageFiles] = {}
for member in self._tar.getmembers():
if member.name.endswith(".xml"):
file = self._tar.extractfile(member)
if file is not None:
content = file.read()
self.root_mets = self._validate_mets_xml(content)
if self.root_mets is not None:
break
if self.root_mets is None:
raise RuntimeError(
f"METS GBS backend could not load document {self.document_hash}."
)
ns = {
"mets": "http://www.loc.gov/METS/",
"xlink": "http://www.w3.org/1999/xlink",
"xsi": "http://www.w3.org/2001/XMLSchema-instance",
"gbs": "http://books.google.com/gbs",
"premis": "info:lc/xmlns/premis-v2",
"marc": "http://www.loc.gov/MARC21/slim",
}
file_info_by_id: Dict[str, _FileInfo] = {}
for filegrp in self.root_mets.xpath(".//mets:fileGrp", namespaces=ns):
use_raw = filegrp.get("USE")
try:
use = _UseType(use_raw)
except ValueError:
continue # Ignore unknown USE types
for file_elem in filegrp.xpath("./mets:file", namespaces=ns):
file_id = file_elem.get("ID")
mimetype = file_elem.get("MIMETYPE")
flocat_elem = file_elem.find("mets:FLocat", namespaces=ns)
href = (
flocat_elem.get("{http://www.w3.org/1999/xlink}href")
if flocat_elem is not None
else None
)
if href is None:
continue
file_info_by_id[file_id] = _FileInfo(
file_id=file_id, mimetype=mimetype, path=href, use=use
)
USE_TO_ATTR = {
_UseType.IMAGE: "image",
_UseType.OCR: "ocr",
_UseType.COORD_OCR: "coordOCR",
}
for div in self.root_mets.xpath('.//mets:div[@TYPE="page"]', namespaces=ns):
order_str = div.get("ORDER")
if not order_str:
continue
try:
page_no = int(order_str) - 1 # make 0-index pages
except ValueError:
continue
page_files = _PageFiles()
for fptr in div.xpath("./mets:fptr", namespaces=ns):
file_id = fptr.get("FILEID")
file_info = file_info_by_id.get(file_id)
if file_info:
attr = USE_TO_ATTR.get(file_info.use)
if attr:
setattr(page_files, attr, file_info)
self.page_map[page_no] = page_files
def _validate_mets_xml(self, xml_string) -> Optional[etree._Element]:
root: etree._Element = etree.fromstring(xml_string)
if (
root.tag == "{http://www.loc.gov/METS/}mets"
and root.get("PROFILE") == "gbs"
):
return root
_log.warning(f"The root element is not <mets:mets> with PROFILE='gbs': {root}")
return None
def _parse_page(self, page_no: int) -> Tuple[SegmentedPdfPage, PILImage]:
# TODO: use better fallbacks...
image_info = self.page_map[page_no].image
assert image_info is not None
ocr_info = self.page_map[page_no].coordOCR
assert ocr_info is not None
image_file = self._tar.extractfile(image_info.path)
assert image_file is not None
buf = BytesIO(image_file.read())
im: PILImage = Image.open(buf)
ocr_file = self._tar.extractfile(ocr_info.path)
assert ocr_file is not None
ocr_content = ocr_file.read()
parser = etree.HTMLParser()
ocr_root: etree._Element = etree.fromstring(ocr_content, parser=parser)
line_cells: List[TextCell] = []
word_cells: List[TextCell] = []
page_div = ocr_root.xpath("//div[@class='ocr_page']")
size = Size(width=im.size[0], height=im.size[1])
if page_div:
title = page_div[0].attrib.get("title", "")
rect = _extract_rect(title)
if rect:
size = Size(width=rect.width, height=rect.height)
else:
_log.error(f"Could not find ocr_page for page {page_no}")
im = im.resize(size=(round(size.width), round(size.height)))
im = im.convert("RGB")
# Extract all ocrx_word spans
for ix, word in enumerate(ocr_root.xpath("//span[@class='ocrx_word']")):
text = "".join(word.itertext()).strip()
title = word.attrib.get("title", "")
rect = _extract_rect(title)
conf = _extract_confidence(title)
if rect:
word_cells.append(
TextCell(
index=ix,
text=text,
orig=text,
rect=rect,
from_ocr=True,
confidence=conf,
)
)
# Extract all ocr_line spans
# line: etree._Element
for ix, line in enumerate(ocr_root.xpath("//span[@class='ocr_line']")):
text = "".join(line.itertext()).strip()
title = line.attrib.get("title", "")
rect = _extract_rect(title)
conf = _extract_confidence(title)
if rect:
line_cells.append(
TextCell(
index=ix,
text=text,
orig=text,
rect=rect,
from_ocr=True,
confidence=conf,
)
)
page = SegmentedPdfPage(
dimension=_get_pdf_page_geometry(size),
textline_cells=line_cells,
char_cells=[],
word_cells=word_cells,
has_textlines=True,
has_words=True,
has_chars=False,
)
return page, im
def page_count(self) -> int:
return len(self.page_map)
def load_page(self, page_no: int) -> MetsGbsPageBackend:
# TODO: is this thread-safe?
page, im = self._parse_page(page_no)
return MetsGbsPageBackend(parsed_page=page, page_im=im)
def is_valid(self) -> bool:
return self.root_mets is not None and self.page_count() > 0
@classmethod
def supported_formats(cls) -> Set[InputFormat]:
return {InputFormat.METS_GBS}
@classmethod
def supports_pagination(cls) -> bool:
return True
def unload(self) -> None:
super().unload()
self._tar.close()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/csv_backend.py | docling/backend/csv_backend.py | import csv
import logging
import warnings
from io import BytesIO, StringIO
from pathlib import Path
from typing import Set, Union
from docling_core.types.doc import DoclingDocument, DocumentOrigin, TableCell, TableData
from docling.backend.abstract_backend import DeclarativeDocumentBackend
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
class CsvDocumentBackend(DeclarativeDocumentBackend):
content: StringIO
def __init__(self, in_doc: "InputDocument", path_or_stream: Union[BytesIO, Path]):
super().__init__(in_doc, path_or_stream)
# Load content
try:
if isinstance(self.path_or_stream, BytesIO):
self.content = StringIO(self.path_or_stream.getvalue().decode("utf-8"))
elif isinstance(self.path_or_stream, Path):
self.content = StringIO(self.path_or_stream.read_text("utf-8"))
self.valid = True
except Exception as e:
raise RuntimeError(
f"CsvDocumentBackend could not load document with hash {self.document_hash}"
) from e
return
def is_valid(self) -> bool:
return self.valid
@classmethod
def supports_pagination(cls) -> bool:
return False
def unload(self):
if isinstance(self.path_or_stream, BytesIO):
self.path_or_stream.close()
self.path_or_stream = None
@classmethod
def supported_formats(cls) -> Set[InputFormat]:
return {InputFormat.CSV}
def convert(self) -> DoclingDocument:
"""
Parses the CSV data into a structured document model.
"""
# Detect CSV dialect
head = self.content.readline()
dialect = csv.Sniffer().sniff(head, ",;\t|:")
_log.info(f'Parsing CSV with delimiter: "{dialect.delimiter}"')
if dialect.delimiter not in {",", ";", "\t", "|", ":"}:
raise RuntimeError(
f"Cannot convert csv with unknown delimiter {dialect.delimiter}."
)
# Parce CSV
self.content.seek(0)
result = csv.reader(self.content, dialect=dialect, strict=True)
self.csv_data = list(result)
_log.info(f"Detected {len(self.csv_data)} lines")
# Ensure uniform column length
expected_length = len(self.csv_data[0])
is_uniform = all(len(row) == expected_length for row in self.csv_data)
if not is_uniform:
warnings.warn(
f"Inconsistent column lengths detected in CSV data. "
f"Expected {expected_length} columns, but found rows with varying lengths. "
f"Ensure all rows have the same number of columns."
)
# Parse the CSV into a structured document model
origin = DocumentOrigin(
filename=self.file.name or "file.csv",
mimetype="text/csv",
binary_hash=self.document_hash,
)
doc = DoclingDocument(name=self.file.stem or "file.csv", origin=origin)
if self.is_valid():
# Convert CSV data to table
if self.csv_data:
num_rows = len(self.csv_data)
num_cols = max(len(row) for row in self.csv_data)
table_data = TableData(
num_rows=num_rows,
num_cols=num_cols,
table_cells=[],
)
# Convert each cell to TableCell
for row_idx, row in enumerate(self.csv_data):
for col_idx, cell_value in enumerate(row):
cell = TableCell(
text=str(cell_value),
row_span=1, # CSV doesn't support merged cells
col_span=1,
start_row_offset_idx=row_idx,
end_row_offset_idx=row_idx + 1,
start_col_offset_idx=col_idx,
end_col_offset_idx=col_idx + 1,
column_header=row_idx == 0, # First row as header
row_header=False,
)
table_data.table_cells.append(cell)
doc.add_table(data=table_data)
else:
raise RuntimeError(
f"Cannot convert doc with {self.document_hash} because the backend failed to init."
)
return doc
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/webvtt_backend.py | docling/backend/webvtt_backend.py | import logging
import re
from io import BytesIO
from pathlib import Path
from typing import Annotated, ClassVar, Literal, Optional, Union, cast
from docling_core.types.doc import (
ContentLayer,
DocItemLabel,
DoclingDocument,
DocumentOrigin,
Formatting,
GroupLabel,
NodeItem,
)
from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
from pydantic.types import StringConstraints
from typing_extensions import Self, override
from docling.backend.abstract_backend import DeclarativeDocumentBackend
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
class _WebVTTTimestamp(BaseModel):
"""Model representing a WebVTT timestamp.
A WebVTT timestamp is always interpreted relative to the current playback position
of the media data that the WebVTT file is to be synchronized with.
"""
model_config = ConfigDict(regex_engine="python-re")
raw: Annotated[
str,
Field(
description="A representation of the WebVTT Timestamp as a single string"
),
]
_pattern: ClassVar[re.Pattern] = re.compile(
r"^(?:(\d{2,}):)?([0-5]\d):([0-5]\d)\.(\d{3})$"
)
_hours: int
_minutes: int
_seconds: int
_millis: int
@model_validator(mode="after")
def validate_raw(self) -> Self:
m = self._pattern.match(self.raw)
if not m:
raise ValueError(f"Invalid WebVTT timestamp format: {self.raw}")
self._hours = int(m.group(1)) if m.group(1) else 0
self._minutes = int(m.group(2))
self._seconds = int(m.group(3))
self._millis = int(m.group(4))
if self._minutes < 0 or self._minutes > 59:
raise ValueError("Minutes must be between 0 and 59")
if self._seconds < 0 or self._seconds > 59:
raise ValueError("Seconds must be between 0 and 59")
return self
@property
def seconds(self) -> float:
"""A representation of the WebVTT Timestamp in seconds"""
return (
self._hours * 3600
+ self._minutes * 60
+ self._seconds
+ self._millis / 1000.0
)
@override
def __str__(self) -> str:
return self.raw
_WebVTTCueIdentifier = Annotated[
str, StringConstraints(strict=True, pattern=r"^(?!.*-->)[^\n\r]+$")
]
class _WebVTTCueTimings(BaseModel):
"""Model representating WebVTT cue timings."""
start: Annotated[
_WebVTTTimestamp, Field(description="Start time offset of the cue")
]
end: Annotated[_WebVTTTimestamp, Field(description="End time offset of the cue")]
@model_validator(mode="after")
def check_order(self) -> Self:
if self.start and self.end:
if self.end.seconds <= self.start.seconds:
raise ValueError("End timestamp must be greater than start timestamp")
return self
@override
def __str__(self):
return f"{self.start} --> {self.end}"
class _WebVTTCueTextSpan(BaseModel):
"""Model representing a WebVTT cue text span."""
text: str
span_type: Literal["text"] = "text"
@field_validator("text", mode="after")
@classmethod
def validate_text(cls, value: str) -> str:
if any(ch in value for ch in {"\n", "\r", "&", "<"}):
raise ValueError("Cue text span contains invalid characters")
if len(value) == 0:
raise ValueError("Cue text span cannot be empty")
return value
@override
def __str__(self):
return self.text
class _WebVTTCueVoiceSpan(BaseModel):
"""Model representing a WebVTT cue voice span."""
annotation: Annotated[
str,
Field(
description=(
"Cue span start tag annotation text representing the name of thevoice"
)
),
]
classes: Annotated[
list[str],
Field(description="List of classes representing the cue span's significance"),
] = []
components: Annotated[
list["_WebVTTCueComponent"],
Field(description="The components representing the cue internal text"),
] = []
span_type: Literal["v"] = "v"
@field_validator("annotation", mode="after")
@classmethod
def validate_annotation(cls, value: str) -> str:
if any(ch in value for ch in {"\n", "\r", "&", ">"}):
raise ValueError(
"Cue span start tag annotation contains invalid characters"
)
if not value:
raise ValueError("Cue text span cannot be empty")
return value
@field_validator("classes", mode="after")
@classmethod
def validate_classes(cls, value: list[str]) -> list[str]:
for item in value:
if any(ch in item for ch in {"\t", "\n", "\r", " ", "&", "<", ">", "."}):
raise ValueError(
"A cue span start tag class contains invalid characters"
)
if not item:
raise ValueError("Cue span start tag classes cannot be empty")
return value
@override
def __str__(self):
tag = f"v.{'.'.join(self.classes)}" if self.classes else "v"
inner = "".join(str(span) for span in self.components)
return f"<{tag} {self.annotation}>{inner}</v>"
class _WebVTTCueClassSpan(BaseModel):
span_type: Literal["c"] = "c"
components: list["_WebVTTCueComponent"]
@override
def __str__(self):
inner = "".join(str(span) for span in self.components)
return f"<c>{inner}</c>"
class _WebVTTCueItalicSpan(BaseModel):
span_type: Literal["i"] = "i"
components: list["_WebVTTCueComponent"]
@override
def __str__(self):
inner = "".join(str(span) for span in self.components)
return f"<i>{inner}</i>"
class _WebVTTCueBoldSpan(BaseModel):
span_type: Literal["b"] = "b"
components: list["_WebVTTCueComponent"]
@override
def __str__(self):
inner = "".join(str(span) for span in self.components)
return f"<b>{inner}</b>"
class _WebVTTCueUnderlineSpan(BaseModel):
span_type: Literal["u"] = "u"
components: list["_WebVTTCueComponent"]
@override
def __str__(self):
inner = "".join(str(span) for span in self.components)
return f"<u>{inner}</u>"
_WebVTTCueComponent = Annotated[
Union[
_WebVTTCueTextSpan,
_WebVTTCueClassSpan,
_WebVTTCueItalicSpan,
_WebVTTCueBoldSpan,
_WebVTTCueUnderlineSpan,
_WebVTTCueVoiceSpan,
],
Field(discriminator="span_type", description="The WebVTT cue component"),
]
class _WebVTTCueBlock(BaseModel):
    """Model representing a WebVTT cue block.
    The optional WebVTT cue settings list is not supported.
    The cue payload is limited to the following spans: text, class, italic, bold,
    underline, and voice.
    """
    # Validate string patterns with Python's `re` engine instead of pydantic's
    # default engine.
    model_config = ConfigDict(regex_engine="python-re")
    identifier: Optional[_WebVTTCueIdentifier] = Field(
        None, description="The WebVTT cue identifier"
    )
    timings: Annotated[_WebVTTCueTimings, Field(description="The WebVTT cue timings")]
    payload: Annotated[list[_WebVTTCueComponent], Field(description="The cue payload")]
    # Matches any supported open or close tag: <i>, <b>, <c>, <u>, voice tags
    # like <v.class1.class2 Speaker>, and their closing forms. Group 1 is the
    # optional "/", group 2 the tag name, group 3 the optional annotation.
    _pattern_block: ClassVar[re.Pattern] = re.compile(
        r"<(/?)(i|b|c|u|v(?:\.[^\t\n\r &<>.]+)*)(?:\s+([^>]*))?>"
    )
    # Decomposes a voice start tag into its dot-separated classes and the
    # speaker annotation.
    _pattern_voice_tag: ClassVar[re.Pattern] = re.compile(
        r"^<v(?P<class>\.[^\t\n\r &<>]+)?"  # zero or more classes
        r"[ \t]+(?P<annotation>[^\n\r&>]+)>"  # required space and annotation
    )
    @field_validator("payload", mode="after")
    @classmethod
    def validate_payload(cls, payload):
        # The serialized form of any span must never contain the timings
        # separator, otherwise the block would not round-trip through __str__.
        for voice in payload:
            if "-->" in str(voice):
                raise ValueError("Cue payload must not contain '-->'")
        return payload
    @classmethod
    def parse(cls, raw: str) -> "_WebVTTCueBlock":
        """Parse one raw cue block (identifier?, timings line, payload lines).

        Raises:
            ValueError: if the block is empty or has no timings line.
        """
        lines = raw.strip().splitlines()
        if not lines:
            raise ValueError("Cue block must have at least one line")
        identifier: Optional[_WebVTTCueIdentifier] = None
        timing_line = lines[0]
        # A first line without "-->" is the optional cue identifier.
        if "-->" not in timing_line and len(lines) > 1:
            identifier = timing_line
            timing_line = lines[1]
            cue_lines = lines[2:]
        else:
            cue_lines = lines[1:]
        if "-->" not in timing_line:
            raise ValueError("Cue block must contain WebVTT cue timings")
        start, end = [t.strip() for t in timing_line.split("-->")]
        end = re.split(" |\t", end)[0]  # ignore the cue settings list
        timings: _WebVTTCueTimings = _WebVTTCueTimings(
            start=_WebVTTTimestamp(raw=start), end=_WebVTTTimestamp(raw=end)
        )
        cue_text = " ".join(cue_lines).strip()
        if cue_text.startswith("<v") and "</v>" not in cue_text:
            # adding close tag for cue voice spans without end tag
            cue_text += "</v>"
        # stack[-1] collects the children of the currently open span;
        # tag_stack mirrors it with the open tag types ("i"/"b"/"u"/"c" or a
        # ("v", raw_tag) tuple for voice spans).
        # NOTE(review): a stray close tag with no matching opener (e.g. a bare
        # "</i>") would pop the root list and then raise IndexError on the next
        # append; _WebVTTFile.parse only catches ValueError — confirm inputs
        # are well-formed or consider guarding the pops.
        stack: list[list[_WebVTTCueComponent]] = [[]]
        tag_stack: list[Union[str, tuple]] = []
        pos = 0
        matches = list(cls._pattern_block.finditer(cue_text))
        i = 0
        while i < len(matches):
            match = matches[i]
            # Text between the previous tag and this one becomes a text span.
            if match.start() > pos:
                stack[-1].append(_WebVTTCueTextSpan(text=cue_text[pos : match.start()]))
            tag = match.group(0)
            if tag.startswith(("<i>", "<b>", "<u>", "<c>")):
                # Open tag: start collecting children on a new stack level.
                tag_type = tag[1:2]
                tag_stack.append(tag_type)
                stack.append([])
            elif tag == "</i>":
                children = stack.pop()
                stack[-1].append(_WebVTTCueItalicSpan(components=children))
                tag_stack.pop()
            elif tag == "</b>":
                children = stack.pop()
                stack[-1].append(_WebVTTCueBoldSpan(components=children))
                tag_stack.pop()
            elif tag == "</u>":
                children = stack.pop()
                stack[-1].append(_WebVTTCueUnderlineSpan(components=children))
                tag_stack.pop()
            elif tag == "</c>":
                children = stack.pop()
                stack[-1].append(_WebVTTCueClassSpan(components=children))
                tag_stack.pop()
            elif tag.startswith("<v"):
                # Voice open tag: keep the raw tag so classes/annotation can be
                # extracted when the matching close tag is found.
                tag_stack.append(("v", tag))
                stack.append([])
            elif tag.startswith("</v"):
                children = stack.pop() if stack else []
                if (
                    tag_stack
                    and isinstance(tag_stack[-1], tuple)
                    and tag_stack[-1][0] == "v"
                ):
                    _, voice = cast(tuple, tag_stack.pop())
                    voice_match = cls._pattern_voice_tag.match(voice)
                    if voice_match:
                        class_string = voice_match.group("class")
                        annotation = voice_match.group("annotation")
                        if annotation:
                            classes: list[str] = []
                            if class_string:
                                classes = [c for c in class_string.split(".") if c]
                            stack[-1].append(
                                _WebVTTCueVoiceSpan(
                                    annotation=annotation.strip(),
                                    classes=classes,
                                    components=children,
                                )
                            )
            pos = match.end()
            i += 1
        # Trailing text after the last tag.
        if pos < len(cue_text):
            stack[-1].append(_WebVTTCueTextSpan(text=cue_text[pos:]))
        return cls(
            identifier=identifier,
            timings=timings,
            payload=stack[0],
        )
    def __str__(self):
        """Serialize the block back to WebVTT text (identifier, timings, payload)."""
        parts = []
        if self.identifier:
            parts.append(f"{self.identifier}\n")
        timings_line = str(self.timings)
        parts.append(timings_line + "\n")
        for idx, span in enumerate(self.payload):
            if idx == 0 and len(self.payload) == 1 and span.span_type == "v":
                # the end tag may be omitted for brevity
                parts.append(str(span).removesuffix("</v>"))
            else:
                parts.append(str(span))
        return "".join(parts)
class _WebVTTFile(BaseModel):
    """A model representing a WebVTT file."""

    cue_blocks: list[_WebVTTCueBlock]

    @staticmethod
    def verify_signature(content: str) -> bool:
        """Return True if `content` starts with a valid WebVTT signature."""
        if len(content) < 6:
            return False
        if len(content) == 6:
            return content == "WEBVTT"
        # Longer content: "WEBVTT" must be followed by space, tab, or newline.
        return content.startswith("WEBVTT") and content[6] in (" ", "\t", "\n")

    @classmethod
    def parse(cls, raw: str) -> "_WebVTTFile":
        """Parse a whole WebVTT file, skipping cue blocks that fail to parse."""
        # Normalize newlines to LF
        normalized = raw.replace("\r\n", "\n").replace("\r", "\n")
        # Check WebVTT signature
        if not cls.verify_signature(normalized):
            raise ValueError("Invalid WebVTT file signature")
        # Strip "WEBVTT" header line
        header_split = normalized.split("\n", 1)
        body = header_split[1] if len(header_split) > 1 else ""
        # Remove NOTE/STYLE/REGION blocks
        body = re.sub(r"^(NOTE[^\n]*\n(?:.+\n)*?)\n", "", body, flags=re.MULTILINE)
        body = re.sub(r"^(STYLE|REGION)(?:.+\n)*?\n", "", body, flags=re.MULTILINE)
        # Split into cue blocks
        chunks = re.split(r"\n\s*\n", body.strip())
        parsed_blocks: list[_WebVTTCueBlock] = []
        for block in chunks:
            try:
                parsed_blocks.append(_WebVTTCueBlock.parse(block))
            except ValueError as e:
                _log.warning(f"Failed to parse cue block:\n{block}\n{e}")
        return cls(cue_blocks=parsed_blocks)

    def __iter__(self):
        return iter(self.cue_blocks)

    def __getitem__(self, idx):
        return self.cue_blocks[idx]

    def __len__(self):
        return len(self.cue_blocks)
class WebVTTDocumentBackend(DeclarativeDocumentBackend):
    """Declarative backend for WebVTT (.vtt) files.
    This parser reads the content of a WebVTT file and converts
    it to a DoclingDocument, following the W3C specs on https://www.w3.org/TR/webvtt1
    Each cue becomes a TextItem and the items are appended to the
    document body by the cue's start time.
    """
    @override
    def __init__(self, in_doc: InputDocument, path_or_stream: Union[BytesIO, Path]):
        super().__init__(in_doc, path_or_stream)
        self.content: str = ""
        try:
            # Accept either an in-memory stream or a filesystem path; both are
            # decoded as UTF-8 into a single string for later parsing.
            if isinstance(self.path_or_stream, BytesIO):
                self.content = self.path_or_stream.getvalue().decode("utf-8")
            if isinstance(self.path_or_stream, Path):
                with open(self.path_or_stream, encoding="utf-8") as f:
                    self.content = f.read()
        except Exception as e:
            raise RuntimeError(
                "Could not initialize the WebVTT backend for file with hash "
                f"{self.document_hash}."
            ) from e
    @override
    def is_valid(self) -> bool:
        # A document is considered valid if it carries the WEBVTT signature.
        return _WebVTTFile.verify_signature(self.content)
    @classmethod
    @override
    def supports_pagination(cls) -> bool:
        return False
    @override
    def unload(self):
        # Release the underlying stream; the parsed content string is kept.
        if isinstance(self.path_or_stream, BytesIO):
            self.path_or_stream.close()
        self.path_or_stream = None
    @classmethod
    @override
    def supported_formats(cls) -> set[InputFormat]:
        return {InputFormat.VTT}
    @staticmethod
    def _add_text_from_component(
        doc: DoclingDocument, item: _WebVTTCueComponent, parent: Optional[NodeItem]
    ) -> None:
        """Adds a TextItem to a document by extracting text from a cue span component.
        TODO: address nesting
        """
        formatting = Formatting()
        text = ""
        # Map the span type to a docling formatting flag (class spans get none).
        if isinstance(item, _WebVTTCueItalicSpan):
            formatting.italic = True
        elif isinstance(item, _WebVTTCueBoldSpan):
            formatting.bold = True
        elif isinstance(item, _WebVTTCueUnderlineSpan):
            formatting.underline = True
        if isinstance(item, _WebVTTCueTextSpan):
            text = item.text
        else:
            # TODO: address nesting
            # Only direct text children are collected; nested styled spans are
            # currently dropped.
            text = "".join(
                [t.text for t in item.components if isinstance(t, _WebVTTCueTextSpan)]
            )
        # Skip whitespace-only spans entirely.
        if text := text.strip():
            doc.add_text(
                label=DocItemLabel.TEXT,
                text=text,
                parent=parent,
                content_layer=ContentLayer.BODY,
                formatting=formatting,
            )
    @override
    def convert(self) -> DoclingDocument:
        """Convert the WebVTT content into a DoclingDocument.

        Each cue block becomes a section group holding its (optional)
        identifier, its timings line, and one text item per payload span.

        Raises:
            RuntimeError: if the content does not carry a WebVTT signature.
        """
        _log.debug("Starting WebVTT conversion...")
        if not self.is_valid():
            raise RuntimeError("Invalid WebVTT document.")
        origin = DocumentOrigin(
            filename=self.file.name or "file",
            mimetype="text/vtt",
            binary_hash=self.document_hash,
        )
        doc = DoclingDocument(name=self.file.stem or "file", origin=origin)
        vtt: _WebVTTFile = _WebVTTFile.parse(self.content)
        for block in vtt.cue_blocks:
            block_group = doc.add_group(
                label=GroupLabel.SECTION,
                name="WebVTT cue block",
                parent=None,
                content_layer=ContentLayer.BODY,
            )
            if block.identifier:
                doc.add_text(
                    label=DocItemLabel.TEXT,
                    text=str(block.identifier),
                    parent=block_group,
                    content_layer=ContentLayer.BODY,
                )
            doc.add_text(
                label=DocItemLabel.TEXT,
                text=str(block.timings),
                parent=block_group,
                content_layer=ContentLayer.BODY,
            )
            for cue_span in block.payload:
                if isinstance(cue_span, _WebVTTCueVoiceSpan):
                    # Voice spans get their own inline group, prefixed with the
                    # speaker annotation (and classes, if any).
                    voice_group = doc.add_group(
                        label=GroupLabel.INLINE,
                        name="WebVTT cue voice span",
                        parent=block_group,
                        content_layer=ContentLayer.BODY,
                    )
                    voice = cue_span.annotation
                    if classes := cue_span.classes:
                        voice += f" ({', '.join(classes)})"
                    voice += ": "
                    doc.add_text(
                        label=DocItemLabel.TEXT,
                        text=voice,
                        parent=voice_group,
                        content_layer=ContentLayer.BODY,
                    )
                    for item in cue_span.components:
                        WebVTTDocumentBackend._add_text_from_component(
                            doc, item, voice_group
                        )
                else:
                    WebVTTDocumentBackend._add_text_from_component(
                        doc, cue_span, block_group
                    )
        return doc
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/pypdfium2_backend.py | docling/backend/pypdfium2_backend.py | import logging
import random
from collections.abc import Iterable
from io import BytesIO
from pathlib import Path
from typing import TYPE_CHECKING, List, Optional, Union
import pypdfium2 as pdfium
import pypdfium2.raw as pdfium_c
from docling_core.types.doc import BoundingBox, CoordOrigin, Size
from docling_core.types.doc.page import (
BoundingRectangle,
PdfPageBoundaryType,
PdfPageGeometry,
SegmentedPdfPage,
TextCell,
)
from PIL import Image, ImageDraw
from pypdfium2 import PdfTextPage
from pypdfium2._helpers.misc import PdfiumError
from docling.backend.pdf_backend import PdfDocumentBackend, PdfPageBackend
from docling.datamodel.backend_options import PdfBackendOptions
from docling.utils.locks import pypdfium2_lock
def get_pdf_page_geometry(
    ppage: pdfium.PdfPage,
    angle: float = 0.0,
    boundary_type: PdfPageBoundaryType = PdfPageBoundaryType.CROP_BOX,
) -> PdfPageGeometry:
    """
    Create PdfPageGeometry from a pypdfium2 PdfPage object.

    Args:
        ppage: pypdfium2 PdfPage object
        angle: Page rotation angle in degrees (default: 0.0)
        boundary_type: The boundary type for the page (default: CROP_BOX)

    Returns:
        PdfPageGeometry with all the different bounding boxes properly set
    """
    with pypdfium2_lock:
        # Main bounding box (intersection of crop_box and media_box).
        bbox = BoundingBox.from_tuple(ppage.get_bbox(), CoordOrigin.BOTTOMLEFT)

        def to_bbox(box_tuple) -> BoundingBox:
            # pypdfium2 returns (x0, y0, x1, y1) in the PDF coordinate system
            # (bottom-left origin); fall back to the main bbox when a specific
            # box type is not defined for the page.
            if not box_tuple:
                return bbox
            return BoundingBox.from_tuple(box_tuple, CoordOrigin.BOTTOMLEFT)

        return PdfPageGeometry(
            angle=angle,
            rect=BoundingRectangle.from_bounding_box(bbox),
            boundary_type=boundary_type,
            art_bbox=to_bbox(ppage.get_artbox()),
            bleed_bbox=to_bbox(ppage.get_bleedbox()),
            crop_bbox=to_bbox(ppage.get_cropbox()),
            media_bbox=to_bbox(ppage.get_mediabox()),
            trim_bbox=to_bbox(ppage.get_trimbox()),
        )
# InputDocument is needed only for static type checking, so import it lazily
# behind the TYPE_CHECKING guard (not imported at runtime).
if TYPE_CHECKING:
    from docling.datamodel.document import InputDocument
# Module-level logger for this backend.
_log = logging.getLogger(__name__)
class PyPdfiumPageBackend(PdfPageBackend):
    """Page-level backend on top of a pypdfium2 PdfPage.

    All pypdfium2 calls are serialized through `pypdfium2_lock` because the
    underlying library is not thread-safe.
    """
    def __init__(
        self, pdfium_doc: pdfium.PdfDocument, document_hash: str, page_no: int
    ):
        # Note: lock applied by the caller
        self.valid = True # No better way to tell from pypdfium.
        try:
            self._ppage: pdfium.PdfPage = pdfium_doc[page_no]
        except PdfiumError:
            _log.info(
                f"An exception occurred when loading page {page_no} of document {document_hash}.",
                exc_info=True,
            )
            self.valid = False
        # Lazily created text page handle (see _compute_text_cells / get_text_in_rect).
        self.text_page: Optional[PdfTextPage] = None
    def is_valid(self) -> bool:
        # True unless page loading failed in __init__.
        return self.valid
    def _compute_text_cells(self) -> List[TextCell]:
        """Compute text cells from pypdfium."""
        with pypdfium2_lock:
            if not self.text_page:
                self.text_page = self._ppage.get_textpage()
        cells = []
        cell_counter = 0
        page_size = self.get_size()
        with pypdfium2_lock:
            # One raw cell per text rectangle reported by pypdfium2, converted
            # from bottom-left to top-left origin.
            for i in range(self.text_page.count_rects()):
                rect = self.text_page.get_rect(i)
                text_piece = self.text_page.get_text_bounded(*rect)
                x0, y0, x1, y1 = rect
                cells.append(
                    TextCell(
                        index=cell_counter,
                        text=text_piece,
                        orig=text_piece,
                        from_ocr=False,
                        rect=BoundingRectangle.from_bounding_box(
                            BoundingBox(
                                l=x0,
                                b=y0,
                                r=x1,
                                t=y1,
                                coord_origin=CoordOrigin.BOTTOMLEFT,
                            )
                        ).to_top_left_origin(page_size.height),
                    )
                )
                cell_counter += 1
        # PyPdfium2 produces very fragmented cells, with sub-word level boundaries, in many PDFs.
        # The cell merging code below is to clean this up.
        def merge_horizontal_cells(
            cells: List[TextCell],
            horizontal_threshold_factor: float = 1.0,
            vertical_threshold_factor: float = 0.5,
        ) -> List[TextCell]:
            # Groups cells into visual rows, then merges horizontally adjacent
            # cells within each row. Thresholds are relative to cell height.
            if not cells:
                return []
            def group_rows(cells: List[TextCell]) -> List[List[TextCell]]:
                # Consecutive cells whose top/bottom edges are within
                # `vertical_threshold` of the running row bounds join the row.
                rows = []
                current_row = [cells[0]]
                row_top = cells[0].rect.to_bounding_box().t
                row_bottom = cells[0].rect.to_bounding_box().b
                row_height = cells[0].rect.to_bounding_box().height
                for cell in cells[1:]:
                    vertical_threshold = row_height * vertical_threshold_factor
                    if (
                        abs(cell.rect.to_bounding_box().t - row_top)
                        <= vertical_threshold
                        and abs(cell.rect.to_bounding_box().b - row_bottom)
                        <= vertical_threshold
                    ):
                        current_row.append(cell)
                        # Expand the row bounds to cover the new cell.
                        row_top = min(row_top, cell.rect.to_bounding_box().t)
                        row_bottom = max(row_bottom, cell.rect.to_bounding_box().b)
                        row_height = row_bottom - row_top
                    else:
                        rows.append(current_row)
                        current_row = [cell]
                        row_top = cell.rect.to_bounding_box().t
                        row_bottom = cell.rect.to_bounding_box().b
                        row_height = cell.rect.to_bounding_box().height
                if current_row:
                    rows.append(current_row)
                return rows
            def merge_row(row: List[TextCell]) -> List[TextCell]:
                # Within a row, cells whose horizontal gap is at most
                # avg_height * horizontal_threshold_factor are fused together.
                merged = []
                current_group = [row[0]]
                for cell in row[1:]:
                    prev_cell = current_group[-1]
                    avg_height = (
                        prev_cell.rect.height + cell.rect.to_bounding_box().height
                    ) / 2
                    if (
                        cell.rect.to_bounding_box().l
                        - prev_cell.rect.to_bounding_box().r
                        <= avg_height * horizontal_threshold_factor
                    ):
                        current_group.append(cell)
                    else:
                        merged.append(merge_group(current_group))
                        current_group = [cell]
                if current_group:
                    merged.append(merge_group(current_group))
                return merged
            def merge_group(group: List[TextCell]) -> TextCell:
                # Fuse a run of cells: union of their boxes, text re-extracted
                # from pdfium over the merged region (not concatenated).
                if len(group) == 1:
                    return group[0]
                merged_bbox = BoundingBox(
                    l=min(cell.rect.to_bounding_box().l for cell in group),
                    t=min(cell.rect.to_bounding_box().t for cell in group),
                    r=max(cell.rect.to_bounding_box().r for cell in group),
                    b=max(cell.rect.to_bounding_box().b for cell in group),
                )
                assert self.text_page is not None
                # pdfium expects bottom-left-origin coordinates for extraction.
                bbox = merged_bbox.to_bottom_left_origin(page_size.height)
                with pypdfium2_lock:
                    merged_text = self.text_page.get_text_bounded(*bbox.as_tuple())
                return TextCell(
                    index=group[0].index,
                    text=merged_text,
                    orig=merged_text,
                    rect=BoundingRectangle.from_bounding_box(merged_bbox),
                    from_ocr=False,
                )
            rows = group_rows(cells)
            merged_cells = [cell for row in rows for cell in merge_row(row)]
            # Re-index the merged cells sequentially (1-based).
            for i, cell in enumerate(merged_cells, 1):
                cell.index = i
            return merged_cells
        return merge_horizontal_cells(cells)
    def get_bitmap_rects(self, scale: float = 1) -> Iterable[BoundingBox]:
        """Yield top-left-origin bounding boxes of image objects on the page,
        scaled by `scale` and corrected for the page rotation."""
        AREA_THRESHOLD = 0 # 32 * 32
        page_size = self.get_size()
        with pypdfium2_lock:
            rotation = self._ppage.get_rotation()
            for obj in self._ppage.get_objects(filter=[pdfium_c.FPDF_PAGEOBJ_IMAGE]):
                pos = obj.get_pos()
                # Rotate the (l, b, r, t) position tuple back into the
                # unrotated page coordinate frame.
                if rotation == 90:
                    pos = (
                        pos[1],
                        page_size.height - pos[2],
                        pos[3],
                        page_size.height - pos[0],
                    )
                elif rotation == 180:
                    pos = (
                        page_size.width - pos[2],
                        page_size.height - pos[3],
                        page_size.width - pos[0],
                        page_size.height - pos[1],
                    )
                elif rotation == 270:
                    pos = (
                        page_size.width - pos[3],
                        pos[0],
                        page_size.width - pos[1],
                        pos[2],
                    )
                cropbox = BoundingBox.from_tuple(
                    pos, origin=CoordOrigin.BOTTOMLEFT
                ).to_top_left_origin(page_height=page_size.height)
                if cropbox.area() > AREA_THRESHOLD:
                    cropbox = cropbox.scaled(scale=scale)
                    yield cropbox
    def get_text_in_rect(self, bbox: BoundingBox) -> str:
        """Extract the raw text contained in `bbox` (any coordinate origin)."""
        with pypdfium2_lock:
            if not self.text_page:
                self.text_page = self._ppage.get_textpage()
        # pdfium extraction works in bottom-left-origin coordinates.
        if bbox.coord_origin != CoordOrigin.BOTTOMLEFT:
            bbox = bbox.to_bottom_left_origin(self.get_size().height)
        with pypdfium2_lock:
            text_piece = self.text_page.get_text_bounded(*bbox.as_tuple())
        return text_piece
    def get_segmented_page(self) -> Optional[SegmentedPdfPage]:
        """Build a SegmentedPdfPage with text-line cells only (no words/chars),
        or None if the page failed to load."""
        if not self.valid:
            return None
        text_cells = self._compute_text_cells()
        # Get the PDF page geometry from pypdfium2
        dimension = get_pdf_page_geometry(self._ppage)
        # Create SegmentedPdfPage
        return SegmentedPdfPage(
            dimension=dimension,
            textline_cells=text_cells,
            char_cells=[],
            word_cells=[],
            has_textlines=len(text_cells) > 0,
            has_words=False,
            has_chars=False,
        )
    def get_text_cells(self) -> Iterable[TextCell]:
        # Cells are recomputed on every call (no caching here).
        return self._compute_text_cells()
    def get_page_image(
        self, scale: float = 1, cropbox: Optional[BoundingBox] = None
    ) -> Image.Image:
        """Render the page (or `cropbox` region) to a PIL image at `scale`."""
        page_size = self.get_size()
        if not cropbox:
            cropbox = BoundingBox(
                l=0,
                r=page_size.width,
                t=0,
                b=page_size.height,
                coord_origin=CoordOrigin.TOPLEFT,
            )
            padbox = BoundingBox(
                l=0, r=0, t=0, b=0, coord_origin=CoordOrigin.BOTTOMLEFT
            )
        else:
            # pdfium's `crop` argument takes margins from each page edge, so
            # convert the cropbox into (left, bottom, right, top) paddings.
            padbox = cropbox.to_bottom_left_origin(page_size.height).model_copy()
            padbox.r = page_size.width - padbox.r
            padbox.t = page_size.height - padbox.t
        with pypdfium2_lock:
            image = (
                self._ppage.render(
                    scale=scale * 1.5,
                    rotation=0, # no additional rotation
                    crop=padbox.as_tuple(),
                )
                .to_pil()
                .resize(
                    size=(round(cropbox.width * scale), round(cropbox.height * scale))
                )
            ) # We resize the image from 1.5x the given scale to make it sharper.
        return image
    def get_size(self) -> Size:
        # Page dimensions in PDF units.
        with pypdfium2_lock:
            return Size(width=self._ppage.get_width(), height=self._ppage.get_height())
    def unload(self):
        # Drop pdfium handles; the page backend is unusable afterwards.
        self._ppage = None
        self.text_page = None
class PyPdfiumDocumentBackend(PdfDocumentBackend):
    """Document-level backend that opens a PDF through pypdfium2.

    All pypdfium2 calls are serialized through `pypdfium2_lock` because the
    underlying library is not thread-safe.
    """

    def __init__(
        self,
        in_doc: "InputDocument",
        path_or_stream: Union[BytesIO, Path],
        options: Optional[PdfBackendOptions] = None,
    ):
        """Open the document, decrypting with the configured password if any.

        Raises:
            RuntimeError: if pypdfium2 cannot load the document.
        """
        # Previously the default was a module-level `PdfBackendOptions()`
        # instance shared by every backend; create a fresh one per call instead.
        if options is None:
            options = PdfBackendOptions()
        super().__init__(in_doc, path_or_stream, options)
        password = (
            self.options.password.get_secret_value() if self.options.password else None
        )
        try:
            with pypdfium2_lock:
                self._pdoc = pdfium.PdfDocument(self.path_or_stream, password=password)
        except PdfiumError as e:
            raise RuntimeError(
                f"pypdfium could not load document with hash {self.document_hash}"
            ) from e

    def page_count(self) -> int:
        """Number of pages in the loaded document."""
        with pypdfium2_lock:
            return len(self._pdoc)

    def load_page(self, page_no: int) -> PyPdfiumPageBackend:
        """Create a page backend for the given 0-based page number."""
        with pypdfium2_lock:
            return PyPdfiumPageBackend(self._pdoc, self.document_hash, page_no)

    def is_valid(self) -> bool:
        # A document with zero pages is treated as invalid.
        return self.page_count() > 0

    def unload(self):
        """Close the pdfium document and release its handle."""
        super().unload()
        with pypdfium2_lock:
            self._pdoc.close()
            self._pdoc = None
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/mspowerpoint_backend.py | docling/backend/mspowerpoint_backend.py | import logging
from io import BytesIO
from pathlib import Path
from typing import Union
from docling_core.types.doc import (
BoundingBox,
CoordOrigin,
DocItemLabel,
DoclingDocument,
DocumentOrigin,
GroupLabel,
ImageRef,
ProvenanceItem,
Size,
TableCell,
TableData,
)
from docling_core.types.doc.document import ContentLayer
from PIL import Image, UnidentifiedImageError
from pptx import Presentation
from pptx.enum.shapes import MSO_SHAPE_TYPE, PP_PLACEHOLDER
from pptx.oxml.text import CT_TextLineBreak
from docling.backend.abstract_backend import (
DeclarativeDocumentBackend,
PaginatedDocumentBackend,
)
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
class MsPowerpointDocumentBackend(DeclarativeDocumentBackend, PaginatedDocumentBackend):
    """Declarative backend converting PPTX presentations into a DoclingDocument.

    Each slide becomes a chapter group; shapes are mapped to titles, paragraphs,
    (bullet/numbered) list items, tables, and pictures. Slide notes are added to
    the furniture content layer.
    """

    def __init__(self, in_doc: "InputDocument", path_or_stream: Union[BytesIO, Path]):
        """Load the presentation from a path or an in-memory stream.

        Raises:
            RuntimeError: if python-pptx cannot open the document.
        """
        super().__init__(in_doc, path_or_stream)
        # XML namespaces used for DrawingML/PresentationML XPath queries.
        self.namespaces = {
            "a": "http://schemas.openxmlformats.org/drawingml/2006/main",
            "c": "http://schemas.openxmlformats.org/drawingml/2006/chart",
            "p": "http://schemas.openxmlformats.org/presentationml/2006/main",
        }
        # Powerpoint file:
        self.path_or_stream = path_or_stream
        self.pptx_obj = None
        self.valid = False
        try:
            if isinstance(self.path_or_stream, BytesIO):
                self.pptx_obj = Presentation(self.path_or_stream)
            elif isinstance(self.path_or_stream, Path):
                self.pptx_obj = Presentation(str(self.path_or_stream))
            self.valid = True
        except Exception as e:
            raise RuntimeError(
                f"MsPowerpointDocumentBackend could not load document with hash {self.document_hash}"
            ) from e

    def page_count(self) -> int:
        """Number of slides, or 0 when the document failed to load."""
        if self.is_valid():
            assert self.pptx_obj is not None
            return len(self.pptx_obj.slides)
        else:
            return 0

    def is_valid(self) -> bool:
        return self.valid

    @classmethod
    def supports_pagination(cls) -> bool:
        return True  # True? if so, how to handle pages...

    def unload(self):
        """Release the underlying stream."""
        if isinstance(self.path_or_stream, BytesIO):
            self.path_or_stream.close()
        self.path_or_stream = None

    @classmethod
    def supported_formats(cls) -> set[InputFormat]:
        return {InputFormat.PPTX}

    def convert(self) -> DoclingDocument:
        """Parse the PPTX into a structured DoclingDocument."""
        origin = DocumentOrigin(
            filename=self.file.name or "file",
            mimetype="application/vnd.ms-powerpoint",
            binary_hash=self.document_hash,
        )
        doc = DoclingDocument(
            name=self.file.stem or "file", origin=origin
        )  # must add origin information
        doc = self.walk_linear(self.pptx_obj, doc)
        return doc

    def generate_prov(self, shape, slide_ind, text="", slide_size=None):
        """Build a ProvenanceItem for `shape` on the given 0-based slide index.

        Falls back to the full slide area when the shape has no explicit
        geometry. `slide_size` defaults to a 1x1 size when not provided.
        """
        # None sentinel instead of a shared default Size instance.
        if slide_size is None:
            slide_size = Size(width=1, height=1)
        # `left` may legitimately be 0 and any of the attributes may be None
        # (inherited from the layout), so test for None explicitly instead of
        # truthiness.
        if None not in (shape.left, shape.top, shape.width, shape.height):
            left = shape.left
            top = shape.top
            width = shape.width
            height = shape.height
        else:
            left = 0
            top = 0
            width = slide_size.width
            height = slide_size.height
        shape_bbox = [left, top, left + width, top + height]
        shape_bbox = BoundingBox.from_tuple(shape_bbox, origin=CoordOrigin.BOTTOMLEFT)
        prov = ProvenanceItem(
            page_no=slide_ind + 1, charspan=[0, len(text)], bbox=shape_bbox
        )
        return prov

    def handle_text_elements(
        self, shape, parent_slide, slide_ind, doc: DoclingDocument, slide_size
    ):
        """Add a shape's text content: titles, paragraphs and list items."""
        is_list_group_created = False
        enum_list_item_value = 0
        new_list = None
        prov = self.generate_prov(shape, slide_ind, shape.text.strip(), slide_size)

        def is_list_item(paragraph):
            """Check if the paragraph is a list item."""
            p = paragraph._element
            if (
                p.find(".//a:buChar", namespaces={"a": self.namespaces["a"]})
                is not None
            ):
                return (True, "Bullet")
            elif (
                p.find(".//a:buAutoNum", namespaces={"a": self.namespaces["a"]})
                is not None
            ):
                return (True, "Numbered")
            elif paragraph.level > 0:
                # Most likely a sub-list
                return (True, "None")
            else:
                return (False, "None")

        # Iterate through paragraphs to build up text
        for paragraph in shape.text_frame.paragraphs:
            is_a_list, bullet_type = is_list_item(paragraph)
            p = paragraph._element
            # Convert line breaks to spaces and accumulate text
            p_text = ""
            for e in p.content_children:
                if isinstance(e, CT_TextLineBreak):
                    p_text += " "
                else:
                    p_text += e.text
            if is_a_list:
                enum_marker = ""
                enumerated = bullet_type == "Numbered"
                if not is_list_group_created:
                    new_list = doc.add_list_group(
                        name="list",
                        parent=parent_slide,
                    )
                    is_list_group_created = True
                    enum_list_item_value = 0
                if enumerated:
                    enum_list_item_value += 1
                    enum_marker = str(enum_list_item_value) + "."
                doc.add_list_item(
                    marker=enum_marker,
                    enumerated=enumerated,
                    parent=new_list,
                    text=p_text,
                    prov=prov,
                )
            else:  # is paragraph not a list item
                # Assign proper label to the text, depending if it's a Title or
                # Section Header; for other types of text, use PARAGRAPH.
                doc_label = DocItemLabel.PARAGRAPH
                if shape.is_placeholder:
                    placeholder_type = shape.placeholder_format.type
                    if placeholder_type in [
                        PP_PLACEHOLDER.CENTER_TITLE,
                        PP_PLACEHOLDER.TITLE,
                    ]:
                        # It's a title
                        doc_label = DocItemLabel.TITLE
                    elif placeholder_type == PP_PLACEHOLDER.SUBTITLE:
                        # BUGFIX: this was previously a bare expression, so the
                        # label was never actually assigned for subtitles.
                        doc_label = DocItemLabel.SECTION_HEADER
                # output accumulated inline text:
                doc.add_text(
                    label=doc_label,
                    parent=parent_slide,
                    text=p_text,
                    prov=prov,
                )

    def handle_title(self, shape, parent_slide, slide_ind, doc):
        """Add a title or subtitle placeholder shape as a title/section header."""
        placeholder_type = shape.placeholder_format.type
        txt = shape.text.strip()
        prov = self.generate_prov(shape, slide_ind, txt)
        if len(txt.strip()) > 0:
            if placeholder_type in [PP_PLACEHOLDER.CENTER_TITLE, PP_PLACEHOLDER.TITLE]:
                _log.info(f"Title found: {shape.text}")
                doc.add_text(
                    label=DocItemLabel.TITLE, parent=parent_slide, text=txt, prov=prov
                )
            elif placeholder_type == PP_PLACEHOLDER.SUBTITLE:
                _log.info(f"Subtitle found: {shape.text}")
                # Using DocItemLabel.SECTION_HEADER, while a SUBTITLE label is
                # not available.
                doc.add_text(
                    label=DocItemLabel.SECTION_HEADER,
                    parent=parent_slide,
                    text=txt,
                    prov=prov,
                )

    def handle_pictures(self, shape, parent_slide, slide_ind, doc, slide_size):
        """Add a picture shape as an image item; skip images Pillow cannot read."""
        try:
            # Get the image bytes
            image = shape.image
            image_bytes = image.blob
            im_dpi, _ = image.dpi
            pil_image = Image.open(BytesIO(image_bytes))
            # shape has picture
            prov = self.generate_prov(shape, slide_ind, "", slide_size)
            doc.add_picture(
                parent=parent_slide,
                image=ImageRef.from_pil(image=pil_image, dpi=im_dpi),
                caption=None,
                prov=prov,
            )
        except (UnidentifiedImageError, OSError) as e:
            _log.warning(f"Warning: image cannot be loaded by Pillow: {e}")

    def handle_tables(self, shape, parent_slide, slide_ind, doc, slide_size):
        """Add a table shape, honoring row/column spans from the table XML."""
        if shape.has_table:
            table = shape.table
            prov = self.generate_prov(shape, slide_ind, "", slide_size)
            num_cols = 0
            num_rows = len(table.rows)
            tcells = []
            # Access the XML element for the shape that contains the table
            table_xml = shape._element
            for row_idx, row in enumerate(table.rows):
                if len(row.cells) > num_cols:
                    num_cols = len(row.cells)
                for col_idx, cell in enumerate(row.cells):
                    # Access the XML of the cell (the 'tc' element in table XML)
                    cell_xml = table_xml.xpath(
                        f".//a:tbl/a:tr[{row_idx + 1}]/a:tc[{col_idx + 1}]"
                    )
                    if not cell_xml:
                        continue  # If no cell XML is found, skip
                    cell_xml = cell_xml[0]  # Get the first matching XML node
                    # Span attributes are absent when the cell spans one row/col.
                    row_span = cell_xml.get("rowSpan")  # Vertical span
                    col_span = cell_xml.get("gridSpan")  # Horizontal span
                    row_span = 1 if row_span is None else int(row_span)
                    col_span = 1 if col_span is None else int(col_span)
                    icell = TableCell(
                        text=cell.text.strip(),
                        row_span=row_span,
                        col_span=col_span,
                        start_row_offset_idx=row_idx,
                        end_row_offset_idx=row_idx + row_span,
                        start_col_offset_idx=col_idx,
                        end_col_offset_idx=col_idx + col_span,
                        column_header=row_idx == 0,
                        row_header=False,
                    )
                    # Keep only non-empty cells.
                    if len(cell.text.strip()) > 0:
                        tcells.append(icell)
            if len(tcells) > 0:
                # If table is not fully empty, create the Docling table.
                data = TableData(
                    num_rows=num_rows, num_cols=num_cols, table_cells=tcells
                )
                doc.add_table(parent=parent_slide, data=data, prov=prov)

    def walk_linear(self, pptx_obj, doc) -> DoclingDocument:
        """Traverse all slides and their (possibly grouped) shapes in order."""
        # Units of size in PPTX by default are EMU units (English Metric Units)
        slide_width = pptx_obj.slide_width
        slide_height = pptx_obj.slide_height
        max_levels = 10
        parents = {i: None for i in range(max_levels)}  # type: ignore
        # Loop through each slide; enumerate already yields the slide index, so
        # the previous O(n) `pptx_obj.slides.index(slide)` lookup is unnecessary.
        for slide_ind, slide in enumerate(pptx_obj.slides):
            parent_slide = doc.add_group(
                name=f"slide-{slide_ind}", label=GroupLabel.CHAPTER, parent=parents[0]
            )
            slide_size = Size(width=slide_width, height=slide_height)
            doc.add_page(page_no=slide_ind + 1, size=slide_size)

            def handle_shapes(shape, parent_slide, slide_ind, doc, slide_size):
                # Dispatch one shape to the group/table/picture/text handlers.
                handle_groups(shape, parent_slide, slide_ind, doc, slide_size)
                if shape.has_table:
                    # Handle Tables
                    self.handle_tables(shape, parent_slide, slide_ind, doc, slide_size)
                if shape.shape_type == MSO_SHAPE_TYPE.PICTURE:
                    # Handle Pictures
                    if hasattr(shape, "image"):
                        self.handle_pictures(
                            shape, parent_slide, slide_ind, doc, slide_size
                        )
                # If shape doesn't have any text, move on to the next shape
                if not hasattr(shape, "text"):
                    return
                if shape.text is None:
                    return
                if len(shape.text.strip()) == 0:
                    return
                if not shape.has_text_frame:
                    _log.warning("Warning: shape has text but not text_frame")
                    return
                # Handle other text elements, including lists (bullet lists, numbered lists)
                self.handle_text_elements(
                    shape, parent_slide, slide_ind, doc, slide_size
                )

            def handle_groups(shape, parent_slide, slide_ind, doc, slide_size):
                # Recurse into grouped shapes.
                if shape.shape_type == MSO_SHAPE_TYPE.GROUP:
                    for groupedshape in shape.shapes:
                        handle_shapes(
                            groupedshape, parent_slide, slide_ind, doc, slide_size
                        )

            # Loop through each shape in the slide
            for shape in slide.shapes:
                handle_shapes(shape, parent_slide, slide_ind, doc, slide_size)

            # Handle notes slide: notes go to the furniture content layer.
            if slide.has_notes_slide:
                notes_slide = slide.notes_slide
                if notes_slide.notes_text_frame is not None:
                    notes_text = notes_slide.notes_text_frame.text.strip()
                    if notes_text:
                        bbox = BoundingBox(l=0, t=0, r=0, b=0)
                        prov = ProvenanceItem(
                            page_no=slide_ind + 1,
                            charspan=[0, len(notes_text)],
                            bbox=bbox,
                        )
                        doc.add_text(
                            label=DocItemLabel.TEXT,
                            parent=parent_slide,
                            text=notes_text,
                            prov=prov,
                            content_layer=ContentLayer.FURNITURE,
                        )
        return doc
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/xml/__init__.py | docling/backend/xml/__init__.py | python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false | |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/xml/jats_backend.py | docling/backend/xml/jats_backend.py | import logging
import traceback
from io import BytesIO
from pathlib import Path
from typing import Final, Optional, Union, cast
from bs4 import BeautifulSoup, NavigableString, Tag
from docling_core.types.doc import (
DocItemLabel,
DoclingDocument,
DocumentOrigin,
GroupItem,
GroupLabel,
NodeItem,
TableCell,
TableData,
TextItem,
)
from lxml import etree
from typing_extensions import TypedDict, override
from docling.backend.abstract_backend import DeclarativeDocumentBackend
from docling.backend.html_backend import HTMLDocumentBackend
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
# Substrings identifying JATS DTDs in system URLs; used during backend
# initialization to decide whether a file is valid JATS.
JATS_DTD_URL: Final = ["JATS-journalpublishing", "JATS-archive"]
# Default section header texts (presumably used where the article provides no
# explicit titles — confirm against the methods further down the file).
DEFAULT_HEADER_ACKNOWLEDGMENTS: Final = "Acknowledgments"
DEFAULT_HEADER_ABSTRACT: Final = "Abstract"
DEFAULT_HEADER_REFERENCES: Final = "References"
DEFAULT_TEXT_ETAL: Final = "et al."
class Abstract(TypedDict):
    """Typed dictionary holding an article abstract's label and text content."""
    label: str
    content: str
class Author(TypedDict):
    """Typed dictionary holding an author's name and affiliation names."""
    name: str
    affiliation_names: list[str]
class Citation(TypedDict):
    """Typed dictionary describing a bibliographic citation record."""
    author_names: str
    title: str
    source: str
    year: str
    volume: str
    page: str
    pub_id: str
    publisher_name: str
    publisher_loc: str
class Table(TypedDict):
    """Typed dictionary holding a table's label, caption, and raw content."""
    label: str
    caption: str
    content: str
class XMLComponents(TypedDict):
    """Typed dictionary aggregating the parsed article metadata components."""
    title: str
    authors: list[Author]
    abstract: list[Abstract]
class JatsDocumentBackend(DeclarativeDocumentBackend):
    """Backend to parse articles in XML format tagged according to JATS definition.

    The Journal Article Tag Suite (JATS) is a definition standard for the
    representation of journal articles in XML format. Several publishers and journal
    archives provide content in JATS format, including PubMed Central® (PMC), bioRxiv,
    medRxiv, or Springer Nature.

    Refer to https://jats.nlm.nih.gov for more details on JATS.

    The code from this document backend has been developed by modifying parts of the
    PubMed Parser library (version 0.5.0, released on 12.08.2024):
    Achakulvisut et al., (2020).
    Pubmed Parser: A Python Parser for PubMed Open-Access XML Subset and MEDLINE XML
    Dataset XML Dataset.
    Journal of Open Source Software, 5(46), 1979,
    https://doi.org/10.21105/joss.01979
    """

    @override
    def __init__(
        self, in_doc: "InputDocument", path_or_stream: Union[BytesIO, Path]
    ) -> None:
        """Parse the XML input and mark it valid when a JATS DTD is referenced.

        Raises:
            RuntimeError: If the stream or file cannot be parsed as XML.
        """
        super().__init__(in_doc, path_or_stream)
        self.path_or_stream = path_or_stream

        # Initialize the root of the document hierarchy
        self.root: Optional[NodeItem] = None
        # Current heading level while walking the XML body.
        self.hlevel: int = 0
        self.valid: bool = False
        try:
            if isinstance(self.path_or_stream, BytesIO):
                self.path_or_stream.seek(0)
            self.tree: etree._ElementTree = etree.parse(self.path_or_stream)

            # The document is considered JATS when either the DOCTYPE system
            # identifier or any internal-DTD entity references a JATS DTD.
            doc_info: etree.DocInfo = self.tree.docinfo
            if doc_info.system_url and any(
                kwd in doc_info.system_url for kwd in JATS_DTD_URL
            ):
                self.valid = True
                return
            for ent in doc_info.internalDTD.iterentities():
                if ent.system_url and any(
                    kwd in ent.system_url for kwd in JATS_DTD_URL
                ):
                    self.valid = True
                    return
        except Exception as exc:
            raise RuntimeError(
                f"Could not initialize JATS backend for file with hash {self.document_hash}."
            ) from exc

    @override
    def is_valid(self) -> bool:
        return self.valid

    @classmethod
    @override
    def supports_pagination(cls) -> bool:
        return False

    @override
    def unload(self):
        if isinstance(self.path_or_stream, BytesIO):
            self.path_or_stream.close()
        self.path_or_stream = None

    @classmethod
    @override
    def supported_formats(cls) -> set[InputFormat]:
        return {InputFormat.XML_JATS}

    @override
    def convert(self) -> DoclingDocument:
        """Convert the JATS article into a DoclingDocument (best effort).

        Any error while walking the XML is logged and a (possibly partial)
        document is returned.
        """
        # Create the empty document before the try block so that a failure
        # while parsing still returns a bound `doc` (previously an early
        # exception made the final `return doc` raise NameError).
        origin = DocumentOrigin(
            filename=self.file.name or "file",
            mimetype="application/xml",
            binary_hash=self.document_hash,
        )
        doc = DoclingDocument(name=self.file.stem or "file", origin=origin)
        try:
            self.hlevel = 0

            # Get metadata XML components
            xml_components: XMLComponents = self._parse_metadata()

            # Add metadata to the document
            self._add_metadata(doc, xml_components)

            # walk over the XML body
            body = self.tree.xpath("//body")
            if self.root and len(body) > 0:
                self._walk_linear(doc, self.root, body[0])

            # walk over the XML back matter
            back = self.tree.xpath("//back")
            if self.root and len(back) > 0:
                self._walk_linear(doc, self.root, back[0])
        except Exception:
            _log.error(traceback.format_exc())

        return doc

    @staticmethod
    def _get_text(node: etree._Element, sep: Optional[str] = None) -> str:
        """Recursively flatten a node's text, skipping terms and formulas.

        Args:
            node: Element to flatten.
            sep: Optional separator appended after each child's text.

        Returns:
            The concatenated text with newlines collapsed to spaces.
        """
        skip_tags = ["term", "disp-formula", "inline-formula"]
        text: str = (
            node.text.replace("\n", " ")
            if (node.tag not in skip_tags and node.text)
            else ""
        )
        for child in list(node):
            if child.tag not in skip_tags:
                # TODO: apply styling according to child.tag when supported by docling-core
                text += JatsDocumentBackend._get_text(child, sep)
                if sep:
                    text = text.rstrip(sep) + sep
            # Tail text belongs to the parent, even for skipped children.
            text += child.tail.replace("\n", " ") if child.tail else ""

        return text

    def _find_metadata(self) -> Optional[etree._Element]:
        """Return the first article/book-part metadata element, if any."""
        meta_names: list[str] = ["article-meta", "book-part-meta"]
        meta: Optional[etree._Element] = None
        for name in meta_names:
            node = self.tree.xpath(f".//{name}")
            if len(node) > 0:
                meta = node[0]
                break

        return meta

    def _parse_abstract(self) -> list[Abstract]:
        """Collect all <abstract> elements as label/content pairs."""
        # TODO: address cases with multiple sections
        abs_list: list[Abstract] = []

        for abs_node in self.tree.xpath(".//abstract"):
            abstract: Abstract = dict(label="", content="")
            texts = []
            for abs_par in abs_node.xpath("p"):
                texts.append(JatsDocumentBackend._get_text(abs_par).strip())
            abstract["content"] = " ".join(texts)

            label_node = abs_node.xpath("title|label")
            if len(label_node) > 0:
                abstract["label"] = label_node[0].text.strip()

            abs_list.append(abstract)

        return abs_list

    def _parse_authors(self) -> list[Author]:
        """Extract author names and their affiliation names from the metadata."""
        # Get mapping between affiliation ids and names
        authors: list[Author] = []
        meta: Optional[etree._Element] = self._find_metadata()
        if meta is None:
            return authors

        affiliation_names = []
        for affiliation_node in meta.xpath(".//aff[@id]"):
            aff = ", ".join([t for t in affiliation_node.itertext() if t.strip()])
            aff = aff.replace("\n", " ")
            label = affiliation_node.xpath("label")
            if label:
                # TODO: once superscript is supported, add label with formatting
                aff = aff.removeprefix(f"{label[0].text}, ")
            affiliation_names.append(aff)
        affiliation_ids_names = dict(
            zip(meta.xpath(".//aff[@id]/@id"), affiliation_names)
        )

        # Get author names and affiliation names
        for author_node in meta.xpath(
            './/contrib-group/contrib[@contrib-type="author"]'
        ):
            author: Author = {
                "name": "",
                "affiliation_names": [],
            }

            # Affiliation names
            affiliation_ids = [
                a.attrib["rid"] for a in author_node.xpath('xref[@ref-type="aff"]')
            ]
            for id in affiliation_ids:
                if id in affiliation_ids_names:
                    author["affiliation_names"].append(affiliation_ids_names[id])

            # Name
            # NOTE(review): assumes every author contrib has <name> with both
            # given-names and surname — an IndexError here is caught in convert().
            author["name"] = (
                author_node.xpath("name/given-names")[0].text
                + " "
                + author_node.xpath("name/surname")[0].text
            )

            authors.append(author)

        return authors

    def _parse_title(self) -> str:
        """Build the document title from all title-group elements, joined by ' - '."""
        meta_names: list[str] = [
            "article-meta",
            "collection-meta",
            "book-meta",
            "book-part-meta",
        ]
        title_names: list[str] = ["article-title", "subtitle", "title", "label"]
        titles: list[str] = [
            " ".join(
                elem.text.replace("\n", " ").strip()
                for elem in list(title_node)
                if elem.tag in title_names
            ).strip()
            for title_node in self.tree.xpath(
                "|".join([f".//{item}/title-group" for item in meta_names])
            )
        ]

        text = " - ".join(titles)

        return text

    def _parse_metadata(self) -> XMLComponents:
        """Parsing JATS document metadata."""
        xml_components: XMLComponents = {
            "title": self._parse_title(),
            "authors": self._parse_authors(),
            "abstract": self._parse_abstract(),
        }

        return xml_components

    def _add_abstract(
        self, doc: DoclingDocument, xml_components: XMLComponents
    ) -> None:
        """Add each non-empty abstract as a heading plus a text item."""
        for abstract in xml_components["abstract"]:
            text: str = abstract["content"]
            title: str = abstract["label"] or DEFAULT_HEADER_ABSTRACT
            if not text:
                continue
            parent = doc.add_heading(
                parent=self.root, text=title, level=self.hlevel + 1
            )
            doc.add_text(
                parent=parent,
                text=text,
                label=DocItemLabel.TEXT,
            )

        return

    def _add_authors(self, doc: DoclingDocument, xml_components: XMLComponents) -> None:
        """Add author names and deduplicated affiliations as paragraphs."""
        # TODO: once docling supports text formatting, add affiliation reference to
        # author names through superscripts
        authors: list = [item["name"] for item in xml_components["authors"]]
        authors_str = ", ".join(authors)
        affiliations: list = [
            item
            for author in xml_components["authors"]
            for item in author["affiliation_names"]
        ]
        # dict.fromkeys keeps first-seen order while removing duplicates
        affiliations_str = "; ".join(list(dict.fromkeys(affiliations)))

        if authors_str:
            doc.add_text(
                parent=self.root,
                text=authors_str,
                label=DocItemLabel.PARAGRAPH,
            )

        if affiliations_str:
            doc.add_text(
                parent=self.root,
                text=affiliations_str,
                label=DocItemLabel.PARAGRAPH,
            )

        return

    def _add_citation(self, doc: DoclingDocument, parent: NodeItem, text: str) -> None:
        """Add a citation as a list item (inside a list group) or a plain text item."""
        if isinstance(parent, GroupItem) and parent.label == GroupLabel.LIST:
            doc.add_list_item(text=text, enumerated=False, parent=parent)
        else:
            doc.add_text(text=text, label=DocItemLabel.TEXT, parent=parent)

        return

    def _parse_element_citation(self, node: etree._Element) -> str:
        """Flatten an <element-citation> into a single human-readable string."""
        citation: Citation = {
            "author_names": "",
            "title": "",
            "source": "",
            "year": "",
            "volume": "",
            "page": "",
            "pub_id": "",
            "publisher_name": "",
            "publisher_loc": "",
        }

        _log.debug("Citation parsing started")

        # Author names
        names = []
        for name_node in node.xpath(".//name"):
            name_str = (
                name_node.xpath("surname")[0].text.replace("\n", " ").strip()
                + " "
                + name_node.xpath("given-names")[0].text.replace("\n", " ").strip()
            )
            names.append(name_str)
        etal_node = node.xpath(".//etal")
        if len(etal_node) > 0:
            etal_text = etal_node[0].text or DEFAULT_TEXT_ETAL
            names.append(etal_text)
        citation["author_names"] = ", ".join(names)

        titles: list[str] = [
            "article-title",
            "chapter-title",
            "data-title",
            "issue-title",
            "part-title",
            "trans-title",
        ]
        title_node: Optional[etree._Element] = None
        for name in titles:
            name_node = node.xpath(name)
            if len(name_node) > 0:
                title_node = name_node[0]
                break
        citation["title"] = (
            JatsDocumentBackend._get_text(title_node)
            if title_node is not None
            else node.text.replace("\n", " ").strip()
        )

        # Journal, year, publisher name, publisher location, volume, elocation
        fields: list[str] = [
            "source",
            "year",
            "publisher-name",
            "publisher-loc",
            "volume",
        ]
        for item in fields:
            item_node = node.xpath(item)
            if len(item_node) > 0:
                citation[item.replace("-", "_")] = (  # type: ignore[literal-required]
                    item_node[0].text.replace("\n", " ").strip()
                )

        # Publication identifier
        if len(node.xpath("pub-id")) > 0:
            pub_id: list[str] = []
            for id_node in node.xpath("pub-id"):
                id_type = id_node.get("assigning-authority") or id_node.get(
                    "pub-id-type"
                )
                id_text = id_node.text
                if id_type and id_text:
                    pub_id.append(
                        id_type.replace("\n", " ").strip().upper()
                        + ": "
                        + id_text.replace("\n", " ").strip()
                    )
            if pub_id:
                citation["pub_id"] = ", ".join(pub_id)

        # Pages
        if len(node.xpath("elocation-id")) > 0:
            citation["page"] = (
                node.xpath("elocation-id")[0].text.replace("\n", " ").strip()
            )
        elif len(node.xpath("fpage")) > 0:
            citation["page"] = node.xpath("fpage")[0].text.replace("\n", " ").strip()
            if len(node.xpath("lpage")) > 0:
                citation["page"] += (
                    "–" + node.xpath("lpage")[0].text.replace("\n", " ").strip()  # noqa: RUF001
                )

        # Flatten the citation to string
        text = ""
        if citation["author_names"]:
            text += citation["author_names"].rstrip(".") + ". "
        if citation["title"]:
            text += citation["title"] + ". "
        if citation["source"]:
            text += citation["source"] + ". "
        if citation["publisher_name"]:
            if citation["publisher_loc"]:
                text += f"{citation['publisher_loc']}: "
            text += citation["publisher_name"] + ". "
        if citation["volume"]:
            text = text.rstrip(". ")
            text += f" {citation['volume']}. "
        if citation["page"]:
            text = text.rstrip(". ")
            if citation["volume"]:
                text += ":"
            text += citation["page"] + ". "
        if citation["year"]:
            text = text.rstrip(". ")
            text += f" ({citation['year']})."
        if citation["pub_id"]:
            text = text.rstrip(".") + ". "
            text += citation["pub_id"]

        _log.debug("Citation flattened")

        return text

    def _add_equation(
        self, doc: DoclingDocument, parent: NodeItem, node: etree._Element
    ) -> None:
        """Add a formula item for a <tex-math> node of the form ``...$$formula$$...``."""
        # node.text may be None for empty elements; guard so that a single
        # malformed formula does not abort the whole document walk.
        math_text = node.text or ""
        math_parts = math_text.split("$$")
        if len(math_parts) == 3:
            math_formula = math_parts[1]
            doc.add_text(label=DocItemLabel.FORMULA, text=math_formula, parent=parent)

        return

    def _add_figure_captions(
        self, doc: DoclingDocument, parent: NodeItem, node: etree._Element
    ) -> None:
        """Add a picture item for a <fig> node with its label and caption, if any."""
        label_node = node.xpath("label")
        label: Optional[str] = (
            JatsDocumentBackend._get_text(label_node[0]).strip() if label_node else ""
        )

        caption_node = node.xpath("caption")
        caption: Optional[str]
        if len(caption_node) > 0:
            caption = ""
            for caption_par in list(caption_node[0]):
                if caption_par.xpath(".//supplementary-material"):
                    continue
                caption += JatsDocumentBackend._get_text(caption_par).strip() + " "
            caption = caption.strip()
        else:
            caption = None

        # TODO: format label vs caption once styling is supported
        # `caption or ''` avoids interpolating a literal "None" when the figure
        # has a label but no caption.
        fig_text: str = f"{label}{' ' if label and caption else ''}{caption or ''}"
        fig_caption: Optional[TextItem] = (
            doc.add_text(label=DocItemLabel.CAPTION, text=fig_text)
            if fig_text
            else None
        )

        doc.add_picture(parent=parent, caption=fig_caption)

        return

    # TODO: add footnotes when DocItemLabel.FOOTNOTE and styling are supported
    # def _add_footnote_group(self, doc: DoclingDocument, parent: NodeItem, node: etree._Element) -> None:
    #     new_parent = doc.add_group(label=GroupLabel.LIST, name="footnotes", parent=parent)
    #     for child in node.iterchildren(tag="fn"):
    #         text = JatsDocumentBackend._get_text(child)
    #         doc.add_list_item(text=text, parent=new_parent)

    def _add_metadata(
        self, doc: DoclingDocument, xml_components: XMLComponents
    ) -> None:
        """Add title, authors, and abstract(s) to the document, in that order."""
        self._add_title(doc, xml_components)
        self._add_authors(doc, xml_components)
        self._add_abstract(doc, xml_components)

        return

    @staticmethod
    def parse_table_data(element: Tag) -> Optional[TableData]:
        """Build TableData from an HTML <table> tag, or None for nested tables."""
        # TODO, see how to implement proper support for rich tables from HTML backend
        nested_tables = element.find("table")
        if nested_tables is not None:
            _log.debug("Skipping nested table.")
            return None

        # Find the number of rows and columns (taking into account spans)
        num_rows = 0
        num_cols = 0
        for row in element("tr"):
            col_count = 0
            is_row_header = True
            if not isinstance(row, Tag):
                continue
            for cell in row(["td", "th"]):
                # Check the cell itself (the original re-checked `row` here,
                # which is always a Tag at this point).
                if not isinstance(cell, Tag):
                    continue
                cell_tag = cast(Tag, cell)
                col_span, row_span = HTMLDocumentBackend._get_cell_spans(cell_tag)
                col_count += col_span
                if cell_tag.name == "td" or row_span == 1:
                    is_row_header = False
            num_cols = max(num_cols, col_count)
            # Rows made only of multi-row <th> cells span into following rows
            # and are not counted as rows of their own.
            if not is_row_header:
                num_rows += 1

        _log.debug(f"The table has {num_rows} rows and {num_cols} cols.")

        grid: list = [[None for _ in range(num_cols)] for _ in range(num_rows)]
        data = TableData(num_rows=num_rows, num_cols=num_cols, table_cells=[])

        # Iterate over the rows in the table
        start_row_span = 0
        row_idx = -1
        for row in element("tr"):
            if not isinstance(row, Tag):
                continue

            # For each row, find all the column cells (both <td> and <th>)
            cells = row(["td", "th"])

            # Check if cell is in a column header or row header
            col_header = True
            row_header = True
            for html_cell in cells:
                if isinstance(html_cell, Tag):
                    _, row_span = HTMLDocumentBackend._get_cell_spans(html_cell)
                    if html_cell.name == "td":
                        col_header = False
                        row_header = False
                    elif row_span == 1:
                        row_header = False
            if not row_header:
                row_idx += 1
                start_row_span = 0
            else:
                start_row_span += 1

            # Extract the text content of each cell
            col_idx = 0
            for html_cell in cells:
                if not isinstance(html_cell, Tag):
                    continue
                # extract inline formulas
                for formula in html_cell("inline-formula"):
                    math_parts = formula.text.split("$$")
                    if len(math_parts) == 3:
                        math_formula = f"$${math_parts[1]}$$"
                        formula.replace_with(NavigableString(math_formula))

                # TODO: extract content correctly from table-cells with lists
                text = HTMLDocumentBackend.get_text(html_cell).strip()
                col_span, row_span = HTMLDocumentBackend._get_cell_spans(html_cell)
                if row_header:
                    row_span -= 1
                # Skip grid positions already claimed by spanning cells.
                while (
                    col_idx < num_cols
                    and grid[row_idx + start_row_span][col_idx] is not None
                ):
                    col_idx += 1
                for r in range(start_row_span, start_row_span + row_span):
                    for c in range(col_span):
                        if row_idx + r < num_rows and col_idx + c < num_cols:
                            grid[row_idx + r][col_idx + c] = text

                table_cell = TableCell(
                    text=text,
                    row_span=row_span,
                    col_span=col_span,
                    start_row_offset_idx=start_row_span + row_idx,
                    end_row_offset_idx=start_row_span + row_idx + row_span,
                    start_col_offset_idx=col_idx,
                    end_col_offset_idx=col_idx + col_span,
                    column_header=col_header,
                    row_header=((not col_header) and html_cell.name == "th"),
                )
                data.table_cells.append(table_cell)

        return data

    def _add_table(
        self, doc: DoclingDocument, parent: NodeItem, table_xml_component: Table
    ) -> None:
        """Add a docling table (with caption) from a parsed Table component."""
        soup = BeautifulSoup(table_xml_component["content"], "html.parser")
        table_tag = soup.find("table")
        if not isinstance(table_tag, Tag):
            return

        data = JatsDocumentBackend.parse_table_data(table_tag)

        # TODO: format label vs caption once styling is supported
        label = table_xml_component["label"]
        caption = table_xml_component["caption"]
        table_text: str = f"{label}{' ' if label and caption else ''}{caption}"
        table_caption: Optional[TextItem] = (
            doc.add_text(label=DocItemLabel.CAPTION, text=table_text)
            if table_text
            else None
        )

        if data is not None:
            doc.add_table(data=data, parent=parent, caption=table_caption)

        return

    def _add_tables(
        self, doc: DoclingDocument, parent: NodeItem, node: etree._Element
    ) -> None:
        """Extract content, caption, and label from a <table-wrap> and add it."""
        table: Table = {"label": "", "caption": "", "content": ""}

        # Content
        if len(node.xpath("table")) > 0:
            table_content_node = node.xpath("table")[0]
        elif len(node.xpath("alternatives/table")) > 0:
            table_content_node = node.xpath("alternatives/table")[0]
        else:
            table_content_node = None
        if table_content_node is not None:
            table["content"] = etree.tostring(table_content_node).decode("utf-8")

        # Caption
        caption_node = node.xpath("caption")
        caption: Optional[str]
        if caption_node:
            caption = ""
            for caption_par in list(caption_node[0]):
                if caption_par.xpath(".//supplementary-material"):
                    continue
                caption += JatsDocumentBackend._get_text(caption_par).strip() + " "
            caption = caption.strip()
        else:
            caption = None
        if caption is not None:
            table["caption"] = caption

        # Label
        if len(node.xpath("label")) > 0:
            table["label"] = node.xpath("label")[0].text

        try:
            self._add_table(doc, parent, table)
        except Exception:
            _log.warning(f"Skipping unsupported table in {self.file!s}")

        return

    def _add_title(self, doc: DoclingDocument, xml_components: XMLComponents) -> None:
        """Add the document title and remember it as the hierarchy root."""
        self.root = doc.add_text(
            parent=None,
            text=xml_components["title"],
            label=DocItemLabel.TITLE,
        )
        return

    def _walk_linear(
        self, doc: DoclingDocument, parent: NodeItem, node: etree._Element
    ) -> str:
        """Depth-first walk of the article body, emitting docling items.

        Returns:
            Accumulated text to backpropagate to the caller; empty when the
            node was a paragraph (its text was already added to the document).
        """
        skip_tags = ["term"]
        flush_tags = ["ack", "sec", "list", "boxed-text", "disp-formula", "fig"]
        new_parent: NodeItem = parent
        node_text: str = (
            node.text.replace("\n", " ")
            if (node.tag not in skip_tags and node.text)
            else ""
        )

        for child in list(node):
            stop_walk: bool = False

            # flush text into TextItem for some tags in paragraph nodes
            if node.tag == "p" and node_text.strip() and child.tag in flush_tags:
                doc.add_text(
                    label=DocItemLabel.TEXT, text=node_text.strip(), parent=parent
                )
                node_text = ""

            # add elements and decide whether to stop walking
            if child.tag in ("sec", "ack"):
                header = child.xpath("title|label")
                text: Optional[str] = None
                if len(header) > 0:
                    text = JatsDocumentBackend._get_text(header[0])
                elif child.tag == "ack":
                    text = DEFAULT_HEADER_ACKNOWLEDGMENTS
                if text:
                    self.hlevel += 1
                    new_parent = doc.add_heading(
                        text=text, parent=parent, level=self.hlevel
                    )
            elif child.tag == "list":
                new_parent = doc.add_group(
                    label=GroupLabel.LIST, name="list", parent=parent
                )
            elif child.tag == "list-item":
                # TODO: address any type of content (another list, formula,...)
                # TODO: address list type and item label
                text = JatsDocumentBackend._get_text(child).strip()
                new_parent = doc.add_list_item(text=text, parent=parent)
                stop_walk = True
            elif child.tag == "fig":
                self._add_figure_captions(doc, parent, child)
                stop_walk = True
            elif child.tag == "table-wrap":
                self._add_tables(doc, parent, child)
                stop_walk = True
            elif child.tag == "supplementary-material":
                # fixed tag name (was misspelled "suplementary-material", so
                # supplementary material was never actually skipped)
                stop_walk = True
            elif child.tag == "fn-group":
                # header = child.xpath(".//title") or child.xpath(".//label")
                # if header:
                #     text = JatsDocumentBackend._get_text(header[0])
                #     fn_parent = doc.add_heading(text=text, parent=new_parent)
                # self._add_footnote_group(doc, fn_parent, child)
                stop_walk = True
            elif child.tag == "ref-list" and node.tag != "ref-list":
                header = child.xpath("title|label")
                text = (
                    JatsDocumentBackend._get_text(header[0])
                    if len(header) > 0
                    else DEFAULT_HEADER_REFERENCES
                )
                new_parent = doc.add_heading(text=text, parent=parent)
                new_parent = doc.add_group(
                    parent=new_parent, label=GroupLabel.LIST, name="list"
                )
            elif child.tag == "element-citation":
                text = self._parse_element_citation(child)
                self._add_citation(doc, parent, text)
                stop_walk = True
            elif child.tag == "mixed-citation":
                text = JatsDocumentBackend._get_text(child).strip()
                self._add_citation(doc, parent, text)
                stop_walk = True
            elif child.tag == "tex-math":
                self._add_equation(doc, parent, child)
                stop_walk = True
            elif child.tag == "inline-formula":
                # TODO: address inline formulas when supported by docling-core
                stop_walk = True

            # step into child
            if not stop_walk:
                new_text = self._walk_linear(doc, new_parent, child)
                if not (node.getparent().tag == "p" and node.tag in flush_tags):
                    node_text += new_text
                if child.tag in ("sec", "ack") and text:
                    self.hlevel -= 1

            # pick up the tail text
            node_text += child.tail.replace("\n", " ") if child.tail else ""

        # create paragraph
        if node.tag == "p" and node_text.strip():
            doc.add_text(label=DocItemLabel.TEXT, text=node_text.strip(), parent=parent)
            return ""
        else:
            # backpropagate the text
            return node_text
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/xml/uspto_backend.py | docling/backend/xml/uspto_backend.py | """Backend to parse patents from the United States Patent Office (USPTO).
The parsers included in this module can handle patent grants published since 1976 and
patent applications since 2001.
The original files can be found in https://bulkdata.uspto.gov.
"""
import html
import logging
import re
import xml.sax
import xml.sax.xmlreader
from abc import ABC, abstractmethod
from enum import Enum, unique
from io import BytesIO
from pathlib import Path
from typing import Final, Optional, Union
from bs4 import BeautifulSoup, Tag
from docling_core.types.doc import (
DocItem,
DocItemLabel,
DoclingDocument,
DocumentOrigin,
TableCell,
TableData,
TextItem,
)
from docling_core.types.doc.document import LevelNumber
from pydantic import NonNegativeInt
from typing_extensions import Self, TypedDict, override
from docling.backend.abstract_backend import DeclarativeDocumentBackend
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
# Prepended to extracted <table> fragments so they parse as standalone XML.
XML_DECLARATION: Final = '<?xml version="1.0" encoding="UTF-8"?>'
@unique
class PatentHeading(Enum):
    """Text of docling headings for tagged sections in USPTO patent documents."""
    ABSTRACT = "ABSTRACT", 2
    CLAIMS = "CLAIMS", 2
    @override
    def __new__(cls, value: str, _) -> Self:
        # Only the first tuple item becomes the enum value; the second item
        # (the heading level) is consumed by __init__ below.
        obj = object.__new__(cls)
        obj._value_ = value
        return obj
    @override
    def __init__(self, _, level: LevelNumber) -> None:
        # Docling heading level used when emitting this section's heading.
        self.level: LevelNumber = level
class PatentUsptoDocumentBackend(DeclarativeDocumentBackend):
    """Declarative backend for USPTO patent files (XML or APS plain text)."""
    @override
    def __init__(
        self, in_doc: InputDocument, path_or_stream: Union[BytesIO, Path]
    ) -> None:
        """Read the whole patent content and pick a parser from its format marker.
        Raises:
            RuntimeError: If the input cannot be read or decoded.
        """
        super().__init__(in_doc, path_or_stream)
        self.patent_content: str = ""
        self.parser: Optional[PatentUspto] = None
        try:
            if isinstance(self.path_or_stream, BytesIO):
                while line := self.path_or_stream.readline().decode("utf-8"):
                    # a DOCTYPE line (XML) or the exact "PATN" record (APS text)
                    # identifies which concrete parser to use
                    if line.startswith("<!DOCTYPE") or line == "PATN\n":
                        self._set_parser(line)
                    self.patent_content += line
            elif isinstance(self.path_or_stream, Path):
                with open(self.path_or_stream, encoding="utf-8") as file_obj:
                    while line := file_obj.readline():
                        if line.startswith("<!DOCTYPE") or line == "PATN\n":
                            self._set_parser(line)
                        self.patent_content += line
        except Exception as exc:
            raise RuntimeError(
                f"Could not initialize USPTO backend for file with hash {self.document_hash}."
            ) from exc
    def _set_parser(self, doctype: str) -> None:
        """Select the patent parser matching the given DOCTYPE (or APS) line."""
        doctype_line = doctype.lower()
        # APS grants are matched on the original (unlowered) "PATN" marker
        if doctype == "PATN\n":
            self.parser = PatentUsptoGrantAps()
        elif "us-patent-application-v4" in doctype_line:
            self.parser = PatentUsptoIce()
        elif "us-patent-grant-v4" in doctype_line:
            self.parser = PatentUsptoIce()
        elif "us-grant-025" in doctype_line:
            self.parser = PatentUsptoGrantV2()
        elif all(
            item in doctype_line
            for item in ("patent-application-publication", "pap-v1")
        ):
            self.parser = PatentUsptoAppV1()
        else:
            # unknown format: is_valid() will report the document as unsupported
            self.parser = None
    @override
    def is_valid(self) -> bool:
        return bool(self.patent_content) and bool(self.parser)
    @classmethod
    @override
    def supports_pagination(cls) -> bool:
        return False
    @override
    def unload(self) -> None:
        return
    @classmethod
    @override
    def supported_formats(cls) -> set[InputFormat]:
        return {InputFormat.XML_USPTO}
    @override
    def convert(self) -> DoclingDocument:
        """Parse the stored patent content into a DoclingDocument.
        Raises:
            RuntimeError: If no parser was selected or parsing failed.
        """
        if self.parser is not None:
            doc = self.parser.parse(self.patent_content)
            if doc is None:
                raise RuntimeError(
                    f"Failed to convert doc (hash={self.document_hash}, "
                    f"name={self.file.name})."
                )
            doc.name = self.file.name or "file"
            # APS grants are plain text; all other supported formats are XML
            mime_type = (
                "text/plain"
                if isinstance(self.parser, PatentUsptoGrantAps)
                else "application/xml"
            )
            doc.origin = DocumentOrigin(
                mimetype=mime_type,
                binary_hash=self.document_hash,
                filename=self.file.name or "file",
            )
            return doc
        else:
            raise RuntimeError(
                f"Cannot convert doc (hash={self.document_hash}, "
                f"name={self.file.name}) because the backend failed to init."
            )
class PatentUspto(ABC):
    """Parser of patent documents from the US Patent Office."""
    # Interface implemented by the format-specific parsers below
    # (ICE, Grant v2, APS, Application v1).
    @abstractmethod
    def parse(self, patent_content: str) -> Optional[DoclingDocument]:
        """Parse a USPTO patent.
        Parameters:
            patent_content: The content of a single patent in a USPTO file.
        Returns:
            The patent parsed as a docling document.
        """
class PatentUsptoIce(PatentUspto):
    """Parser of patent documents from the US Patent Office (ICE).
    The compatible formats are:
    - Patent Grant Full Text Data/XML Version 4.x ICE (from January 2005)
    - Patent Application Full Text Data/XML Version 4.x ICE (from January 2005)
    """
    def __init__(self) -> None:
        """Build an instance of PatentUsptoIce class."""
        self.handler = PatentUsptoIce.PatentHandler()
        # matches whole <table> elements so they can be re-parsed by XmlTable
        self.pattern = re.compile(r"^(<table .*?</table>)", re.MULTILINE | re.DOTALL)
    def parse(self, patent_content: str) -> Optional[DoclingDocument]:
        """Parse a USPTO ICE patent into a docling document.
        Parameters:
            patent_content: The content of a single patent.
        Returns:
            The parsed document, or None on SAX parsing errors.
        """
        try:
            xml.sax.parseString(patent_content, self.handler)
        except xml.sax._exceptions.SAXParseException as exc_sax:
            _log.error(f"Error in parsing USPTO document: {exc_sax}")
            return None
        doc = self.handler.doc
        if doc:
            # The SAX handler emits empty table placeholders; fill them by
            # re-parsing the raw <table> markup with XmlTable, in order.
            raw_tables = re.findall(self.pattern, patent_content)
            parsed_tables: list[TableData] = []
            _log.debug(f"Found {len(raw_tables)} tables to be parsed with XmlTable.")
            for table in raw_tables:
                table_parser = XmlTable(XML_DECLARATION + "\n" + table)
                try:
                    table_data = table_parser.parse()
                    if table_data:
                        parsed_tables.append(table_data)
                except Exception as exc_table:
                    _log.error(f"Error in parsing USPTO tables: {exc_table}")
            if len(parsed_tables) != len(doc.tables):
                _log.error(
                    f"Number of referenced ({len(doc.tables)}) and parsed "
                    f"({len(parsed_tables)}) tables differ."
                )
            else:
                for idx, item in enumerate(parsed_tables):
                    doc.tables[idx].data = item
        return doc
    class PatentHandler(xml.sax.handler.ContentHandler):
        """SAX ContentHandler for patent documents."""
        APP_DOC_ELEMENT: Final = "us-patent-application"
        GRANT_DOC_ELEMENT: Final = "us-patent-grant"
        @unique
        class Element(Enum):
            """Represents an element of interest in the patent application document."""
            # Each member is (tag name, whether its character data is text to keep).
            ABSTRACT = "abstract", True
            TITLE = "invention-title", True
            CLAIMS = "claims", False
            CLAIM = "claim", False
            CLAIM_TEXT = "claim-text", True
            PARAGRAPH = "p", True
            HEADING = "heading", True
            DESCRIPTION = "description", False
            TABLE = "table", False  # to track its position, without text
            DRAWINGS = "description-of-drawings", True
            STYLE_SUPERSCRIPT = "sup", True
            STYLE_SUBSCRIPT = "sub", True
            MATHS = "maths", False  # to avoid keeping formulas
            @override
            def __new__(cls, value: str, _) -> Self:
                # Only the first tuple item becomes the enum value.
                obj = object.__new__(cls)
                obj._value_ = value
                return obj
            @override
            def __init__(self, _, is_text: bool) -> None:
                # Whether character data inside this element should be kept.
                self.is_text: bool = is_text
        @override
        def __init__(self) -> None:
            """Build an instance of the patent handler."""
            # Current patent being parsed
            self.doc: Optional[DoclingDocument] = None
            # Keep track of docling hierarchy level
            self.level: LevelNumber = 1
            # Keep track of docling parents by level
            self.parents: dict[LevelNumber, Optional[DocItem]] = {1: None}
            # Content to retain for the current patent
            self.property: list[str]
            self.claim: str
            self.claims: list[str]
            self.abstract: str
            self.text: str
            self._clean_data()
            # To handle mathematical styling
            self.style_html = HtmlEntity()
        @override
        def startElement(self, tag, attributes):
            """Signal the start of an element.
            Args:
                tag: The element tag.
                attributes: The element attributes.
            """
            if tag in (
                self.APP_DOC_ELEMENT,
                self.GRANT_DOC_ELEMENT,
            ):
                self.doc = DoclingDocument(name="file")
                self.text = ""
            self._start_registered_elements(tag, attributes)
        @override
        def skippedEntity(self, name):
            """Receive notification of a skipped entity.
            HTML entities will be skipped by the parser. This method will unescape them
            and add them to the text.
            Args:
                name: Entity name.
            """
            if self.property:
                elm_val = self.property[-1]
                element = self.Element(elm_val)
                if element.is_text:
                    escaped = self.style_html.get_greek_from_iso8879(f"&{name};")
                    unescaped = html.unescape(escaped)
                    if unescaped == escaped:
                        _log.debug(f"Unrecognized HTML entity: {name}")
                        return
                    if element in (
                        self.Element.STYLE_SUPERSCRIPT,
                        self.Element.STYLE_SUBSCRIPT,
                    ):
                        # superscripts and subscripts need to be under text elements
                        if len(self.property) < 2:
                            return
                        parent_val = self.property[-2]
                        parent = self.Element(parent_val)
                        if parent.is_text:
                            self.text += self._apply_style(unescaped, elm_val)
                    else:
                        self.text += unescaped
        @override
        def endElement(self, tag):
            """Signal the end of an element.
            Args:
                tag: The element tag.
            """
            if tag in (
                self.APP_DOC_ELEMENT,
                self.GRANT_DOC_ELEMENT,
            ):
                self._clean_data()
            self._end_registered_element(tag)
        @override
        def characters(self, content):
            """Receive notification of character data.
            Args:
                content: Data reported by the handler.
            """
            if self.property:
                elm_val = self.property[-1]
                element = self.Element(elm_val)
                if element.is_text:
                    if element in (
                        self.Element.STYLE_SUPERSCRIPT,
                        self.Element.STYLE_SUBSCRIPT,
                    ):
                        # superscripts and subscripts need to be under text elements
                        if len(self.property) < 2:
                            return
                        parent_val = self.property[-2]
                        parent = self.Element(parent_val)
                        if parent.is_text:
                            self.text += self._apply_style(content, elm_val)
                    else:
                        self.text += content
        def _start_registered_elements(
            self, tag: str, attributes: xml.sax.xmlreader.AttributesImpl
        ) -> None:
            """Track the opening of a registered element on the property stack."""
            if tag in [member.value for member in self.Element]:
                # special case for claims: claim lines may start before the
                # previous one is closed
                if (
                    tag == self.Element.CLAIM_TEXT.value
                    and self.property
                    and self.property[-1] == tag
                    and self.text.strip()
                ):
                    self.claim += " " + self.text.strip()
                    self.text = ""
                elif tag == self.Element.HEADING.value:
                    level_attr: str = attributes.get("level", "")
                    new_level: int = int(level_attr) if level_attr.isnumeric() else 1
                    max_level = min(self.parents.keys())
                    # increase heading level with 1 for title, if any
                    self.level = (
                        new_level + 1 if (new_level + 1) in self.parents else max_level
                    )
                self.property.append(tag)
        def _end_registered_element(self, tag: str) -> None:
            """Pop a registered element and commit its accumulated text."""
            if tag in [item.value for item in self.Element] and self.property:
                current_tag = self.property.pop()
                self._add_property(current_tag, self.text.strip())
        def _add_property(self, name: str, text: str) -> None:
            """Commit the closed element's content to the docling document."""
            if not name or not self.doc:
                return
            if name == self.Element.TITLE.value:
                if text:
                    self.parents[self.level + 1] = self.doc.add_title(
                        parent=self.parents[self.level],
                        text=text,
                    )
                    self.level += 1
                self.text = ""
            elif name == self.Element.ABSTRACT.value:
                if self.abstract:
                    heading_text = PatentHeading.ABSTRACT.value
                    heading_level = (
                        PatentHeading.ABSTRACT.level
                        if PatentHeading.ABSTRACT.level in self.parents
                        else 1
                    )
                    abstract_item = self.doc.add_heading(
                        heading_text,
                        level=heading_level,
                        parent=self.parents[heading_level],
                    )
                    self.doc.add_text(
                        label=DocItemLabel.PARAGRAPH,
                        text=self.abstract,
                        parent=abstract_item,
                    )
            elif name == self.Element.CLAIM_TEXT.value:
                text = re.sub("\\s+", " ", text).strip()
                if text:
                    self.claim += " " + text
                self.text = ""
            elif name == self.Element.CLAIM.value and self.claim:
                self.claims.append(self.claim.strip())
                self.claim = ""
            elif name == self.Element.CLAIMS.value and self.claims:
                heading_text = PatentHeading.CLAIMS.value
                heading_level = (
                    PatentHeading.CLAIMS.level
                    if PatentHeading.CLAIMS.level in self.parents
                    else 1
                )
                claims_item = self.doc.add_heading(
                    heading_text,
                    level=heading_level,
                    parent=self.parents[heading_level],
                )
                for text in self.claims:
                    self.doc.add_text(
                        label=DocItemLabel.PARAGRAPH, text=text, parent=claims_item
                    )
            elif name == self.Element.PARAGRAPH.value and text:
                # remove blank spaces added in paragraphs
                text = re.sub("\\s+", " ", text)
                if self.Element.ABSTRACT.value in self.property:
                    # paragraphs inside the abstract are buffered until it closes
                    self.abstract = (
                        (self.abstract + " " + text) if self.abstract else text
                    )
                else:
                    self.doc.add_text(
                        label=DocItemLabel.PARAGRAPH,
                        text=text,
                        parent=self.parents[self.level],
                    )
                self.text = ""
            elif name == self.Element.HEADING.value and text:
                self.parents[self.level + 1] = self.doc.add_heading(
                    text=text,
                    level=self.level,
                    parent=self.parents[self.level],
                )
                self.level += 1
                self.text = ""
            elif name == self.Element.TABLE.value:
                # set an empty table as placeholder
                empty_table = TableData(num_rows=0, num_cols=0, table_cells=[])
                self.doc.add_table(
                    data=empty_table,
                    parent=self.parents[self.level],
                )
        def _apply_style(self, text: str, style_tag: str) -> str:
            """Apply an HTML style to text.
            Args:
                text: A string containing plain text.
                style_tag: An HTML tag name for styling text. If the tag name is not
                  recognized as one of the supported styles, the method will return
                  the original `text`.
            Returns:
                A string after applying the style.
            """
            formatted = text
            if style_tag == self.Element.STYLE_SUPERSCRIPT.value:
                formatted = html.unescape(self.style_html.get_superscript(text))
            elif style_tag == self.Element.STYLE_SUBSCRIPT.value:
                formatted = html.unescape(self.style_html.get_subscript(text))
            return formatted
        def _clean_data(self) -> None:
            """Reset the variables from stream data."""
            self.property = []
            self.claim = ""
            self.claims = []
            self.abstract = ""
class PatentUsptoGrantV2(PatentUspto):
    """Parser of patent documents from the US Patent Office (grants v2.5).

    The compatible format is:
    - Patent Grant Full Text Data/XML Version 2.5 (from January 2002 till December 2004)
    """

    @override
    def __init__(self) -> None:
        """Build an instance of PatentUsptoGrantV2 class."""
        # SAX handler that incrementally builds the DoclingDocument.
        self.handler = PatentUsptoGrantV2.PatentHandler()
        # Raw <table> markup is extracted in a second pass and parsed with
        # XmlTable, since the SAX handler only inserts empty placeholders.
        self.pattern = re.compile(r"^(<table .*?</table>)", re.MULTILINE | re.DOTALL)

    @override
    def parse(self, patent_content: str) -> Optional[DoclingDocument]:
        """Parse a v2.5 patent grant and return a DoclingDocument.

        Returns:
            The parsed document, or None when the content is not valid XML.
        """
        try:
            xml.sax.parseString(patent_content, self.handler)
        except xml.sax._exceptions.SAXParseException as exc_sax:
            _log.error(f"Error in parsing USPTO document: {exc_sax}")
            return None
        doc = self.handler.doc
        if doc:
            # Second pass: parse the raw table markup that the SAX handler skipped.
            raw_tables = re.findall(self.pattern, patent_content)
            parsed_tables: list[TableData] = []
            _log.debug(f"Found {len(raw_tables)} tables to be parsed with XmlTable.")
            for table in raw_tables:
                table_parser = XmlTable(XML_DECLARATION + "\n" + table)
                try:
                    table_data = table_parser.parse()
                    if table_data:
                        parsed_tables.append(table_data)
                except Exception as exc_table:
                    _log.error(f"Error in parsing USPTO tables: {exc_table}")
            # Replace the placeholder tables only if counts match one-to-one;
            # otherwise leave the placeholders in place and report the mismatch.
            if len(parsed_tables) != len(doc.tables):
                _log.error(
                    f"Number of referenced ({len(doc.tables)}) and parsed "
                    f"({len(parsed_tables)}) tables differ."
                )
            else:
                for idx, item in enumerate(parsed_tables):
                    doc.tables[idx].data = item
        return doc

    class PatentHandler(xml.sax.handler.ContentHandler):
        """SAX ContentHandler for patent documents."""

        # Root element of a v2.5 patent grant document.
        GRANT_DOC_ELEMENT: Final = "PATDOC"
        CLAIM_STATEMENT: Final = "What is claimed is:"

        @unique
        class Element(Enum):
            """Represents an element of interest in the patent application document."""

            # Each member carries (tag value, is_text): is_text marks tags
            # whose character data should be accumulated into self.text.
            PDAT = "PDAT", True  # any type of data
            ABSTRACT = ("SDOAB", False)
            SDOCL = ("SDOCL", False)
            TITLE = ("B540", False)
            CLAIMS = ("CL", False)
            CLAIM = ("CLM", False)
            PARAGRAPH = ("PARA", True)
            HEADING = ("H", True)
            DRAWINGS = ("DRWDESC", False)
            STYLE_SUPERSCRIPT = ("SP", False)
            STYLE_SUBSCRIPT = ("SB", False)
            STYLE_ITALIC = ("ITALIC", False)
            CWU = ("CWU", False)  # avoid tables, chemicals, formulas
            TABLE = ("table", False)  # to keep track of table positions

            @override
            def __new__(cls, value: str, _) -> Self:
                # Only the first tuple item is the enum value; the second is
                # consumed by __init__ as the is_text flag.
                obj = object.__new__(cls)
                obj._value_ = value
                return obj

            @override
            def __init__(self, _, is_text: bool) -> None:
                self.is_text: bool = is_text

        @override
        def __init__(self) -> None:
            """Build an instance of the patent handler."""
            # Current patent being parsed
            self.doc: Optional[DoclingDocument] = None
            # Keep track of docling hierarchy level
            self.level: LevelNumber = 1
            # Keep track of docling parents by level
            self.parents: dict[LevelNumber, Optional[DocItem]] = {1: None}
            # Content to retain for the current patent
            self.property: list[str]
            self.claim: str
            self.claims: list[str]
            self.paragraph: str
            self.abstract: str
            self._clean_data()
            # To handle mathematical styling
            self.style_html = HtmlEntity()

        @override
        def startElement(self, tag, attributes):
            """Signal the start of an element.

            Args:
                tag: The element tag.
                attributes: The element attributes.
            """
            if tag == self.GRANT_DOC_ELEMENT:
                self.doc = DoclingDocument(name="file")
                self.text = ""
            self._start_registered_elements(tag, attributes)

        @override
        def skippedEntity(self, name):
            """Receive notification of a skipped entity.

            HTML entities will be skipped by the parser. This method will unescape them
            and add them to the text.

            Args:
                name: Entity name.
            """
            if self.property:
                elm_val = self.property[-1]
                element = self.Element(elm_val)
                if element.is_text:
                    # Map ISO 8879 Greek entities before generic unescaping.
                    escaped = self.style_html.get_greek_from_iso8879(f"&{name};")
                    unescaped = html.unescape(escaped)
                    if unescaped == escaped:
                        logging.debug("Unrecognized HTML entity: " + name)
                        return
                    if element in (
                        self.Element.STYLE_SUPERSCRIPT,
                        self.Element.STYLE_SUBSCRIPT,
                    ):
                        # superscripts and subscripts need to be under text elements
                        if len(self.property) < 2:
                            return
                        parent_val = self.property[-2]
                        parent = self.Element(parent_val)
                        if parent.is_text:
                            self.text += self._apply_style(unescaped, elm_val)
                    else:
                        self.text += unescaped

        @override
        def endElement(self, tag):
            """Signal the end of an element.

            Args:
                tag: The element tag.
            """
            if tag == self.GRANT_DOC_ELEMENT:
                self._clean_data()
            self._end_registered_element(tag)

        @override
        def characters(self, content):
            """Receive notification of character data.

            Args:
                content: Data reported by the handler.
            """
            if self.property:
                elm_val = self.property[-1]
                element = self.Element(elm_val)
                if element.is_text:
                    if element in (
                        self.Element.STYLE_SUPERSCRIPT,
                        self.Element.STYLE_SUBSCRIPT,
                    ):
                        # superscripts and subscripts need to be under text elements
                        if len(self.property) < 2:
                            return
                        parent_val = self.property[-2]
                        parent = self.Element(parent_val)
                        if parent.is_text:
                            self.text += self._apply_style(content, elm_val)
                    else:
                        self.text += content

        def _start_registered_elements(
            self, tag: str, attributes: xml.sax.xmlreader.AttributesImpl
        ) -> None:
            # Push registered tags on the property stack; headings also
            # recompute the current docling hierarchy level from LVL.
            if tag in [member.value for member in self.Element]:
                if (
                    tag == self.Element.HEADING.value
                    and self.Element.SDOCL.value not in self.property
                ):
                    level_attr: str = attributes.get("LVL", "")
                    new_level: int = int(level_attr) if level_attr.isnumeric() else 1
                    # NOTE(review): named max_level but assigned min(...) —
                    # apparently the smallest registered level is used as the
                    # fallback; verify this is intentional.
                    max_level = min(self.parents.keys())
                    # increase heading level with 1 for title, if any
                    self.level = (
                        new_level + 1 if (new_level + 1) in self.parents else max_level
                    )
                self.property.append(tag)

        def _end_registered_element(self, tag: str) -> None:
            # Pop the tag and flush the accumulated text into the document.
            if tag in [elm.value for elm in self.Element] and self.property:
                current_tag = self.property.pop()
                self._add_property(current_tag, self.text)

        def _add_property(self, name: str, text: str) -> None:
            # Route the text accumulated under *name* into the right document
            # item (title, abstract, claim, paragraph, heading, table).
            if not name or not self.doc:
                return
            if name == self.Element.PDAT.value and text:
                if not self.property:
                    self.text = ""
                    return
                # The enclosing tag decides how the PDAT payload is styled.
                wrapper = self.property[-1]
                text = self._apply_style(text, wrapper)
                if self.Element.TITLE.value in self.property and text.strip():
                    title = text.strip()
                    self.parents[self.level + 1] = self.doc.add_title(
                        parent=self.parents[self.level],
                        text=title,
                    )
                    self.level += 1
                elif self.Element.ABSTRACT.value in self.property:
                    self.abstract += text
                elif self.Element.CLAIM.value in self.property:
                    self.claim += text
                # Paragraph text not in claims or abstract
                elif (
                    self.Element.PARAGRAPH.value in self.property
                    and self.Element.CLAIM.value not in self.property
                    and self.Element.ABSTRACT.value not in self.property
                ):
                    self.paragraph += text
                # headers except claims statement
                elif (
                    self.Element.HEADING.value in self.property
                    and self.Element.SDOCL.value not in self.property
                    and text.strip()
                ):
                    self.parents[self.level + 1] = self.doc.add_heading(
                        text=text.strip(),
                        level=self.level,
                        parent=self.parents[self.level],
                    )
                    self.level += 1
                self.text = ""
            elif name == self.Element.CLAIM.value and self.claim.strip():
                self.claims.append(self.claim.strip())
                self.claim = ""
            elif name == self.Element.CLAIMS.value and self.claims:
                # Emit a CLAIMS heading and one paragraph per collected claim.
                heading_text = PatentHeading.CLAIMS.value
                heading_level = (
                    PatentHeading.CLAIMS.level
                    if PatentHeading.CLAIMS.level in self.parents
                    else 1
                )
                claims_item = self.doc.add_heading(
                    heading_text,
                    level=heading_level,
                    parent=self.parents[heading_level],
                )
                for text in self.claims:
                    self.doc.add_text(
                        label=DocItemLabel.PARAGRAPH, text=text, parent=claims_item
                    )
            elif name == self.Element.ABSTRACT.value and self.abstract.strip():
                abstract = self.abstract.strip()
                heading_text = PatentHeading.ABSTRACT.value
                heading_level = (
                    PatentHeading.ABSTRACT.level
                    if PatentHeading.ABSTRACT.level in self.parents
                    else 1
                )
                abstract_item = self.doc.add_heading(
                    heading_text,
                    level=heading_level,
                    parent=self.parents[heading_level],
                )
                self.doc.add_text(
                    label=DocItemLabel.PARAGRAPH, text=abstract, parent=abstract_item
                )
            elif name == self.Element.PARAGRAPH.value:
                paragraph = self.paragraph.strip()
                if paragraph and self.Element.CLAIM.value not in self.property:
                    self.doc.add_text(
                        label=DocItemLabel.PARAGRAPH,
                        text=paragraph,
                        parent=self.parents[self.level],
                    )
                elif self.Element.CLAIM.value in self.property:
                    # we may need a space after a paragraph in claim text
                    self.claim += " "
                self.paragraph = ""
            elif name == self.Element.TABLE.value:
                # set an empty table as placeholder
                empty_table = TableData(num_rows=0, num_cols=0, table_cells=[])
                self.doc.add_table(
                    data=empty_table,
                    parent=self.parents[self.level],
                )

        def _apply_style(self, text: str, style_tag: str) -> str:
            """Apply an HTML style to text.

            Args:
                text: A string containing plain text.
                style_tag: An HTML tag name for styling text. If the tag name is not
                    recognized as one of the supported styles, the method will return
                    the original `text`.

            Returns:
                A string after applying the style.
            """
            formatted = text
            if style_tag == self.Element.STYLE_SUPERSCRIPT.value:
                formatted = html.unescape(self.style_html.get_superscript(text))
            elif style_tag == self.Element.STYLE_SUBSCRIPT.value:
                formatted = html.unescape(self.style_html.get_subscript(text))
            elif style_tag == self.Element.STYLE_ITALIC.value:
                formatted = html.unescape(self.style_html.get_math_italic(text))
            return formatted

        def _clean_data(self) -> None:
            """Reset the variables from stream data."""
            self.text = ""
            self.property = []
            self.claim = ""
            self.claims = []
            self.paragraph = ""
            self.abstract = ""
class PatentUsptoGrantAps(PatentUspto):
"""Parser of patents documents from the US Patent Office (grants APS).
The compatible format is:
- Patent Grant Full Text Data/APS (from January 1976 till December 2001)
"""
@unique
class Section(Enum):
"""Represent a section in a patent APS document."""
ABSTRACT = "ABST"
SUMMARY = "BSUM"
DETAILS = "DETD"
CLAIMS = "CLMS"
DRAWINGS = "DRWD"
@unique
class Field(Enum):
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | true |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/json/docling_json_backend.py | docling/backend/json/docling_json_backend.py | from io import BytesIO
from pathlib import Path
from typing import Union
from docling_core.types.doc import DoclingDocument
from typing_extensions import override
from docling.backend.abstract_backend import DeclarativeDocumentBackend
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
class DoclingJSONBackend(DeclarativeDocumentBackend):
    """Declarative backend that loads a DoclingDocument serialized as JSON."""

    @override
    def __init__(
        self, in_doc: InputDocument, path_or_stream: Union[BytesIO, Path]
    ) -> None:
        super().__init__(in_doc, path_or_stream)
        # Deserialize eagerly: keep either the parsed document or the error
        # that occurred, so convert() can re-raise the real failure later.
        self._doc_or_err = self._get_doc_or_err()

    @override
    def is_valid(self) -> bool:
        """True when the input deserialized into a DoclingDocument."""
        return isinstance(self._doc_or_err, DoclingDocument)

    @classmethod
    @override
    def supports_pagination(cls) -> bool:
        """Docling JSON has no page structure to iterate."""
        return False

    @classmethod
    @override
    def supported_formats(cls) -> set[InputFormat]:
        return {InputFormat.JSON_DOCLING}

    def _get_doc_or_err(self) -> Union[DoclingDocument, Exception]:
        """Deserialize the input, returning the document or the failure."""
        try:
            src = self.path_or_stream
            json_data: Union[str, bytes]
            if isinstance(src, Path):
                json_data = src.read_text(encoding="utf-8")
            elif isinstance(src, BytesIO):
                json_data = src.getvalue()
            else:
                raise RuntimeError(f"Unexpected: {type(self.path_or_stream)=}")
            return DoclingDocument.model_validate_json(json_data=json_data)
        except Exception as e:
            return e

    @override
    def convert(self) -> DoclingDocument:
        """Return the parsed document, or re-raise the captured load error."""
        if isinstance(self._doc_or_err, DoclingDocument):
            return self._doc_or_err
        raise self._doc_or_err
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/json/__init__.py | docling/backend/json/__init__.py | python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false | |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/docx/__init__.py | docling/backend/docx/__init__.py | python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false | |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/docx/latex/omml.py | docling/backend/docx/latex/omml.py | """
Office Math Markup Language (OMML)
Adapted from https://github.com/xiilei/dwml/blob/master/dwml/omml.py
On 23/01/2025
"""
import logging
import lxml.etree as ET
from pylatexenc.latexencode import UnicodeToLatexEncoder
from docling.backend.docx.latex.latex_dict import (
ALN,
ARR,
BACKSLASH,
BLANK,
BRK,
CHARS,
CHR,
CHR_BO,
CHR_DEFAULT,
D_DEFAULT,
F_DEFAULT,
FUNC,
FUNC_PLACE,
LIM_FUNC,
LIM_TO,
LIM_UPP,
POS,
POS_DEFAULT,
RAD,
RAD_DEFAULT,
SUB,
SUP,
D,
F,
M,
T,
)
OMML_NS = "{http://schemas.openxmlformats.org/officeDocument/2006/math}"
_log = logging.getLogger(__name__)
def load(stream):
    """Parse *stream* as OMML XML and yield an oMath2Latex per <m:oMath>."""
    parsed_tree = ET.parse(stream)
    omath_tag = OMML_NS + "oMath"
    for node in parsed_tree.findall(omath_tag):
        yield oMath2Latex(node)
def load_string(string):
    """Parse an OMML XML string and yield an oMath2Latex per <m:oMath>."""
    document_root = ET.fromstring(string)
    for node in document_root.findall(OMML_NS + "oMath"):
        yield oMath2Latex(node)
def escape_latex(strs):
    """Backslash-escape LaTeX special characters not already escaped.

    A character from CHARS is escaped only when the previous character is not
    a backslash, so pre-escaped sequences are left alone.
    """
    # Collapse literal double backslashes first so the already-escaped check
    # below sees a single backslash.
    collapsed = strs.replace(r"\\", "\\")
    pieces = []
    prev = None
    for ch in collapsed:
        needs_escape = (ch in CHARS) and (prev != BACKSLASH)
        pieces.append(BACKSLASH + ch if needs_escape else ch)
        prev = ch
    return BLANK.join(pieces)
def get_val(key, default=None, store=CHR):
    """Look *key* up in *store*; fall back to the key itself, or *default*.

    A None key yields *default*; a falsy *store* yields the key unchanged.
    """
    if key is None:
        return default
    return store.get(key, key) if store else key
class Tag2Method:
    """Base dispatcher that routes OMML elements to ``do_<tag>`` handlers.

    Subclasses supply a ``tag2meth`` mapping from the element's local tag
    name to a handler; tags without an entry fall back to
    :meth:`process_unknow`, which subclasses may override.
    """

    def call_method(self, elm, stag=None):
        # Resolve the handler from the subclass-provided tag2meth table;
        # returns None when no handler is registered for the tag.
        getmethod = self.tag2meth.get
        if stag is None:
            stag = elm.tag.replace(OMML_NS, "")
        method = getmethod(stag)
        if method:
            return method(self, elm)
        else:
            return None

    def process_children_list(self, elm, include=None):
        """
        process children of the elm,return iterable
        """
        # Yields (local_tag, handler_result, element) for each OMML child;
        # non-OMML children and (optionally) tags outside *include* are
        # skipped, as are children whose handlers produced nothing.
        for _e in list(elm):
            if OMML_NS not in _e.tag:
                continue
            stag = _e.tag.replace(OMML_NS, "")
            if include and (stag not in include):
                continue
            t = self.call_method(_e, stag=stag)
            if t is None:
                t = self.process_unknow(_e, stag)
                if t is None:
                    continue
            yield (stag, t, _e)

    def process_children_dict(self, elm, include=None):
        """
        process children of the elm,return dict
        """
        # Keyed by tag name; a repeated tag keeps only the last result.
        latex_chars = dict()
        for stag, t, e in self.process_children_list(elm, include):
            latex_chars[stag] = t
        return latex_chars

    def process_children(self, elm, include=None):
        """
        process children of the elm,return string
        """
        # Pr results stringify via their __str__ (the captured text).
        return BLANK.join(
            (
                t if not isinstance(t, Tag2Method) else str(t)
                for stag, t, e in self.process_children_list(elm, include)
            )
        )

    def process_unknow(self, elm, stag):
        # Hook for unhandled tags; base implementation ignores them.
        return None
class Pr(Tag2Method):
    """Common properties of an OMML element (chr, pos, begChr, endChr, type).

    Captured property values are exposed as attributes via __getattr__,
    e.g. ``pr.begChr`` or ``pr.brk``; missing properties resolve to None.
    """
    # NOTE: the docstring above was previously a stray string literal placed
    # after the class attributes, where it was dead code instead of __doc__.

    text = ""

    # Property tags whose m:val attribute is captured verbatim.
    __val_tags = ("chr", "pos", "begChr", "endChr", "type")

    __innerdict = None  # can't use the __dict__

    def __init__(self, elm):
        self.__innerdict = {}
        self.text = self.process_children(elm)

    def __str__(self):
        return self.text

    def __unicode__(self):
        # Bug fix: was `self.__str__(self)`, which passes self twice and
        # raises TypeError whenever this legacy Py2-style alias is called.
        return self.__str__()

    def __getattr__(self, name):
        # Unknown attribute lookups resolve to captured property values.
        return self.__innerdict.get(name, None)

    def do_brk(self, elm):
        # Line break marker: record it and contribute BRK to the text.
        self.__innerdict["brk"] = BRK
        return BRK

    def do_common(self, elm):
        # Capture the m:val attribute of the supported property tags.
        stag = elm.tag.replace(OMML_NS, "")
        if stag in self.__val_tags:
            t = elm.get(f"{OMML_NS}val")
            self.__innerdict[stag] = t
        return None

    tag2meth = {
        "brk": do_brk,
        "chr": do_common,
        "pos": do_common,
        "begChr": do_common,
        "endChr": do_common,
        "type": do_common,
    }
class oMath2Latex(Tag2Method):
    """
    Convert oMath element of omml to latex
    """

    _t_dict = T

    # Tags whose children are rendered directly, without a dedicated do_* handler.
    __direct_tags = ("box", "sSub", "sSup", "sSubSup", "num", "den", "deg", "e")

    # Shared encoder for characters not covered by the T lookup table.
    u = UnicodeToLatexEncoder(
        replacement_latex_protection="braces-all",
        unknown_char_policy="keep",
        unknown_char_warning=False,
    )

    def __init__(self, element):
        # Render eagerly; the result is exposed via the `latex` property.
        self._latex = self.process_children(element)

    def __str__(self):
        # NOTE(review): both replace() arguments render as a plain space here;
        # one of them may originally be a different whitespace character
        # (e.g. NBSP) lost in transcription — verify against upstream.
        return self.latex.replace(" ", " ")

    def __unicode__(self):
        # Bug fix: was `self.__str__(self)`, which passes self twice and
        # raises TypeError whenever this legacy Py2-style alias is called.
        return self.__str__()

    def process_unknow(self, elm, stag):
        # Direct tags: render children as-is; *Pr tags: capture properties.
        if stag in self.__direct_tags:
            return self.process_children(elm)
        elif stag[-2:] == "Pr":
            return Pr(elm)
        else:
            return None

    @property
    def latex(self):
        """The rendered LaTeX string."""
        return self._latex

    def do_acc(self, elm):
        """
        the accent function
        """
        c_dict = self.process_children_dict(elm)
        latex_s = get_val(
            c_dict["accPr"].chr, default=CHR_DEFAULT.get("ACC_VAL"), store=CHR
        )
        return latex_s.format(c_dict["e"])

    def do_bar(self, elm):
        """
        the bar function
        """
        c_dict = self.process_children_dict(elm)
        pr = c_dict["barPr"]
        latex_s = get_val(pr.pos, default=POS_DEFAULT.get("BAR_VAL"), store=POS)
        return pr.text + latex_s.format(c_dict["e"])

    def do_d(self, elm):
        """
        the delimiter object
        """
        c_dict = self.process_children_dict(elm)
        pr = c_dict["dPr"]
        null = D_DEFAULT.get("null")
        # Explicit begChr/endChr override the default parentheses; an empty
        # delimiter renders as "." (LaTeX's invisible \left/\right token).
        s_val = get_val(pr.begChr, default=D_DEFAULT.get("left"), store=T)
        e_val = get_val(pr.endChr, default=D_DEFAULT.get("right"), store=T)
        delim = pr.text + D.format(
            left=null if not s_val else escape_latex(s_val),
            text=c_dict["e"],
            right=null if not e_val else escape_latex(e_val),
        )
        return delim

    def do_spre(self, elm):
        """
        the Pre-Sub-Superscript object -- Not support yet
        """

    def do_sub(self, elm):
        text = self.process_children(elm)
        return SUB.format(text)

    def do_sup(self, elm):
        text = self.process_children(elm)
        return SUP.format(text)

    def do_f(self, elm):
        """
        the fraction object
        """
        c_dict = self.process_children_dict(elm)
        pr = c_dict.get("fPr")
        if pr is None:
            # Handle missing fPr element gracefully
            _log.debug("Missing fPr element in fraction, using default formatting")
            latex_s = F_DEFAULT
            return latex_s.format(
                num=c_dict.get("num"),
                den=c_dict.get("den"),
            )
        latex_s = get_val(pr.type, default=F_DEFAULT, store=F)
        return pr.text + latex_s.format(num=c_dict.get("num"), den=c_dict.get("den"))

    def do_func(self, elm):
        """
        the Function-Apply object (Examples:sin cos)
        """
        c_dict = self.process_children_dict(elm)
        func_name = c_dict.get("fName")
        # NOTE(review): raises TypeError if the "e" child is absent
        # (str.replace with None) — presumably the schema guarantees it.
        return func_name.replace(FUNC_PLACE, c_dict.get("e"))

    def do_fname(self, elm):
        """
        the func name
        """
        latex_chars = []
        for stag, t, e in self.process_children_list(elm):
            if stag == "r":
                if FUNC.get(t):
                    latex_chars.append(FUNC[t])
                else:
                    _log.warning("Function not supported, will default to text: %s", t)
                    if isinstance(t, str):
                        latex_chars.append(t)
            elif isinstance(t, str):
                latex_chars.append(t)
        t = BLANK.join(latex_chars)
        # do_func will replace the {fe} placeholder with the argument.
        return t if FUNC_PLACE in t else t + FUNC_PLACE

    def do_groupchr(self, elm):
        """
        the Group-Character object
        """
        c_dict = self.process_children_dict(elm)
        pr = c_dict["groupChrPr"]
        latex_s = get_val(pr.chr)
        return pr.text + latex_s.format(c_dict["e"])

    def do_rad(self, elm):
        """
        the radical object
        """
        c_dict = self.process_children_dict(elm)
        text = c_dict.get("e")
        deg_text = c_dict.get("deg")
        if deg_text:
            return RAD.format(deg=deg_text, text=text)
        else:
            return RAD_DEFAULT.format(text=text)

    def do_eqarr(self, elm):
        """
        the Array object
        """
        return ARR.format(
            text=BRK.join(
                [t for stag, t, e in self.process_children_list(elm, include=("e",))]
            )
        )

    def do_limlow(self, elm):
        """
        the Lower-Limit object
        """
        t_dict = self.process_children_dict(elm, include=("e", "lim"))
        latex_s = LIM_FUNC.get(t_dict["e"])
        if not latex_s:
            raise RuntimeError("Not support lim {}".format(t_dict["e"]))
        else:
            return latex_s.format(lim=t_dict.get("lim"))

    def do_limupp(self, elm):
        """
        the Upper-Limit object
        """
        t_dict = self.process_children_dict(elm, include=("e", "lim"))
        return LIM_UPP.format(lim=t_dict.get("lim"), text=t_dict.get("e"))

    def do_lim(self, elm):
        """
        the lower limit of the limLow object and the upper limit of the limUpp function
        """
        return self.process_children(elm).replace(LIM_TO[0], LIM_TO[1])

    def do_m(self, elm):
        """
        the Matrix object
        """
        rows = []
        for stag, t, e in self.process_children_list(elm):
            if stag == "mPr":
                pass
            elif stag == "mr":
                rows.append(t)
        return M.format(text=BRK.join(rows))

    def do_mr(self, elm):
        """
        a single row of the matrix m
        """
        return ALN.join(
            [t for stag, t, e in self.process_children_list(elm, include=("e",))]
        )

    def do_nary(self, elm):
        """
        the n-ary object
        """
        res = []
        bo = ""
        for stag, t, e in self.process_children_list(elm):
            if stag == "naryPr":
                # if <m:naryPr> contains no <m:chr>, the n-ary represents an integral
                bo = get_val(t.chr, default="\\int", store=CHR_BO)
            else:
                res.append(t)
        return bo + BLANK.join(res)

    def process_unicode(self, s):
        """Translate a single character to its LaTeX form via pylatexenc."""
        # s = s if isinstance(s,unicode) else unicode(s,'utf-8')
        # print(s, self._t_dict.get(s, s), unicode_to_latex(s))
        # _str.append( self._t_dict.get(s, s) )
        out_latex_str = self.u.unicode_to_latex(s)
        # Strip the protective braces the encoder added around the result.
        if (
            s.startswith("{") is False
            and out_latex_str.startswith("{")
            and s.endswith("}") is False
            and out_latex_str.endswith("}")
        ):
            out_latex_str = f" {out_latex_str[1:-1]} "
        if "ensuremath" in out_latex_str:
            out_latex_str = out_latex_str.replace("\\ensuremath{", " ")
            out_latex_str = out_latex_str.replace("}", " ")
        if out_latex_str.strip().startswith("\\text"):
            out_latex_str = f" \\text{{{out_latex_str}}} "
        return out_latex_str

    def do_r(self, elm):
        """
        Get text from 'r' element,And try convert them to latex symbols
        @todo text style support , (sty)
        @todo \text (latex pure text support)
        """
        _str = []
        _base_str = []
        found_text = elm.findtext(f"./{OMML_NS}t")
        if found_text:
            for s in found_text:
                out_latex_str = self.process_unicode(s)
                _str.append(out_latex_str)
                _base_str.append(s)
        proc_str = escape_latex(BLANK.join(_str))
        base_proc_str = BLANK.join(_base_str)
        # Undo escaping of braces that were introduced by the encoder rather
        # than present in the original run text.
        if "{" not in base_proc_str and "\\{" in proc_str:
            proc_str = proc_str.replace("\\{", "{")
        if "}" not in base_proc_str and "\\}" in proc_str:
            proc_str = proc_str.replace("\\}", "}")
        return proc_str

    tag2meth = {
        "acc": do_acc,
        "r": do_r,
        "bar": do_bar,
        "sub": do_sub,
        "sup": do_sup,
        "f": do_f,
        "func": do_func,
        "fName": do_fname,
        "groupChr": do_groupchr,
        "d": do_d,
        "rad": do_rad,
        "eqArr": do_eqarr,
        "limLow": do_limlow,
        "limUpp": do_limupp,
        "lim": do_lim,
        "m": do_m,
        "mr": do_mr,
        "nary": do_nary,
    }
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/docx/latex/latex_dict.py | docling/backend/docx/latex/latex_dict.py | """
Adapted from https://github.com/xiilei/dwml/blob/master/dwml/latex_dict.py
On 23/01/2025
"""
# Characters that must be backslash-escaped in LaTeX text.
CHARS = ("{", "}", "_", "^", "#", "&", "$", "%", "~")

BLANK = ""
BACKSLASH = "\\"
ALN = "&"

CHR = {
    # Unicode : Latex Math Symbols
    # Top accents
    "\u0300": "\\grave{{{0}}}",
    "\u0301": "\\acute{{{0}}}",
    "\u0302": "\\hat{{{0}}}",
    "\u0303": "\\tilde{{{0}}}",
    "\u0304": "\\bar{{{0}}}",
    "\u0305": "\\overbar{{{0}}}",
    "\u0306": "\\breve{{{0}}}",
    "\u0307": "\\dot{{{0}}}",
    "\u0308": "\\ddot{{{0}}}",
    "\u0309": "\\ovhook{{{0}}}",
    "\u030a": "\\ocirc{{{0}}}",  # fixed: had an extra closing brace
    "\u030c": "\\check{{{0}}}",  # fixed: had an extra closing brace
    "\u0310": "\\candra{{{0}}}",
    "\u0312": "\\oturnedcomma{{{0}}}",
    "\u0315": "\\ocommatopright{{{0}}}",
    "\u031a": "\\droang{{{0}}}",
    "\u0338": "\\not{{{0}}}",
    "\u20d0": "\\leftharpoonaccent{{{0}}}",
    "\u20d1": "\\rightharpoonaccent{{{0}}}",
    "\u20d2": "\\vertoverlay{{{0}}}",
    "\u20d6": "\\overleftarrow{{{0}}}",
    "\u20d7": "\\vec{{{0}}}",
    "\u20db": "\\dddot{{{0}}}",
    "\u20dc": "\\ddddot{{{0}}}",
    "\u20e1": "\\overleftrightarrow{{{0}}}",
    "\u20e7": "\\annuity{{{0}}}",
    "\u20e9": "\\widebridgeabove{{{0}}}",
    "\u20f0": "\\asteraccent{{{0}}}",
    # Bottom accents
    "\u0330": "\\wideutilde{{{0}}}",
    "\u0331": "\\underbar{{{0}}}",
    "\u20e8": "\\threeunderdot{{{0}}}",
    "\u20ec": "\\underrightharpoondown{{{0}}}",
    "\u20ed": "\\underleftharpoondown{{{0}}}",
    "\u20ee": "\\underleftarrow{{{0}}}",  # fixed: typo "underledtarrow"
    "\u20ef": "\\underrightarrow{{{0}}}",
    # Over | group
    "\u23b4": "\\overbracket{{{0}}}",
    "\u23dc": "\\overparen{{{0}}}",
    "\u23de": "\\overbrace{{{0}}}",
    # Under| group
    "\u23b5": "\\underbracket{{{0}}}",
    "\u23dd": "\\underparen{{{0}}}",
    "\u23df": "\\underbrace{{{0}}}",
}

CHR_BO = {
    # Big operators,
    "\u2140": "\\Bbbsum",
    "\u220f": "\\prod",
    "\u2210": "\\coprod",
    "\u2211": "\\sum",
    "\u222b": "\\int",
    "\u222c": "\\iint",
    "\u222d": "\\iiint",
    "\u222e": "\\oint",
    "\u222f": "\\oiint",
    "\u2230": "\\oiiint",
    "\u22c0": "\\bigwedge",
    "\u22c1": "\\bigvee",
    "\u22c2": "\\bigcap",
    "\u22c3": "\\bigcup",
    "\u2a00": "\\bigodot",
    "\u2a01": "\\bigoplus",
    "\u2a02": "\\bigotimes",
}

T = {
    # Greek letters
    "\U0001d6fc": "\\alpha ",
    "\U0001d6fd": "\\beta ",
    "\U0001d6fe": "\\gamma ",
    "\U0001d6ff": "\\delta ",  # fixed: U+1D6FF is italic delta, was "\theta"
    "\U0001d700": "\\epsilon ",
    "\U0001d701": "\\zeta ",
    "\U0001d702": "\\eta ",
    "\U0001d703": "\\theta ",
    "\U0001d704": "\\iota ",
    "\U0001d705": "\\kappa ",
    "\U0001d706": "\\lambda ",
    "\U0001d707": "\\mu ",  # fixed: was "\m", not a LaTeX command
    "\U0001d708": "\\nu ",  # fixed: was "\n", not a LaTeX command
    "\U0001d709": "\\xi ",
    "\U0001d70a": "\\omicron ",
    "\U0001d70b": "\\pi ",
    "\U0001d70c": "\\rho ",
    "\U0001d70d": "\\varsigma ",
    "\U0001d70e": "\\sigma ",
    "\U0001d70f": "\\tau ",  # fixed: was "\ta", not a LaTeX command
    "\U0001d710": "\\upsilon ",
    "\U0001d711": "\\phi ",
    "\U0001d712": "\\chi ",
    "\U0001d713": "\\psi ",
    "\U0001d714": "\\omega ",
    "\U0001d715": "\\partial ",
    "\U0001d716": "\\varepsilon ",
    "\U0001d717": "\\vartheta ",
    "\U0001d718": "\\varkappa ",
    "\U0001d719": "\\varphi ",
    "\U0001d71a": "\\varrho ",
    "\U0001d71b": "\\varpi ",
    # Relation symbols
    "\u2190": "\\leftarrow ",
    "\u2191": "\\uparrow ",
    "\u2192": "\\rightarrow ",
    "\u2193": "\\downarrow ",  # fixed: was "\downright", not a LaTeX command
    "\u2194": "\\leftrightarrow ",
    "\u2195": "\\updownarrow ",
    "\u2196": "\\nwarrow ",
    "\u2197": "\\nearrow ",
    "\u2198": "\\searrow ",
    "\u2199": "\\swarrow ",
    "\u22ee": "\\vdots ",
    "\u22ef": "\\cdots ",
    "\u22f0": "\\adots ",
    "\u22f1": "\\ddots ",
    "\u2260": "\\ne ",
    "\u2264": "\\leq ",
    "\u2265": "\\geq ",
    "\u2266": "\\leqq ",
    "\u2267": "\\geqq ",
    "\u2268": "\\lneqq ",
    "\u2269": "\\gneqq ",
    "\u226a": "\\ll ",
    "\u226b": "\\gg ",
    "\u2208": "\\in ",
    "\u2209": "\\notin ",
    "\u220b": "\\ni ",
    # NOTE(review): \nni requires unicode-math/txfonts; plain LaTeX spells
    # "not ni" as \not\ni — kept as-is pending confirmation of the target.
    "\u220c": "\\nni ",
    # Ordinary symbols
    "\u221e": "\\infty ",
    # Binary relations
    "\u00b1": "\\pm ",
    "\u2213": "\\mp ",
    # Italic, Latin, uppercase
    "\U0001d434": "A",
    "\U0001d435": "B",
    "\U0001d436": "C",
    "\U0001d437": "D",
    "\U0001d438": "E",
    "\U0001d439": "F",
    "\U0001d43a": "G",
    "\U0001d43b": "H",
    "\U0001d43c": "I",
    "\U0001d43d": "J",
    "\U0001d43e": "K",
    "\U0001d43f": "L",
    "\U0001d440": "M",
    "\U0001d441": "N",
    "\U0001d442": "O",
    "\U0001d443": "P",
    "\U0001d444": "Q",
    "\U0001d445": "R",
    "\U0001d446": "S",
    "\U0001d447": "T",
    "\U0001d448": "U",
    "\U0001d449": "V",
    "\U0001d44a": "W",
    "\U0001d44b": "X",
    "\U0001d44c": "Y",
    "\U0001d44d": "Z",
    # Italic, Latin, lowercase
    "\U0001d44e": "a",
    "\U0001d44f": "b",
    "\U0001d450": "c",
    "\U0001d451": "d",
    "\U0001d452": "e",
    "\U0001d453": "f",
    "\U0001d454": "g",
    "\U0001d456": "i",
    "\U0001d457": "j",
    "\U0001d458": "k",
    "\U0001d459": "l",
    "\U0001d45a": "m",
    "\U0001d45b": "n",
    "\U0001d45c": "o",
    "\U0001d45d": "p",
    "\U0001d45e": "q",
    "\U0001d45f": "r",
    "\U0001d460": "s",
    "\U0001d461": "t",
    "\U0001d462": "u",
    "\U0001d463": "v",
    "\U0001d464": "w",
    "\U0001d465": "x",
    "\U0001d466": "y",
    "\U0001d467": "z",
}

FUNC = {
    "sin": "\\sin({fe})",
    "cos": "\\cos({fe})",
    "tan": "\\tan({fe})",
    "arcsin": "\\arcsin({fe})",
    "arccos": "\\arccos({fe})",
    "arctan": "\\arctan({fe})",
    "arccot": "\\arccot({fe})",
    "sinh": "\\sinh({fe})",
    "cosh": "\\cosh({fe})",
    "tanh": "\\tanh({fe})",
    "coth": "\\coth({fe})",
    "sec": "\\sec({fe})",
    "csc": "\\csc({fe})",
    "mod": "\\mod {fe}",
    "max": "\\max({fe})",
    "min": "\\min({fe})",
}

# Placeholder substituted with the function argument by do_func.
FUNC_PLACE = "{fe}"

BRK = "\\\\"

CHR_DEFAULT = {
    "ACC_VAL": "\\hat{{{0}}}",
}

POS = {
    "top": "\\overline{{{0}}}",  # not sure
    "bot": "\\underline{{{0}}}",
}

POS_DEFAULT = {
    "BAR_VAL": "\\overline{{{0}}}",
}

SUB = "_{{{0}}}"

SUP = "^{{{0}}}"

F = {
    "bar": "\\frac{{{num}}}{{{den}}}",
    "skw": r"^{{{num}}}/_{{{den}}}",
    "noBar": "\\genfrac{{}}{{}}{{0pt}}{{}}{{{num}}}{{{den}}}",
    "lin": "{{{num}}}/{{{den}}}",
}

F_DEFAULT = "\\frac{{{num}}}{{{den}}}"

D = "\\left{left}{text}\\right{right}"

D_DEFAULT = {
    "left": "(",
    "right": ")",
    "null": ".",
}

RAD = "\\sqrt[{deg}]{{{text}}}"

RAD_DEFAULT = "\\sqrt{{{text}}}"

ARR = "{text}"

LIM_FUNC = {
    "lim": "\\lim_{{{lim}}}",
    "max": "\\max_{{{lim}}}",
    "min": "\\min_{{{lim}}}",
}

LIM_TO = ("\\rightarrow", "\\to")

LIM_UPP = "\\overset{{{lim}}}{{{text}}}"

M = "\\begin{{matrix}}{text}\\end{{matrix}}"
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/docx/latex/__init__.py | docling/backend/docx/latex/__init__.py | python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false | |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/backend/docx/drawingml/utils.py | docling/backend/docx/drawingml/utils.py | import os
import shutil
import subprocess
from pathlib import Path
from tempfile import mkdtemp
from typing import Callable, Optional
import pypdfium2
from docx.document import Document
from PIL import Image, ImageChops
def get_libreoffice_cmd(raise_if_unavailable: bool = False) -> Optional[str]:
    """Return the libreoffice cmd and optionally test it."""
    mac_soffice = "/Applications/LibreOffice.app/Contents/MacOS/soffice"
    # Prefer a binary on PATH; fall back to the default macOS install location.
    cmd: Optional[str] = shutil.which("libreoffice") or shutil.which("soffice")
    if not cmd:
        cmd = mac_soffice if os.path.isfile(mac_soffice) else None
    if raise_if_unavailable:
        if cmd is None:
            raise RuntimeError("Libreoffice not found")
        # The following invocation will raise if the command cannot be used.
        subprocess.run(
            [
                cmd,
                "-h",
            ],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            check=True,
        )
    return cmd
def get_docx_to_pdf_converter() -> Optional[Callable]:
    """
    Detects the best available DOCX to PDF tool and returns a conversion function.
    The returned function accepts (input_path, output_path).
    Returns None if no tool is available.
    """
    # Try LibreOffice first; other converters may be added below.
    libreoffice_cmd = get_libreoffice_cmd()
    if not libreoffice_cmd:
        # No tools found
        return None

    def convert_with_libreoffice(input_path, output_path):
        # LibreOffice only accepts an output *directory*; it names the PDF
        # after the input file, so we may need to rename afterwards.
        out_dir = os.path.dirname(output_path)
        subprocess.run(
            [
                libreoffice_cmd,
                "--headless",
                "--convert-to",
                "pdf",
                "--outdir",
                out_dir,
                input_path,
            ],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            check=True,
        )
        stem = os.path.splitext(os.path.basename(input_path))[0]
        produced = os.path.join(out_dir, stem + ".pdf")
        if produced != output_path:
            os.rename(produced, output_path)

    return convert_with_libreoffice
def crop_whitespace(image: Image.Image, bg_color=None, padding=0) -> Image.Image:
    """Trim the uniform border around *image*, keeping *padding* extra pixels.

    The background color defaults to the top-left pixel; when the whole image
    matches the background, it is returned unchanged.
    """
    reference = bg_color if bg_color is not None else image.getpixel((0, 0))
    backdrop = Image.new(image.mode, image.size, reference)
    content_box = ImageChops.difference(image, backdrop).getbbox()
    if not content_box:
        return image
    x0, y0, x1, y1 = content_box
    crop_box = (
        max(0, x0 - padding),
        max(0, y0 - padding),
        min(image.width, x1 + padding),
        min(image.height, y1 + padding),
    )
    return image.crop(crop_box)
def get_pil_from_dml_docx(
    docx: Document, converter: Optional[Callable]
) -> Optional[Image.Image]:
    """Render the first page of *docx* to a whitespace-cropped PIL image.

    Args:
        docx: python-docx Document holding the drawing to render.
        converter: Callable(input_path, output_path) converting DOCX to PDF
            (e.g. from get_docx_to_pdf_converter()); when None, rendering is
            skipped.

    Returns:
        The rendered, cropped first PDF page, or None when no converter is
        available.
    """
    if converter is None:
        return None
    temp_dir = Path(mkdtemp())
    try:
        temp_docx = temp_dir / "drawing_only.docx"
        temp_pdf = temp_dir / "drawing_only.pdf"
        # 1) Save docx temporarily
        docx.save(str(temp_docx))
        # 2) Export to PDF
        converter(temp_docx, temp_pdf)
        # 3) Load PDF as PNG
        pdf = pypdfium2.PdfDocument(temp_pdf)
        page = pdf[0]
        image = crop_whitespace(page.render(scale=2).to_pil())
        page.close()
        pdf.close()
        return image
    finally:
        # Bug fix: previously the scratch directory leaked whenever save,
        # conversion, or rendering raised; always remove it.
        shutil.rmtree(temp_dir, ignore_errors=True)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/experimental/__init__.py | docling/experimental/__init__.py | """Experimental modules for Docling.
This package contains experimental features that are under development
and may change or be removed in future versions.
"""
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/experimental/datamodel/table_crops_layout_options.py | docling/experimental/datamodel/table_crops_layout_options.py | """Internal options for the experimental TableCrops layout model."""
from typing import ClassVar
from docling.datamodel.pipeline_options import BaseLayoutOptions
__all__ = ["TableCropsLayoutOptions"]
class TableCropsLayoutOptions(BaseLayoutOptions):
"""Options for TableCropsLayoutModel (internal-only)."""
kind: ClassVar[str] = "docling_experimental_table_crops_layout"
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/experimental/datamodel/__init__.py | docling/experimental/datamodel/__init__.py | """Experimental datamodel modules."""
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/experimental/datamodel/threaded_layout_vlm_pipeline_options.py | docling/experimental/datamodel/threaded_layout_vlm_pipeline_options.py | """Options for the threaded layout+VLM pipeline."""
from typing import Union
from pydantic import model_validator
from docling.datamodel.layout_model_specs import DOCLING_LAYOUT_HERON
from docling.datamodel.pipeline_options import LayoutOptions, PaginatedPipelineOptions
from docling.datamodel.pipeline_options_vlm_model import (
ApiVlmOptions,
InlineVlmOptions,
ResponseFormat,
)
from docling.datamodel.vlm_model_specs import GRANITEDOCLING_TRANSFORMERS
class ThreadedLayoutVlmPipelineOptions(PaginatedPipelineOptions):
"""Pipeline options for the threaded layout+VLM pipeline."""
images_scale: float = 2.0
# VLM configuration (will be enhanced with layout awareness by the pipeline)
vlm_options: Union[InlineVlmOptions, ApiVlmOptions] = GRANITEDOCLING_TRANSFORMERS
# Layout model configuration
layout_options: LayoutOptions = LayoutOptions(
model_spec=DOCLING_LAYOUT_HERON, skip_cell_assignment=True
)
# Threading and batching controls
layout_batch_size: int = 4
vlm_batch_size: int = 4
batch_timeout_seconds: float = 2.0
queue_max_size: int = 50
@model_validator(mode="after")
def validate_response_format(self):
"""Validate that VLM response format is DOCTAGS (required for this pipeline)."""
if self.vlm_options.response_format != ResponseFormat.DOCTAGS:
raise ValueError(
f"ThreadedLayoutVlmPipeline only supports DOCTAGS response format, "
f"but got {self.vlm_options.response_format}. "
f"Please set vlm_options.response_format=ResponseFormat.DOCTAGS"
)
return self
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/experimental/models/__init__.py | docling/experimental/models/__init__.py | """Experimental models for Docling."""
__all__: list[str] = []
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/experimental/models/table_crops_layout_model.py | docling/experimental/models/table_crops_layout_model.py | """Internal TableCrops layout model that marks full pages as table clusters."""
from __future__ import annotations
import warnings
from collections.abc import Sequence
from pathlib import Path
from typing import Optional
import numpy as np
from docling_core.types.doc import DocItemLabel
from docling.datamodel.accelerator_options import AcceleratorOptions
from docling.datamodel.base_models import BoundingBox, Cluster, LayoutPrediction, Page
from docling.datamodel.document import ConversionResult
from docling.experimental.datamodel.table_crops_layout_options import (
TableCropsLayoutOptions,
)
from docling.models.base_layout_model import BaseLayoutModel
__all__ = ["TableCropsLayoutModel"]
class TableCropsLayoutModel(BaseLayoutModel):
"""Experimental layout model that treats the full page as a table cluster.
This is useful in cases where a Docling pipeline is applied to images of table crops only.
This model is internal and not part of the stable public interface.
"""
def __init__(
self,
artifacts_path: Optional[Path],
accelerator_options: AcceleratorOptions,
options: TableCropsLayoutOptions,
):
self.options = options
self.artifacts_path = artifacts_path
self.accelerator_options = accelerator_options
@classmethod
def get_options_type(cls) -> type[TableCropsLayoutOptions]:
return TableCropsLayoutOptions
def predict_layout(
self,
conv_res: ConversionResult,
pages: Sequence[Page],
) -> Sequence[LayoutPrediction]:
layout_predictions: list[LayoutPrediction] = []
for page in pages:
if page._backend is None or not page._backend.is_valid():
existing_prediction = page.predictions.layout or LayoutPrediction()
layout_predictions.append(existing_prediction)
continue
clusters = self._build_page_clusters(page)
prediction = LayoutPrediction(clusters=clusters)
self._update_confidence(conv_res, page, clusters)
layout_predictions.append(prediction)
return layout_predictions
def _build_page_clusters(self, page: Page) -> list[Cluster]:
page_size = page.size
if page_size is None:
return []
bbox = BoundingBox(
l=0.0,
t=0.0,
r=page_size.width,
b=page_size.height,
)
cluster = Cluster(
id=0,
label=DocItemLabel.TABLE,
bbox=bbox,
confidence=1.0,
cells=[],
)
clusters = [cluster]
if not self.options.skip_cell_assignment:
page_cells = list(page.cells)
cluster.cells = page_cells
if not page_cells and not self.options.keep_empty_clusters:
clusters = []
return clusters
def _update_confidence(
self, conv_res: ConversionResult, page: Page, clusters: list[Cluster]
) -> None:
"""Populate layout and OCR confidence scores for the page."""
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
"Mean of empty slice|invalid value encountered in scalar divide",
RuntimeWarning,
"numpy",
)
conv_res.confidence.pages[page.page_no].layout_score = 1.0
ocr_cells = [cell for cell in page.cells if cell.from_ocr]
ocr_confidence = float(np.mean([cell.confidence for cell in ocr_cells]))
conv_res.confidence.pages[page.page_no].ocr_score = ocr_confidence
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/experimental/pipeline/threaded_layout_vlm_pipeline.py | docling/experimental/pipeline/threaded_layout_vlm_pipeline.py | """Threaded Layout+VLM Pipeline
================================
A specialized two-stage threaded pipeline that combines layout model preprocessing
with VLM processing. The layout model detects document elements and coordinates,
which are then injected into the VLM prompt for enhanced structured output.
"""
from __future__ import annotations
import itertools
import logging
from pathlib import Path
from typing import TYPE_CHECKING, List, Optional, Union, cast
from docling_core.types.doc import DoclingDocument
from docling_core.types.doc.document import DocTagsDocument
from PIL import Image as PILImage
if TYPE_CHECKING:
from docling_core.types.doc.page import SegmentedPage
from docling.backend.abstract_backend import AbstractDocumentBackend
from docling.backend.pdf_backend import PdfDocumentBackend
from docling.datamodel.base_models import ConversionStatus, Page
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options_vlm_model import (
ApiVlmOptions,
InferenceFramework,
InlineVlmOptions,
)
from docling.datamodel.settings import settings
from docling.experimental.datamodel.threaded_layout_vlm_pipeline_options import (
ThreadedLayoutVlmPipelineOptions,
)
from docling.models.api_vlm_model import ApiVlmModel
from docling.models.base_model import BaseVlmPageModel
from docling.models.layout_model import LayoutModel
from docling.models.vlm_models_inline.hf_transformers_model import (
HuggingFaceTransformersVlmModel,
)
from docling.models.vlm_models_inline.mlx_model import HuggingFaceMlxModel
from docling.pipeline.base_pipeline import BasePipeline
from docling.pipeline.standard_pdf_pipeline import (
ProcessingResult,
RunContext,
ThreadedItem,
ThreadedPipelineStage,
ThreadedQueue,
)
from docling.utils.profiling import ProfilingScope, TimeRecorder
_log = logging.getLogger(__name__)
class ThreadedLayoutVlmPipeline(BasePipeline):
"""Two-stage threaded pipeline: Layout Model → VLM Model."""
def __init__(self, pipeline_options: ThreadedLayoutVlmPipelineOptions) -> None:
super().__init__(pipeline_options)
self.pipeline_options: ThreadedLayoutVlmPipelineOptions = pipeline_options
self._run_seq = itertools.count(1) # deterministic, monotonic run ids
# VLM model type (initialized in _init_models)
self.vlm_model: BaseVlmPageModel
# Initialize models
self._init_models()
def _init_models(self) -> None:
"""Initialize layout and VLM models."""
art_path = self._resolve_artifacts_path()
# Layout model
self.layout_model = LayoutModel(
artifacts_path=art_path,
accelerator_options=self.pipeline_options.accelerator_options,
options=self.pipeline_options.layout_options,
)
# VLM model based on options type
# Create layout-aware VLM options internally
base_vlm_options = self.pipeline_options.vlm_options
class LayoutAwareVlmOptions(type(base_vlm_options)): # type: ignore[misc]
def build_prompt(
self,
page: Optional[SegmentedPage],
*,
_internal_page: Optional[Page] = None,
) -> str:
base_prompt = self.prompt
augmented_prompt = base_prompt
# In this layout-aware pipeline, _internal_page is always provided
if _internal_page is None:
return base_prompt
if not _internal_page.size:
_log.warning(
f"Page size not available for page {_internal_page.page_no}. Cannot enhance prompt with layout info."
)
return base_prompt
if _internal_page.predictions.layout:
from docling_core.types.doc.tokens import DocumentToken
layout_elements = []
for cluster in _internal_page.predictions.layout.clusters:
# Get proper tag name from DocItemLabel
tag_name = DocumentToken.create_token_name_from_doc_item_label(
label=cluster.label
)
# Convert bbox to tuple and get location tokens
bbox_tuple = cluster.bbox.as_tuple()
location_tokens = DocumentToken.get_location(
bbox=bbox_tuple,
page_w=_internal_page.size.width,
page_h=_internal_page.size.height,
)
# Create XML element with DocTags format
xml_element = f"<{tag_name}>{location_tokens}</{tag_name}>"
layout_elements.append(xml_element)
if layout_elements:
# Join elements with newlines and wrap in layout tags
layout_xml = (
"<layout>" + "\n".join(layout_elements) + "</layout>"
)
layout_injection = f"{layout_xml}"
augmented_prompt = base_prompt + layout_injection
_log.debug(
"Enhanced Prompt with Layout Info: %s\n", augmented_prompt
)
return augmented_prompt
vlm_options = LayoutAwareVlmOptions(**base_vlm_options.model_dump())
if isinstance(base_vlm_options, ApiVlmOptions):
self.vlm_model = ApiVlmModel(
enabled=True,
enable_remote_services=self.pipeline_options.enable_remote_services,
vlm_options=vlm_options,
)
elif isinstance(base_vlm_options, InlineVlmOptions):
if vlm_options.inference_framework == InferenceFramework.TRANSFORMERS:
self.vlm_model = HuggingFaceTransformersVlmModel(
enabled=True,
artifacts_path=art_path,
accelerator_options=self.pipeline_options.accelerator_options,
vlm_options=vlm_options,
)
elif vlm_options.inference_framework == InferenceFramework.MLX:
self.vlm_model = HuggingFaceMlxModel(
enabled=True,
artifacts_path=art_path,
accelerator_options=self.pipeline_options.accelerator_options,
vlm_options=vlm_options,
)
elif vlm_options.inference_framework == InferenceFramework.VLLM:
from docling.models.vlm_models_inline.vllm_model import VllmVlmModel
self.vlm_model = VllmVlmModel(
enabled=True,
artifacts_path=art_path,
accelerator_options=self.pipeline_options.accelerator_options,
vlm_options=vlm_options,
)
else:
raise ValueError(
f"Unsupported VLM inference framework: {vlm_options.inference_framework}"
)
else:
raise ValueError(f"Unsupported VLM options type: {type(base_vlm_options)}")
def _resolve_artifacts_path(self) -> Optional[Path]:
"""Resolve artifacts path from options or settings."""
if self.pipeline_options.artifacts_path:
p = Path(self.pipeline_options.artifacts_path).expanduser()
elif settings.artifacts_path:
p = Path(settings.artifacts_path).expanduser()
else:
return None
if not p.is_dir():
raise RuntimeError(
f"{p} does not exist or is not a directory containing the required models"
)
return p
def _create_run_ctx(self) -> RunContext:
"""Create pipeline stages and wire them together."""
opts = self.pipeline_options
# Layout stage
layout_stage = ThreadedPipelineStage(
name="layout",
model=self.layout_model,
batch_size=opts.layout_batch_size,
batch_timeout=opts.batch_timeout_seconds,
queue_max_size=opts.queue_max_size,
)
# VLM stage - now layout-aware through enhanced build_prompt
vlm_stage = ThreadedPipelineStage(
name="vlm",
model=self.vlm_model,
batch_size=opts.vlm_batch_size,
batch_timeout=opts.batch_timeout_seconds,
queue_max_size=opts.queue_max_size,
)
# Wire stages
output_q = ThreadedQueue(opts.queue_max_size)
layout_stage.add_output_queue(vlm_stage.input_queue)
vlm_stage.add_output_queue(output_q)
stages = [layout_stage, vlm_stage]
return RunContext(
stages=stages, first_stage=layout_stage, output_queue=output_q
)
def _build_document(self, conv_res: ConversionResult) -> ConversionResult:
"""Build document using threaded layout+VLM pipeline."""
run_id = next(self._run_seq)
assert isinstance(conv_res.input._backend, PdfDocumentBackend)
backend = conv_res.input._backend
# Initialize pages
start_page, end_page = conv_res.input.limits.page_range
pages: List[Page] = []
images_scale = self.pipeline_options.images_scale
for i in range(conv_res.input.page_count):
if start_page - 1 <= i <= end_page - 1:
page = Page(page_no=i)
if images_scale is not None:
page._default_image_scale = images_scale
page._backend = backend.load_page(i)
if page._backend and page._backend.is_valid():
page.size = page._backend.get_size()
conv_res.pages.append(page)
pages.append(page)
if not pages:
conv_res.status = ConversionStatus.FAILURE
return conv_res
total_pages = len(pages)
ctx = self._create_run_ctx()
for st in ctx.stages:
st.start()
proc = ProcessingResult(total_expected=total_pages)
fed_idx = 0
batch_size = 32
try:
while proc.success_count + proc.failure_count < total_pages:
# Feed pages to first stage
while fed_idx < total_pages:
ok = ctx.first_stage.input_queue.put(
ThreadedItem(
payload=pages[fed_idx],
run_id=run_id,
page_no=pages[fed_idx].page_no,
conv_res=conv_res,
),
timeout=0.0,
)
if ok:
fed_idx += 1
if fed_idx == total_pages:
ctx.first_stage.input_queue.close()
else:
break
# Drain results from output
out_batch = ctx.output_queue.get_batch(batch_size, timeout=0.05)
for itm in out_batch:
if itm.run_id != run_id:
continue
if itm.is_failed or itm.error:
proc.failed_pages.append(
(itm.page_no, itm.error or RuntimeError("unknown error"))
)
else:
assert itm.payload is not None
proc.pages.append(itm.payload)
# Handle early termination
if not out_batch and ctx.output_queue.closed:
missing = total_pages - (proc.success_count + proc.failure_count)
if missing > 0:
proc.failed_pages.extend(
[(-1, RuntimeError("pipeline terminated early"))] * missing
)
break
finally:
for st in ctx.stages:
st.stop()
ctx.output_queue.close()
self._integrate_results(conv_res, proc)
return conv_res
def _integrate_results(
self, conv_res: ConversionResult, proc: ProcessingResult
) -> None:
"""Integrate processing results into conversion result."""
page_map = {p.page_no: p for p in proc.pages}
# Track failed pages for cleanup
failed_page_nos = {fp for fp, _ in proc.failed_pages}
# Collect pages that will be removed (failed pages) for resource cleanup
pages_to_remove = [p for p in conv_res.pages if p.page_no in failed_page_nos]
conv_res.pages = [
page_map.get(p.page_no, p)
for p in conv_res.pages
if p.page_no in page_map
or not any(fp == p.page_no for fp, _ in proc.failed_pages)
]
if proc.is_complete_failure:
conv_res.status = ConversionStatus.FAILURE
elif proc.is_partial_success:
conv_res.status = ConversionStatus.PARTIAL_SUCCESS
else:
conv_res.status = ConversionStatus.SUCCESS
# Clean up resources for failed pages that were removed
for p in pages_to_remove:
if p._backend is not None:
p._backend.unload()
p._image_cache = {}
# Clean up parsed_page if it exists (it's Optional[SegmentedPdfPage])
if p.parsed_page is not None:
del p.parsed_page
p.parsed_page = None
# Clean up images if not needed for remaining pages
if not self.pipeline_options.generate_page_images:
for p in conv_res.pages:
p._image_cache = {}
def _assemble_document(self, conv_res: ConversionResult) -> ConversionResult:
"""Assemble final document from VLM predictions."""
from docling_core.types.doc import DocItem, ImageRef, PictureItem
from docling.datamodel.pipeline_options_vlm_model import ResponseFormat
with TimeRecorder(conv_res, "doc_assemble", scope=ProfilingScope.DOCUMENT):
# Response format validation is done in ThreadedLayoutVlmPipelineOptions
# This check is kept as a safety net, but should never trigger if validation works
if (
self.pipeline_options.vlm_options.response_format
!= ResponseFormat.DOCTAGS
):
raise RuntimeError(
f"Unsupported VLM response format {self.pipeline_options.vlm_options.response_format}. Only DOCTAGS format is supported."
)
conv_res.document = self._turn_dt_into_doc(conv_res)
# Generate images of the requested element types
if self.pipeline_options.generate_picture_images:
# Create mapping from page_no to Page object since pages may be non-continuous
page_map = {p.page_no: p for p in conv_res.pages}
scale = self.pipeline_options.images_scale
for element, _level in conv_res.document.iterate_items():
if not isinstance(element, DocItem) or len(element.prov) == 0:
continue
if (
isinstance(element, PictureItem)
and self.pipeline_options.generate_picture_images
):
page_no = element.prov[0].page_no
page = page_map.get(page_no)
if page is None:
_log.warning(
f"Page {page_no} not found in conversion result for picture element. Skipping image generation."
)
continue
assert page.size is not None
assert page.image is not None
crop_bbox = (
element.prov[0]
.bbox.scaled(scale=scale)
.to_top_left_origin(page_height=page.size.height * scale)
)
cropped_im = page.image.crop(crop_bbox.as_tuple())
element.image = ImageRef.from_pil(
cropped_im, dpi=int(72 * scale)
)
return conv_res
def _turn_dt_into_doc(self, conv_res: ConversionResult) -> DoclingDocument:
"""Convert DOCTAGS response format to DoclingDocument."""
doctags_list = []
image_list = []
for page in conv_res.pages:
# Only include pages that have both an image and VLM predictions
if page.image and page.predictions.vlm_response:
predicted_doctags = page.predictions.vlm_response.text
image_list.append(page.image)
doctags_list.append(predicted_doctags)
doctags_list_c = cast(List[Union[Path, str]], doctags_list)
image_list_c = cast(List[Union[Path, PILImage.Image]], image_list)
doctags_doc = DocTagsDocument.from_doctags_and_image_pairs(
doctags_list_c, image_list_c
)
document = DoclingDocument.load_from_doctags(doctag_document=doctags_doc)
return document
@classmethod
def get_default_options(cls) -> ThreadedLayoutVlmPipelineOptions:
return ThreadedLayoutVlmPipelineOptions()
@classmethod
def is_backend_supported(cls, backend: AbstractDocumentBackend) -> bool:
return isinstance(backend, PdfDocumentBackend)
def _determine_status(self, conv_res: ConversionResult) -> ConversionStatus:
return conv_res.status
def _unload(self, conv_res: ConversionResult) -> None:
for p in conv_res.pages:
if p._backend is not None:
p._backend.unload()
if conv_res.input._backend:
conv_res.input._backend.unload()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/experimental/pipeline/__init__.py | docling/experimental/pipeline/__init__.py | """Experimental pipeline modules."""
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/cli/models.py | docling/cli/models.py | import logging
import warnings
from enum import Enum
from pathlib import Path
from typing import Annotated, Optional
import typer
from rich.console import Console
from rich.logging import RichHandler
from docling.datamodel.settings import settings
from docling.models.utils.hf_model_download import download_hf_model
from docling.utils.model_downloader import download_models
warnings.filterwarnings(action="ignore", category=UserWarning, module="pydantic|torch")
warnings.filterwarnings(action="ignore", category=FutureWarning, module="easyocr")
console = Console()
err_console = Console(stderr=True)
app = typer.Typer(
name="Docling models helper",
no_args_is_help=True,
add_completion=False,
pretty_exceptions_enable=False,
)
class _AvailableModels(str, Enum):
LAYOUT = "layout"
TABLEFORMER = "tableformer"
CODE_FORMULA = "code_formula"
PICTURE_CLASSIFIER = "picture_classifier"
SMOLVLM = "smolvlm"
GRANITEDOCLING = "granitedocling"
GRANITEDOCLING_MLX = "granitedocling_mlx"
SMOLDOCLING = "smoldocling"
SMOLDOCLING_MLX = "smoldocling_mlx"
GRANITE_VISION = "granite_vision"
RAPIDOCR = "rapidocr"
EASYOCR = "easyocr"
_default_models = [
_AvailableModels.LAYOUT,
_AvailableModels.TABLEFORMER,
_AvailableModels.CODE_FORMULA,
_AvailableModels.PICTURE_CLASSIFIER,
_AvailableModels.RAPIDOCR,
]
@app.command("download")
def download(
output_dir: Annotated[
Path,
typer.Option(
...,
"-o",
"--output-dir",
help="The directory where to download the models.",
),
] = (settings.cache_dir / "models"),
force: Annotated[
bool, typer.Option(..., help="If true, the download will be forced.")
] = False,
models: Annotated[
Optional[list[_AvailableModels]],
typer.Argument(
help="Models to download (default behavior: a predefined set of models will be downloaded).",
),
] = None,
all: Annotated[
bool,
typer.Option(
...,
"--all",
help="If true, all available models will be downloaded (mutually exclusive with passing specific models).",
show_default=True,
),
] = False,
quiet: Annotated[
bool,
typer.Option(
...,
"-q",
"--quiet",
help="No extra output is generated, the CLI prints only the directory with the cached models.",
),
] = False,
):
if models and all:
raise typer.BadParameter(
"Cannot simultaneously set 'all' parameter and specify models to download."
)
if not quiet:
logging.basicConfig(
level=logging.INFO,
format="[blue]%(message)s[/blue]",
datefmt="[%X]",
handlers=[RichHandler(show_level=False, show_time=False, markup=True)],
)
to_download = models or (list(_AvailableModels) if all else _default_models)
output_dir = download_models(
output_dir=output_dir,
force=force,
progress=(not quiet),
with_layout=_AvailableModels.LAYOUT in to_download,
with_tableformer=_AvailableModels.TABLEFORMER in to_download,
with_code_formula=_AvailableModels.CODE_FORMULA in to_download,
with_picture_classifier=_AvailableModels.PICTURE_CLASSIFIER in to_download,
with_smolvlm=_AvailableModels.SMOLVLM in to_download,
with_granitedocling=_AvailableModels.GRANITEDOCLING in to_download,
with_granitedocling_mlx=_AvailableModels.GRANITEDOCLING_MLX in to_download,
with_smoldocling=_AvailableModels.SMOLDOCLING in to_download,
with_smoldocling_mlx=_AvailableModels.SMOLDOCLING_MLX in to_download,
with_granite_vision=_AvailableModels.GRANITE_VISION in to_download,
with_rapidocr=_AvailableModels.RAPIDOCR in to_download,
with_easyocr=_AvailableModels.EASYOCR in to_download,
)
if quiet:
typer.echo(output_dir)
else:
typer.secho(f"\nModels downloaded into: {output_dir}.", fg="green")
console.print(
"\n",
"Docling can now be configured for running offline using the local artifacts.\n\n",
"Using the CLI:",
f"`docling --artifacts-path={output_dir} FILE`",
"\n",
"Using Python: see the documentation at <https://docling-project.github.io/docling/usage>.",
)
@app.command("download-hf-repo")
def download_hf_repo(
models: Annotated[
list[str],
typer.Argument(
help="Specific models to download from HuggingFace identified by their repo id. For example: docling-project/docling-models .",
),
],
output_dir: Annotated[
Path,
typer.Option(
...,
"-o",
"--output-dir",
help="The directory where to download the models.",
),
] = (settings.cache_dir / "models"),
force: Annotated[
bool, typer.Option(..., help="If true, the download will be forced.")
] = False,
quiet: Annotated[
bool,
typer.Option(
...,
"-q",
"--quiet",
help="No extra output is generated, the CLI prints only the directory with the cached models.",
),
] = False,
):
if not quiet:
logging.basicConfig(
level=logging.INFO,
format="[blue]%(message)s[/blue]",
datefmt="[%X]",
handlers=[RichHandler(show_level=False, show_time=False, markup=True)],
)
for item in models:
typer.secho(f"\nDownloading {item} model from HuggingFace...")
download_hf_model(
repo_id=item,
# would be better to reuse "repo_cache_folder" property: https://github.com/docling-project/docling/blob/main/docling/datamodel/pipeline_options_vlm_model.py#L76
# but creating options objects seams like an overkill
local_dir=output_dir / item.replace("/", "--"),
force=force,
progress=(not quiet),
)
if quiet:
typer.echo(output_dir)
else:
typer.secho(f"\nModels downloaded into: {output_dir}.", fg="green")
click_app = typer.main.get_command(app)
if __name__ == "__main__":
app()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/cli/tools.py | docling/cli/tools.py | import typer
from docling.cli.models import app as models_app
app = typer.Typer(
name="Docling helpers",
no_args_is_help=True,
add_completion=False,
pretty_exceptions_enable=False,
)
app.add_typer(models_app, name="models")
click_app = typer.main.get_command(app)
if __name__ == "__main__":
app()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/cli/main.py | docling/cli/main.py | import importlib
import logging
import platform
import re
import sys
import tempfile
import time
import warnings
from collections.abc import Iterable
from pathlib import Path
from typing import Annotated, Dict, List, Optional, Type
import rich.table
import typer
from docling_core.transforms.serializer.html import (
HTMLDocSerializer,
HTMLOutputStyle,
HTMLParams,
)
from docling_core.transforms.visualizer.layout_visualizer import LayoutVisualizer
from docling_core.types.doc import ImageRefMode
from docling_core.utils.file import resolve_source_to_path
from pydantic import TypeAdapter
from rich.console import Console
from docling.backend.docling_parse_backend import DoclingParseDocumentBackend
from docling.backend.docling_parse_v2_backend import DoclingParseV2DocumentBackend
from docling.backend.docling_parse_v4_backend import DoclingParseV4DocumentBackend
from docling.backend.image_backend import ImageDocumentBackend
from docling.backend.mets_gbs_backend import MetsGbsDocumentBackend
from docling.backend.pdf_backend import PdfDocumentBackend
from docling.backend.pypdfium2_backend import PyPdfiumDocumentBackend
from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
from docling.datamodel.asr_model_specs import (
WHISPER_BASE,
WHISPER_BASE_MLX,
WHISPER_BASE_NATIVE,
WHISPER_LARGE,
WHISPER_LARGE_MLX,
WHISPER_LARGE_NATIVE,
WHISPER_MEDIUM,
WHISPER_MEDIUM_MLX,
WHISPER_MEDIUM_NATIVE,
WHISPER_SMALL,
WHISPER_SMALL_MLX,
WHISPER_SMALL_NATIVE,
WHISPER_TINY,
WHISPER_TINY_MLX,
WHISPER_TINY_NATIVE,
WHISPER_TURBO,
WHISPER_TURBO_MLX,
WHISPER_TURBO_NATIVE,
AsrModelType,
)
from docling.datamodel.backend_options import PdfBackendOptions
from docling.datamodel.base_models import (
ConversionStatus,
FormatToExtensions,
InputFormat,
OutputFormat,
)
from docling.datamodel.document import ConversionResult, DoclingVersion
from docling.datamodel.pipeline_options import (
AsrPipelineOptions,
ConvertPipelineOptions,
OcrAutoOptions,
OcrOptions,
PaginatedPipelineOptions,
PdfBackend,
PdfPipelineOptions,
PipelineOptions,
ProcessingPipeline,
TableFormerMode,
TableStructureOptions,
TesseractCliOcrOptions,
TesseractOcrOptions,
VlmPipelineOptions,
)
from docling.datamodel.settings import settings
from docling.datamodel.vlm_model_specs import (
GOT2_TRANSFORMERS,
GRANITE_VISION_OLLAMA,
GRANITE_VISION_TRANSFORMERS,
GRANITEDOCLING_MLX,
GRANITEDOCLING_TRANSFORMERS,
GRANITEDOCLING_VLLM,
SMOLDOCLING_MLX,
SMOLDOCLING_TRANSFORMERS,
SMOLDOCLING_VLLM,
VlmModelType,
)
from docling.document_converter import (
AudioFormatOption,
DocumentConverter,
ExcelFormatOption,
FormatOption,
HTMLFormatOption,
MarkdownFormatOption,
PdfFormatOption,
PowerpointFormatOption,
WordFormatOption,
)
from docling.models.factories import get_ocr_factory
from docling.pipeline.asr_pipeline import AsrPipeline
from docling.pipeline.vlm_pipeline import VlmPipeline
warnings.filterwarnings(action="ignore", category=UserWarning, module="pydantic|torch")
warnings.filterwarnings(action="ignore", category=FutureWarning, module="easyocr")
_log = logging.getLogger(__name__)
console = Console()
err_console = Console(stderr=True)
ocr_factory_internal = get_ocr_factory(allow_external_plugins=False)
ocr_engines_enum_internal = ocr_factory_internal.get_enum()
DOCLING_ASCII_ART = r"""
████ ██████
███░░██░░░░░██████
████████░░░░░░░░████████████
████████░░░░░░░░░░░░░░░░░░████████
██████░░░░░░░░░░░░░░░░░░░░░░░░░░██████
██████░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░█████
██████░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░█████
██████░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░██████
██████░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░██████
██████░░░░░░░ ░░░░░░░░░░░░░░░░░░░░░░ ░░░░░░░██████
██████░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░██████
██████░░░░░░ ░░░░░░░░░░░░░░░ ░░░░░░██████
███▒██░░░░░ ████ ░░░░░░░░░░░░ ████ ░░░░░██▒███
███▒██░░░░░░ ████ ░░░░░░░░░░░░ ████ ░░░░░██▒████
███▒██░░░░░░ ██ ██ ░░░░░░░░░░░░ ██ ██ ░░░░░██▒▒███
███▒███░░░░░ ██ ░░░░████░░░░ ██ ░░░░░██▒▒███
████▒▒██░░░░░░ ░░░███▒▒▒▒███░░░ ░░░░░░░██▒▒████
████▒▒██░░░░░░░░░░░░░░░░░█▒▒▒▒▒▒▒▒▒▒█░░░░░░░░░░░░░░░░███▒▒████
████▒▒▒██░░░░░░░░░░░░█████ ▒▒▒▒▒▒ ██████░░░░░░░░░░░██▒▒▒████
███▒▒▒▒██░░░░░░░░███▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███░░░░░░░░██▒▒▒▒███
███▒▒▒▒▒███░░░░░░██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██░░░░░░███▒▒▒▒▒███
████▒▒▒▒▒████░░░░░░██████████████████████░░░░░░████▒▒▒▒▒████
███▒▒▒▒▒▒▒▒████░░░░░░░░░░░░░░░░░░░░░░░░░░░████▒▒▒▒▒▒▒▒▒███
████▒▒▒▒▒▒▒▒███░░░░░████████████████████████▒▒▒▒▒▒▒▒▒████
████▒▒▒▒▒▒██░░░░░░█ █░░░░░██▒▒▒▒▒▒████
████▒▒▒▒█░░░░░░░█ D O C L I N G █░░░░░░░░██▒▒▒████
████▒▒██░░░░░░█ █░░░░░░░░░░█▒▒████
██████░░░░░░█ D O C L I N G █░░░░░░░░░░░██████
████░░░░░█ █░░░░░░░░░░░░████
█████░░█ D O C L I N G █░░░░░░░░░░░█████
█████ █░░░░░░░░████████
██ D O C L I N G █░░░░░░░░█████
█ █░░░████████
█████████████████████████████
"""
app = typer.Typer(
name="Docling",
no_args_is_help=True,
add_completion=False,
pretty_exceptions_enable=False,
)
def logo_callback(value: bool):
if value:
print(DOCLING_ASCII_ART)
raise typer.Exit()
def version_callback(value: bool):
if value:
v = DoclingVersion()
print(f"Docling version: {v.docling_version}")
print(f"Docling Core version: {v.docling_core_version}")
print(f"Docling IBM Models version: {v.docling_ibm_models_version}")
print(f"Docling Parse version: {v.docling_parse_version}")
print(f"Python: {v.py_impl_version} ({v.py_lang_version})")
print(f"Platform: {v.platform_str}")
raise typer.Exit()
def show_external_plugins_callback(value: bool):
if value:
ocr_factory_all = get_ocr_factory(allow_external_plugins=True)
table = rich.table.Table(title="Available OCR engines")
table.add_column("Name", justify="right")
table.add_column("Plugin")
table.add_column("Package")
for meta in ocr_factory_all.registered_meta.values():
if not meta.module.startswith("docling."):
table.add_row(
f"[bold]{meta.kind}[/bold]",
meta.plugin_name,
meta.module.split(".")[0],
)
rich.print(table)
raise typer.Exit()
def export_documents(
    conv_results: Iterable[ConversionResult],
    output_dir: Path,
    export_json: bool,
    export_yaml: bool,
    export_html: bool,
    export_html_split_page: bool,
    show_layout: bool,
    export_md: bool,
    export_txt: bool,
    export_doctags: bool,
    image_export_mode: ImageRefMode,
):
    """Write every successful conversion to *output_dir* in each requested format.

    Iterates *conv_results* lazily (driving the conversion if the iterable is a
    generator), saving one file per enabled format, named after the input file's
    stem. Failed conversions are logged and counted but do not abort the loop.

    NOTE(review): ``export_html`` and ``export_html_split_page`` both write to
    ``{stem}.html`` — enabling both silently overwrites one output; confirm
    whether this is intended upstream.
    """
    success_count = 0
    failure_count = 0
    for conv_res in conv_results:
        if conv_res.status == ConversionStatus.SUCCESS:
            success_count += 1
            doc_filename = conv_res.input.file.stem
            # Export JSON format:
            if export_json:
                fname = output_dir / f"{doc_filename}.json"
                _log.info(f"writing JSON output to {fname}")
                conv_res.document.save_as_json(
                    filename=fname, image_mode=image_export_mode
                )
            # Export YAML format:
            if export_yaml:
                fname = output_dir / f"{doc_filename}.yaml"
                _log.info(f"writing YAML output to {fname}")
                conv_res.document.save_as_yaml(
                    filename=fname, image_mode=image_export_mode
                )
            # Export HTML format:
            if export_html:
                fname = output_dir / f"{doc_filename}.html"
                _log.info(f"writing HTML output to {fname}")
                conv_res.document.save_as_html(
                    filename=fname, image_mode=image_export_mode, split_page_view=False
                )
            # Export HTML (split-page view) format:
            if export_html_split_page:
                fname = output_dir / f"{doc_filename}.html"
                _log.info(f"writing HTML output to {fname}")
                if show_layout:
                    # With layout debugging, serialize manually so the layout
                    # visualizer can draw cluster boxes onto the page images.
                    ser = HTMLDocSerializer(
                        doc=conv_res.document,
                        params=HTMLParams(
                            image_mode=image_export_mode,
                            output_style=HTMLOutputStyle.SPLIT_PAGE,
                        ),
                    )
                    visualizer = LayoutVisualizer()
                    visualizer.params.show_label = False
                    ser_res = ser.serialize(
                        visualizer=visualizer,
                    )
                    with open(fname, "w") as fw:
                        fw.write(ser_res.text)
                else:
                    conv_res.document.save_as_html(
                        filename=fname,
                        image_mode=image_export_mode,
                        split_page_view=True,
                    )
            # Export Text format:
            if export_txt:
                fname = output_dir / f"{doc_filename}.txt"
                _log.info(f"writing TXT output to {fname}")
                # Plain text is produced via strict-text Markdown export;
                # images are always placeholders in text output.
                conv_res.document.save_as_markdown(
                    filename=fname,
                    strict_text=True,
                    image_mode=ImageRefMode.PLACEHOLDER,
                )
            # Export Markdown format:
            if export_md:
                fname = output_dir / f"{doc_filename}.md"
                _log.info(f"writing Markdown output to {fname}")
                conv_res.document.save_as_markdown(
                    filename=fname, image_mode=image_export_mode
                )
            # Export Document Tags format:
            if export_doctags:
                fname = output_dir / f"{doc_filename}.doctags"
                _log.info(f"writing Doc Tags output to {fname}")
                conv_res.document.save_as_doctags(filename=fname)
        else:
            # Failure path: log a summary, and per-error detail at INFO level.
            _log.warning(f"Document {conv_res.input.file} failed to convert.")
            if _log.isEnabledFor(logging.INFO):
                for err in conv_res.errors:
                    _log.info(
                        f" [Failure Detail] Component: {err.component_type}, "
                        f"Module: {err.module_name}, Message: {err.error_message}"
                    )
            failure_count += 1
    _log.info(
        f"Processed {success_count + failure_count} docs, of which {failure_count} failed"
    )
def _split_list(raw: Optional[str]) -> Optional[List[str]]:
if raw is None:
return None
return re.split(r"[;,]", raw)
@app.command(no_args_is_help=True)
def convert( # noqa: C901
input_sources: Annotated[
List[str],
typer.Argument(
...,
metavar="source",
help="PDF files to convert. Can be local file / directory paths or URL.",
),
],
from_formats: List[InputFormat] = typer.Option(
None,
"--from",
help="Specify input formats to convert from. Defaults to all formats.",
),
to_formats: List[OutputFormat] = typer.Option(
None, "--to", help="Specify output formats. Defaults to Markdown."
),
show_layout: Annotated[
bool,
typer.Option(
...,
help="If enabled, the page images will show the bounding-boxes of the items.",
),
] = False,
headers: str = typer.Option(
None,
"--headers",
help="Specify http request headers used when fetching url input sources in the form of a JSON string",
),
image_export_mode: Annotated[
ImageRefMode,
typer.Option(
...,
help="Image export mode for the document (only in case of JSON, Markdown or HTML). With `placeholder`, only the position of the image is marked in the output. In `embedded` mode, the image is embedded as base64 encoded string. In `referenced` mode, the image is exported in PNG format and referenced from the main exported document.",
),
] = ImageRefMode.EMBEDDED,
pipeline: Annotated[
ProcessingPipeline,
typer.Option(..., help="Choose the pipeline to process PDF or image files."),
] = ProcessingPipeline.STANDARD,
vlm_model: Annotated[
VlmModelType,
typer.Option(..., help="Choose the VLM model to use with PDF or image files."),
] = VlmModelType.GRANITEDOCLING,
asr_model: Annotated[
AsrModelType,
typer.Option(..., help="Choose the ASR model to use with audio/video files."),
] = AsrModelType.WHISPER_TINY,
ocr: Annotated[
bool,
typer.Option(
..., help="If enabled, the bitmap content will be processed using OCR."
),
] = True,
force_ocr: Annotated[
bool,
typer.Option(
...,
help="Replace any existing text with OCR generated text over the full content.",
),
] = False,
tables: Annotated[
bool,
typer.Option(
...,
help="If enabled, the table structure model will be used to extract table information.",
),
] = True,
ocr_engine: Annotated[
str,
typer.Option(
...,
help=(
f"The OCR engine to use. When --allow-external-plugins is *not* set, the available values are: "
f"{', '.join(o.value for o in ocr_engines_enum_internal)}. "
f"Use the option --show-external-plugins to see the options allowed with external plugins."
),
),
] = OcrAutoOptions.kind,
ocr_lang: Annotated[
Optional[str],
typer.Option(
...,
help="Provide a comma-separated list of languages used by the OCR engine. Note that each OCR engine has different values for the language names.",
),
] = None,
psm: Annotated[
Optional[int],
typer.Option(
...,
help="Page Segmentation Mode for the OCR engine (0-13).",
),
] = None,
pdf_backend: Annotated[
PdfBackend, typer.Option(..., help="The PDF backend to use.")
] = PdfBackend.DLPARSE_V4,
pdf_password: Annotated[
Optional[str], typer.Option(..., help="Password for protected PDF documents")
] = None,
table_mode: Annotated[
TableFormerMode,
typer.Option(..., help="The mode to use in the table structure model."),
] = TableFormerMode.ACCURATE,
enrich_code: Annotated[
bool,
typer.Option(..., help="Enable the code enrichment model in the pipeline."),
] = False,
enrich_formula: Annotated[
bool,
typer.Option(..., help="Enable the formula enrichment model in the pipeline."),
] = False,
enrich_picture_classes: Annotated[
bool,
typer.Option(
...,
help="Enable the picture classification enrichment model in the pipeline.",
),
] = False,
enrich_picture_description: Annotated[
bool,
typer.Option(..., help="Enable the picture description model in the pipeline."),
] = False,
artifacts_path: Annotated[
Optional[Path],
typer.Option(..., help="If provided, the location of the model artifacts."),
] = None,
enable_remote_services: Annotated[
bool,
typer.Option(
..., help="Must be enabled when using models connecting to remote services."
),
] = False,
allow_external_plugins: Annotated[
bool,
typer.Option(
..., help="Must be enabled for loading modules from third-party plugins."
),
] = False,
show_external_plugins: Annotated[
bool,
typer.Option(
...,
help="List the third-party plugins which are available when the option --allow-external-plugins is set.",
callback=show_external_plugins_callback,
is_eager=True,
),
] = False,
abort_on_error: Annotated[
bool,
typer.Option(
...,
"--abort-on-error/--no-abort-on-error",
help="If enabled, the processing will be aborted when the first error is encountered.",
),
] = False,
output: Annotated[
Path, typer.Option(..., help="Output directory where results are saved.")
] = Path("."),
verbose: Annotated[
int,
typer.Option(
"--verbose",
"-v",
count=True,
help="Set the verbosity level. -v for info logging, -vv for debug logging.",
),
] = 0,
debug_visualize_cells: Annotated[
bool,
typer.Option(..., help="Enable debug output which visualizes the PDF cells"),
] = False,
debug_visualize_ocr: Annotated[
bool,
typer.Option(..., help="Enable debug output which visualizes the OCR cells"),
] = False,
debug_visualize_layout: Annotated[
bool,
typer.Option(
..., help="Enable debug output which visualizes the layour clusters"
),
] = False,
debug_visualize_tables: Annotated[
bool,
typer.Option(..., help="Enable debug output which visualizes the table cells"),
] = False,
version: Annotated[
Optional[bool],
typer.Option(
"--version",
callback=version_callback,
is_eager=True,
help="Show version information.",
),
] = None,
document_timeout: Annotated[
Optional[float],
typer.Option(
...,
help="The timeout for processing each document, in seconds.",
),
] = None,
num_threads: Annotated[int, typer.Option(..., help="Number of threads")] = 4,
device: Annotated[
AcceleratorDevice, typer.Option(..., help="Accelerator device")
] = AcceleratorDevice.AUTO,
docling_logo: Annotated[
Optional[bool],
typer.Option(
"--logo", callback=logo_callback, is_eager=True, help="Docling logo"
),
] = None,
page_batch_size: Annotated[
int,
typer.Option(
"--page-batch-size",
help=f"Number of pages processed in one batch. Default: {settings.perf.page_batch_size}",
),
] = settings.perf.page_batch_size,
):
log_format = "%(asctime)s\t%(levelname)s\t%(name)s: %(message)s"
if verbose == 0:
logging.basicConfig(level=logging.WARNING, format=log_format)
elif verbose == 1:
logging.basicConfig(level=logging.INFO, format=log_format)
else:
logging.basicConfig(level=logging.DEBUG, format=log_format)
settings.debug.visualize_cells = debug_visualize_cells
settings.debug.visualize_layout = debug_visualize_layout
settings.debug.visualize_tables = debug_visualize_tables
settings.debug.visualize_ocr = debug_visualize_ocr
settings.perf.page_batch_size = page_batch_size
if from_formats is None:
from_formats = list(InputFormat)
parsed_headers: Optional[Dict[str, str]] = None
if headers is not None:
headers_t = TypeAdapter(Dict[str, str])
parsed_headers = headers_t.validate_json(headers)
with tempfile.TemporaryDirectory() as tempdir:
input_doc_paths: List[Path] = []
for src in input_sources:
try:
# check if we can fetch some remote url
source = resolve_source_to_path(
source=src, headers=parsed_headers, workdir=Path(tempdir)
)
input_doc_paths.append(source)
except FileNotFoundError:
err_console.print(
f"[red]Error: The input file {src} does not exist.[/red]"
)
raise typer.Abort()
except IsADirectoryError:
# if the input matches to a file or a folder
try:
local_path = TypeAdapter(Path).validate_python(src)
if local_path.exists() and local_path.is_dir():
for fmt in from_formats:
for ext in FormatToExtensions[fmt]:
for path in local_path.glob(f"**/*.{ext}"):
if path.name.startswith("~$") and ext == "docx":
_log.info(
f"Ignoring temporary Word file: {path}"
)
continue
input_doc_paths.append(path)
for path in local_path.glob(f"**/*.{ext.upper()}"):
if path.name.startswith("~$") and ext == "docx":
_log.info(
f"Ignoring temporary Word file: {path}"
)
continue
input_doc_paths.append(path)
elif local_path.exists():
if not local_path.name.startswith("~$") and ext == "docx":
_log.info(f"Ignoring temporary Word file: {path}")
continue
input_doc_paths.append(local_path)
else:
err_console.print(
f"[red]Error: The input file {src} does not exist.[/red]"
)
raise typer.Abort()
except Exception as err:
err_console.print(f"[red]Error: Cannot read the input {src}.[/red]")
_log.info(err) # will print more details if verbose is activated
raise typer.Abort()
if to_formats is None:
to_formats = [OutputFormat.MARKDOWN]
export_json = OutputFormat.JSON in to_formats
export_yaml = OutputFormat.YAML in to_formats
export_html = OutputFormat.HTML in to_formats
export_html_split_page = OutputFormat.HTML_SPLIT_PAGE in to_formats
export_md = OutputFormat.MARKDOWN in to_formats
export_txt = OutputFormat.TEXT in to_formats
export_doctags = OutputFormat.DOCTAGS in to_formats
ocr_factory = get_ocr_factory(allow_external_plugins=allow_external_plugins)
ocr_options: OcrOptions = ocr_factory.create_options( # type: ignore
kind=ocr_engine,
force_full_page_ocr=force_ocr,
)
ocr_lang_list = _split_list(ocr_lang)
if ocr_lang_list is not None:
ocr_options.lang = ocr_lang_list
if psm is not None and isinstance(
ocr_options, (TesseractOcrOptions, TesseractCliOcrOptions)
):
ocr_options.psm = psm
accelerator_options = AcceleratorOptions(num_threads=num_threads, device=device)
# pipeline_options: PaginatedPipelineOptions
pipeline_options: PipelineOptions
format_options: Dict[InputFormat, FormatOption] = {}
pdf_backend_options: Optional[PdfBackendOptions] = PdfBackendOptions(
password=pdf_password
)
if pipeline == ProcessingPipeline.STANDARD:
pipeline_options = PdfPipelineOptions(
allow_external_plugins=allow_external_plugins,
enable_remote_services=enable_remote_services,
accelerator_options=accelerator_options,
do_ocr=ocr,
ocr_options=ocr_options,
do_table_structure=tables,
do_code_enrichment=enrich_code,
do_formula_enrichment=enrich_formula,
do_picture_description=enrich_picture_description,
do_picture_classification=enrich_picture_classes,
document_timeout=document_timeout,
)
if isinstance(
pipeline_options.table_structure_options, TableStructureOptions
):
pipeline_options.table_structure_options.do_cell_matching = (
True # do_cell_matching
)
pipeline_options.table_structure_options.mode = table_mode
if image_export_mode != ImageRefMode.PLACEHOLDER:
pipeline_options.generate_page_images = True
pipeline_options.generate_picture_images = (
True # FIXME: to be deprecated in version 3
)
pipeline_options.images_scale = 2
backend: Type[PdfDocumentBackend]
if pdf_backend == PdfBackend.DLPARSE_V1:
backend = DoclingParseDocumentBackend
pdf_backend_options = None
elif pdf_backend == PdfBackend.DLPARSE_V2:
backend = DoclingParseV2DocumentBackend
pdf_backend_options = None
elif pdf_backend == PdfBackend.DLPARSE_V4:
backend = DoclingParseV4DocumentBackend # type: ignore
elif pdf_backend == PdfBackend.PYPDFIUM2:
backend = PyPdfiumDocumentBackend # type: ignore
else:
raise RuntimeError(f"Unexpected PDF backend type {pdf_backend}")
pdf_format_option = PdfFormatOption(
pipeline_options=pipeline_options,
backend=backend, # pdf_backend
backend_options=pdf_backend_options,
)
# METS GBS options
mets_gbs_options = pipeline_options.model_copy()
mets_gbs_options.do_ocr = False
mets_gbs_format_option = PdfFormatOption(
pipeline_options=mets_gbs_options,
backend=MetsGbsDocumentBackend,
)
# SimplePipeline options
simple_format_option = ConvertPipelineOptions(
do_picture_description=enrich_picture_description,
do_picture_classification=enrich_picture_classes,
)
if artifacts_path is not None:
simple_format_option.artifacts_path = artifacts_path
# Use image-native backend for IMAGE to avoid pypdfium2 locking
image_format_option = PdfFormatOption(
pipeline_options=pipeline_options,
backend=ImageDocumentBackend,
backend_options=pdf_backend_options,
)
format_options = {
InputFormat.PDF: pdf_format_option,
InputFormat.IMAGE: image_format_option,
InputFormat.METS_GBS: mets_gbs_format_option,
InputFormat.DOCX: WordFormatOption(
pipeline_options=simple_format_option
),
InputFormat.PPTX: PowerpointFormatOption(
pipeline_options=simple_format_option
),
InputFormat.XLSX: ExcelFormatOption(
pipeline_options=simple_format_option
),
InputFormat.HTML: HTMLFormatOption(
pipeline_options=simple_format_option
),
InputFormat.MD: MarkdownFormatOption(
pipeline_options=simple_format_option
),
}
elif pipeline == ProcessingPipeline.VLM:
pipeline_options = VlmPipelineOptions(
enable_remote_services=enable_remote_services,
)
if vlm_model == VlmModelType.GRANITE_VISION:
pipeline_options.vlm_options = GRANITE_VISION_TRANSFORMERS
elif vlm_model == VlmModelType.GRANITE_VISION_OLLAMA:
pipeline_options.vlm_options = GRANITE_VISION_OLLAMA
elif vlm_model == VlmModelType.GOT_OCR_2:
pipeline_options.vlm_options = GOT2_TRANSFORMERS
elif vlm_model == VlmModelType.SMOLDOCLING:
pipeline_options.vlm_options = SMOLDOCLING_TRANSFORMERS
if sys.platform == "darwin":
try:
import mlx_vlm
pipeline_options.vlm_options = SMOLDOCLING_MLX
except ImportError:
if sys.version_info < (3, 14):
_log.warning(
"To run SmolDocling faster, please install mlx-vlm:\n"
"pip install mlx-vlm"
)
else:
_log.warning(
"You can run SmolDocling faster with MLX support, but it is unfortunately not yet available on Python 3.14."
)
elif vlm_model == VlmModelType.GRANITEDOCLING:
pipeline_options.vlm_options = GRANITEDOCLING_TRANSFORMERS
if sys.platform == "darwin":
try:
import mlx_vlm
pipeline_options.vlm_options = GRANITEDOCLING_MLX
except ImportError:
if sys.version_info < (3, 14):
_log.warning(
"To run GraniteDocling faster, please install mlx-vlm:\n"
"pip install mlx-vlm"
)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | true |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/cli/__init__.py | docling/cli/__init__.py | python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false | |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/chunking/__init__.py | docling/chunking/__init__.py | #
# Copyright IBM Corp. 2024 - 2024
# SPDX-License-Identifier: MIT
#
from docling_core.transforms.chunker.base import BaseChunk, BaseChunker, BaseMeta
from docling_core.transforms.chunker.hierarchical_chunker import (
DocChunk,
DocMeta,
HierarchicalChunker,
)
from docling_core.transforms.chunker.hybrid_chunker import HybridChunker
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/pipeline/extraction_vlm_pipeline.py | docling/pipeline/extraction_vlm_pipeline.py | import inspect
import json
import logging
from typing import Optional
from PIL.Image import Image
from pydantic import BaseModel
from docling.backend.abstract_backend import PaginatedDocumentBackend
from docling.backend.pdf_backend import PdfDocumentBackend
from docling.datamodel.base_models import ConversionStatus, ErrorItem, VlmStopReason
from docling.datamodel.document import InputDocument
from docling.datamodel.extraction import (
ExtractedPageData,
ExtractionResult,
ExtractionTemplateType,
)
from docling.datamodel.pipeline_options import (
PipelineOptions,
VlmExtractionPipelineOptions,
)
from docling.datamodel.settings import settings
from docling.models.vlm_models_inline.nuextract_transformers_model import (
NuExtractTransformersModel,
)
from docling.pipeline.base_extraction_pipeline import BaseExtractionPipeline
from docling.utils.accelerator_utils import decide_device
_log = logging.getLogger(__name__)
class ExtractionVlmPipeline(BaseExtractionPipeline):
    """Extraction pipeline that prompts a VLM with one image per page.

    Each page image is rendered via the PDF backend, sent to a NuExtract
    transformers model together with a prompt derived from the caller's
    template, and the model's reply is stored per page (parsed as JSON when
    possible, raw text always).
    """

    def __init__(self, pipeline_options: VlmExtractionPipelineOptions):
        super().__init__(pipeline_options)
        # Initialize VLM model with default options
        self.accelerator_options = pipeline_options.accelerator_options
        self.pipeline_options: VlmExtractionPipelineOptions
        # Create VLM model instance
        self.vlm_model = NuExtractTransformersModel(
            enabled=True,
            artifacts_path=self.artifacts_path,  # Will download automatically
            accelerator_options=self.accelerator_options,
            vlm_options=pipeline_options.vlm_options,
        )

    def _extract_data(
        self,
        ext_res: ExtractionResult,
        template: Optional[ExtractionTemplateType] = None,
    ) -> ExtractionResult:
        """Extract data using the VLM model.

        Appends one ExtractedPageData per processed page to *ext_res.pages*;
        per-page failures are recorded in that page's ``errors`` rather than
        aborting the whole document.
        """
        try:
            # Get images from input document using the backend
            images = self._get_images_from_input(ext_res.input)
            if not images:
                ext_res.status = ConversionStatus.FAILURE
                ext_res.errors.append(
                    ErrorItem(
                        component_type="extraction_pipeline",
                        module_name=self.__class__.__name__,
                        error_message="No images found in document",
                    )
                )
                return ext_res
            # Use provided template or default prompt
            if template is not None:
                prompt = self._serialize_template(template)
            else:
                prompt = "Extract all text and structured information from this document. Return as JSON."
            # Process all images with VLM model
            start_page, end_page = ext_res.input.limits.page_range
            for i, image in enumerate(images):
                # Calculate the actual page number based on the filtered range
                page_number = start_page + i
                try:
                    predictions = list(self.vlm_model.process_images([image], prompt))
                    if predictions:
                        # Parse the extracted text as JSON if possible, otherwise use as-is
                        extracted_text = predictions[0].text
                        extracted_data = None
                        vlm_stop_reason: VlmStopReason = predictions[0].stop_reason
                        # A truncated or stop-sequence-terminated generation may be
                        # incomplete, so the document is downgraded to PARTIAL_SUCCESS.
                        if (
                            vlm_stop_reason == VlmStopReason.LENGTH
                            or vlm_stop_reason == VlmStopReason.STOP_SEQUENCE
                        ):
                            ext_res.status = ConversionStatus.PARTIAL_SUCCESS
                        try:
                            extracted_data = json.loads(extracted_text)
                        except (json.JSONDecodeError, ValueError):
                            # If not valid JSON, keep extracted_data as None
                            pass
                        # Create page data with proper structure
                        page_data = ExtractedPageData(
                            page_no=page_number,
                            extracted_data=extracted_data,
                            raw_text=extracted_text,  # Always populate raw_text
                        )
                        ext_res.pages.append(page_data)
                    else:
                        # Add error page data
                        page_data = ExtractedPageData(
                            page_no=page_number,
                            extracted_data=None,
                            errors=["No extraction result from VLM model"],
                        )
                        ext_res.pages.append(page_data)
                except Exception as e:
                    _log.error(f"Error processing page {page_number}: {e}")
                    page_data = ExtractedPageData(
                        page_no=page_number, extracted_data=None, errors=[str(e)]
                    )
                    ext_res.pages.append(page_data)
        except Exception as e:
            # Document-level failure (e.g. backend error before any page ran).
            _log.error(f"Error during extraction: {e}")
            ext_res.errors.append(
                ErrorItem(
                    component_type="extraction_pipeline",
                    module_name=self.__class__.__name__,
                    error_message=str(e),
                )
            )
        return ext_res

    def _determine_status(self, ext_res: ExtractionResult) -> ConversionStatus:
        """Determine the status based on extraction results.

        SUCCESS requires at least one page and no per-page errors; a
        PARTIAL_SUCCESS set during extraction (truncated generation) is kept.
        """
        if ext_res.pages and not any(page.errors for page in ext_res.pages):
            return (
                ConversionStatus.PARTIAL_SUCCESS
                if ext_res.status == ConversionStatus.PARTIAL_SUCCESS
                else ConversionStatus.SUCCESS
            )
        else:
            return ConversionStatus.FAILURE

    def _get_images_from_input(self, input_doc: InputDocument) -> list[Image]:
        """Extract images from input document using the backend.

        Only pages within ``input_doc.limits.page_range`` (1-based, inclusive)
        are rendered; pages that fail to load are logged and skipped.
        """
        images = []
        try:
            backend = input_doc._backend
            assert isinstance(backend, PdfDocumentBackend)
            # Use the backend's pagination interface
            page_count = backend.page_count()
            # Respect page range limits, following the same pattern as PaginatedPipeline
            start_page, end_page = input_doc.limits.page_range
            _log.info(
                f"Processing pages {start_page}-{end_page} of {page_count} total pages for extraction"
            )
            for page_num in range(page_count):
                # Only process pages within the specified range (0-based indexing)
                if start_page - 1 <= page_num <= end_page - 1:
                    try:
                        page_backend = backend.load_page(page_num)
                        if page_backend.is_valid():
                            # Get page image at a reasonable scale
                            page_image = page_backend.get_page_image(
                                scale=self.pipeline_options.vlm_options.scale
                            )
                            images.append(page_image)
                        else:
                            _log.warning(f"Page {page_num + 1} backend is not valid")
                    except Exception as e:
                        _log.error(f"Error loading page {page_num + 1}: {e}")
        except Exception as e:
            _log.error(f"Error getting images from input document: {e}")
        return images

    def _serialize_template(self, template: ExtractionTemplateType) -> str:
        """Serialize template to string based on its type.

        Accepts a plain string, a dict (JSON-dumped), a pydantic instance
        (model JSON), or a pydantic model *class* — for the latter an example
        instance is synthesized with polyfactory so the VLM sees the schema.
        """
        if isinstance(template, str):
            return template
        elif isinstance(template, dict):
            return json.dumps(template, indent=2)
        elif isinstance(template, BaseModel):
            return template.model_dump_json(indent=2)
        elif inspect.isclass(template) and issubclass(template, BaseModel):
            # Imported lazily: polyfactory is only needed for class templates.
            from polyfactory.factories.pydantic_factory import ModelFactory

            class ExtractionTemplateFactory(ModelFactory[template]):  # type: ignore
                __use_examples__ = True  # prefer Field(examples=...) when present
                __use_defaults__ = True  # use field defaults instead of random values
                __check_model__ = (
                    True  # setting the value to avoid deprecation warnings
                )

            return ExtractionTemplateFactory.build().model_dump_json(indent=2)  # type: ignore
        else:
            raise ValueError(f"Unsupported template type: {type(template)}")

    @classmethod
    def get_default_options(cls) -> PipelineOptions:
        """Return the default pipeline options for this pipeline class."""
        return VlmExtractionPipelineOptions()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/pipeline/standard_pdf_pipeline.py | docling/pipeline/standard_pdf_pipeline.py | """Thread-safe, production-ready PDF pipeline
================================================
A self-contained, thread-safe PDF conversion pipeline exploiting parallelism between pipeline stages and models.
* **Per-run isolation** - every :py:meth:`execute` call uses its own bounded queues and worker
threads so that concurrent invocations never share mutable state.
* **Deterministic run identifiers** - pages are tracked with an internal *run-id* instead of
relying on :pyfunc:`id`, which may clash after garbage collection.
* **Explicit back-pressure & shutdown** - producers block on full queues; queue *close()*
propagates downstream so stages terminate deterministically without sentinels.
* **Minimal shared state** - heavyweight models are initialised once per pipeline instance
and only read by worker threads; no runtime mutability is exposed.
* **Strict typing & clean API usage** - code is fully annotated and respects *coding_rules.md*.
"""
from __future__ import annotations
import itertools
import logging
import threading
import time
import warnings
from collections import defaultdict, deque
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Callable, Iterable, List, Optional, Sequence, Tuple, cast
import numpy as np
from docling_core.types.doc import DocItem, ImageRef, PictureItem, TableItem
from docling.backend.abstract_backend import AbstractDocumentBackend
from docling.backend.pdf_backend import PdfDocumentBackend
from docling.datamodel.base_models import (
AssembledUnit,
ConversionStatus,
DoclingComponentType,
ErrorItem,
Page,
)
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import ThreadedPdfPipelineOptions
from docling.datamodel.settings import settings
from docling.models.code_formula_model import CodeFormulaModel, CodeFormulaModelOptions
from docling.models.factories import (
get_layout_factory,
get_ocr_factory,
get_table_structure_factory,
)
from docling.models.page_assemble_model import PageAssembleModel, PageAssembleOptions
from docling.models.page_preprocessing_model import (
PagePreprocessingModel,
PagePreprocessingOptions,
)
from docling.models.readingorder_model import ReadingOrderModel, ReadingOrderOptions
from docling.pipeline.base_pipeline import ConvertPipeline
from docling.utils.profiling import ProfilingScope, TimeRecorder
from docling.utils.utils import chunkify
_log = logging.getLogger(__name__)
# ──────────────────────────────────────────────────────────────────────────────
# Helper data structures
# ──────────────────────────────────────────────────────────────────────────────
@dataclass
class ThreadedItem:
    """Envelope that travels between pipeline stages."""

    payload: Optional[Page]  # page data; a None payload is treated as a failure downstream
    run_id: int  # Unique per *execute* call, monotonic across pipeline instance
    page_no: int  # page number within the document — presumably 1-based, TODO confirm
    conv_res: ConversionResult  # conversion result this page belongs to
    error: Optional[Exception] = None  # first error observed for this item, if any
    is_failed: bool = False  # set True once any stage fails to process this item
@dataclass
class ProcessingResult:
    """Aggregated outcome of a pipeline run: surviving pages vs. failures."""

    pages: List[Page] = field(default_factory=list)
    failed_pages: List[Tuple[int, Exception]] = field(default_factory=list)
    total_expected: int = 0

    @property
    def success_count(self) -> int:
        """Number of pages that were processed successfully."""
        return len(self.pages)

    @property
    def failure_count(self) -> int:
        """Number of pages that failed, as (page_no, exception) records."""
        return len(self.failed_pages)

    @property
    def is_partial_success(self) -> bool:
        """True when some pages succeeded but fewer than expected."""
        ok = self.success_count
        return ok > 0 and ok < self.total_expected

    @property
    def is_complete_failure(self) -> bool:
        """True when nothing succeeded and at least one page failed."""
        return self.success_count == 0 and self.failure_count > 0
class ThreadedQueue:
    """Bounded queue with blocking put/ get_batch and explicit *close()* semantics.

    A single lock backs two condition variables: producers wait on *not_full*,
    consumers on *not_empty*. ``close()`` wakes everyone so blocked callers
    return promptly (``put`` -> False, ``get_batch`` -> drained then []).

    Fix: the ``timeout`` parameters were annotated ``Optional[float] | None``,
    a redundant double-optional; normalized to ``Optional[float]``.
    """

    __slots__ = ("_closed", "_items", "_lock", "_max", "_not_empty", "_not_full")

    def __init__(self, max_size: int) -> None:
        self._max: int = max_size
        self._items: deque[ThreadedItem] = deque()
        self._lock = threading.Lock()
        # Both conditions share one lock so state checks and waits are atomic.
        self._not_full = threading.Condition(self._lock)
        self._not_empty = threading.Condition(self._lock)
        self._closed = False

    # ---------------------------------------------------------------- put()
    def put(self, item: ThreadedItem, timeout: Optional[float] = None) -> bool:
        """Block until queue accepts *item* or is closed. Returns *False* if closed.

        With a *timeout* (seconds), also returns False if space does not free
        up in time.
        """
        with self._not_full:
            if self._closed:
                return False
            start = time.monotonic()
            while len(self._items) >= self._max and not self._closed:
                if timeout is not None:
                    remaining = timeout - (time.monotonic() - start)
                    if remaining <= 0:
                        return False
                    self._not_full.wait(remaining)
                else:
                    self._not_full.wait()
            # Re-check: close() may have happened while we were waiting.
            if self._closed:
                return False
            self._items.append(item)
            self._not_empty.notify()
            return True

    # ------------------------------------------------------------ get_batch()
    def get_batch(
        self, size: int, timeout: Optional[float] = None
    ) -> List[ThreadedItem]:
        """Return up to *size* items. Blocks until ≥1 item present or queue closed/timeout."""
        with self._not_empty:
            start = time.monotonic()
            while not self._items and not self._closed:
                if timeout is not None:
                    remaining = timeout - (time.monotonic() - start)
                    if remaining <= 0:
                        return []
                    self._not_empty.wait(remaining)
                else:
                    self._not_empty.wait()
            batch: List[ThreadedItem] = []
            while self._items and len(batch) < size:
                batch.append(self._items.popleft())
            if batch:
                # Space was freed; wake all blocked producers.
                self._not_full.notify_all()
            return batch

    # ---------------------------------------------------------------- close()
    def close(self) -> None:
        """Mark the queue closed and wake every waiting producer/consumer."""
        with self._lock:
            self._closed = True
            self._not_empty.notify_all()
            self._not_full.notify_all()

    # -------------------------------------------------------------- property
    @property
    def closed(self) -> bool:
        # Unlocked read of a bool flag; used for cheap polling by stages.
        return self._closed
class ThreadedPipelineStage:
"""A single pipeline stage backed by one worker thread."""
def __init__(
self,
*,
name: str,
model: Any,
batch_size: int,
batch_timeout: float,
queue_max_size: int,
postprocess: Optional[Callable[[ThreadedItem], None]] = None,
timed_out_run_ids: Optional[set[int]] = None,
) -> None:
self.name = name
self.model = model
self.batch_size = batch_size
self.batch_timeout = batch_timeout
self.input_queue = ThreadedQueue(queue_max_size)
self._outputs: list[ThreadedQueue] = []
self._thread: Optional[threading.Thread] = None
self._running = False
self._postprocess = postprocess
self._timed_out_run_ids = (
timed_out_run_ids if timed_out_run_ids is not None else set()
)
# ---------------------------------------------------------------- wiring
def add_output_queue(self, q: ThreadedQueue) -> None:
self._outputs.append(q)
# -------------------------------------------------------------- lifecycle
def start(self) -> None:
if self._running:
return
self._running = True
self._thread = threading.Thread(
target=self._run, name=f"Stage-{self.name}", daemon=False
)
self._thread.start()
def stop(self) -> None:
if not self._running:
return
self._running = False
self.input_queue.close()
if self._thread is not None:
# Give thread 2s to finish naturally before abandoning
self._thread.join(timeout=15.0)
if self._thread.is_alive():
_log.warning(
"Stage %s thread did not terminate within 15s. "
"Thread is likely stuck in a blocking call and will be abandoned (resources may leak).",
self.name,
)
# ------------------------------------------------------------------ _run
    def _run(self) -> None:
        """Worker loop: pull batches from the input queue until it drains closed.

        On any exit path (normal break, stop(), or a fatal error) the output
        queues are closed in ``finally`` so downstream stages also terminate.
        """
        try:
            while self._running:
                batch = self.input_queue.get_batch(self.batch_size, self.batch_timeout)
                # Empty batch + closed queue means upstream is done: exit cleanly.
                if not batch and self.input_queue.closed:
                    break
                processed = self._process_batch(batch)
                self._emit(processed)
        except Exception:  # pragma: no cover - top-level guard
            _log.exception("Fatal error in stage %s", self.name)
        finally:
            # Propagate shutdown downstream so consumers terminate deterministically.
            for q in self._outputs:
                q.close()
# ----------------------------------------------------- _process_batch()
    def _process_batch(self, batch: Sequence[ThreadedItem]) -> list[ThreadedItem]:
        """Run *model* on *batch* grouped by run_id to maximise batching.

        Every input item reappears in the returned list exactly once: either
        as a freshly built item wrapping a processed page, or passed through
        with ``is_failed``/``error`` set.
        """
        groups: dict[int, list[ThreadedItem]] = defaultdict(list)
        for itm in batch:
            groups[itm.run_id].append(itm)
        result: list[ThreadedItem] = []
        for rid, items in groups.items():
            # If run_id is timed out, skip processing but pass through items as-is
            # This allows already-completed work to flow through while aborting new work
            if rid in self._timed_out_run_ids:
                for it in items:
                    it.is_failed = True
                    if it.error is None:
                        it.error = RuntimeError("document timeout exceeded")
                result.extend(items)
                continue
            good: list[ThreadedItem] = [i for i in items if not i.is_failed]
            if not good:
                result.extend(items)
                continue
            try:
                # Filter out None payloads and ensure type safety
                pages_with_payloads = [
                    (i, i.payload) for i in good if i.payload is not None
                ]
                if len(pages_with_payloads) != len(good):
                    # Some items have None payloads, mark all as failed
                    for it in items:
                        it.is_failed = True
                        it.error = RuntimeError("Page payload is None")
                    result.extend(items)
                    continue
                pages: List[Page] = [payload for _, payload in pages_with_payloads]
                # All items in a group share the same run, so the first item's
                # conv_res stands in for the whole batch.
                processed_pages = list(self.model(good[0].conv_res, pages))  # type: ignore[arg-type]
                if len(processed_pages) != len(pages):  # strict mismatch guard
                    raise RuntimeError(
                        f"Model {self.name} returned wrong number of pages"
                    )
                for idx, page in enumerate(processed_pages):
                    result.append(
                        ThreadedItem(
                            payload=page,
                            run_id=rid,
                            page_no=good[idx].page_no,
                            conv_res=good[idx].conv_res,
                        )
                    )
            except Exception as exc:
                _log.error(
                    "Stage %s failed for run %d: %s", self.name, rid, exc, exc_info=True
                )
                # Fail the whole group (including previously-good items) so the
                # consumer can account for every page of the run.
                for it in items:
                    it.is_failed = True
                    it.error = exc
                result.extend(items)
        return result
# -------------------------------------------------------------- _emit()
def _emit(self, items: Iterable[ThreadedItem]) -> None:
for item in items:
if self._postprocess is not None:
self._postprocess(item)
for q in self._outputs:
if not q.put(item):
_log.error("Output queue closed while emitting from %s", self.name)
class PreprocessThreadedStage(ThreadedPipelineStage):
    """Pipeline stage that lazily loads PDF backends just-in-time.

    Pages arrive as placeholders without a backend; this stage attaches the
    page backend (and size) on first sight, then runs the preprocessing model.
    """

    def __init__(
        self,
        *,
        batch_timeout: float,
        queue_max_size: int,
        model: Any,
        timed_out_run_ids: Optional[set[int]] = None,
    ) -> None:
        """Create the stage; batch_size is fixed to 1 (per-page backend loading)."""
        super().__init__(
            name="preprocess",
            model=model,
            batch_size=1,
            batch_timeout=batch_timeout,
            queue_max_size=queue_max_size,
            timed_out_run_ids=timed_out_run_ids,
        )

    def _process_batch(self, batch: Sequence[ThreadedItem]) -> list[ThreadedItem]:
        """Load backends for pages lacking one, then run the preprocessing model.

        Mirrors ``ThreadedPipelineStage._process_batch`` but adds lazy backend
        loading; failed or timed-out items are passed through unprocessed.
        """
        groups: dict[int, list[ThreadedItem]] = defaultdict(list)
        for itm in batch:
            groups[itm.run_id].append(itm)
        result: list[ThreadedItem] = []
        for rid, items in groups.items():
            # If run_id is timed out, skip processing but pass through items as-is
            # This allows already-completed work to flow through while aborting new work
            if rid in self._timed_out_run_ids:
                for it in items:
                    it.is_failed = True
                    if it.error is None:
                        it.error = RuntimeError("document timeout exceeded")
                result.extend(items)
                continue
            good = [i for i in items if not i.is_failed]
            if not good:
                result.extend(items)
                continue
            try:
                pages_with_payloads: list[tuple[ThreadedItem, Page]] = []
                for it in good:
                    page = it.payload
                    if page is None:
                        raise RuntimeError("Page payload is None")
                    if page._backend is None:
                        backend = it.conv_res.input._backend
                        assert isinstance(backend, PdfDocumentBackend), (
                            "Threaded pipeline only supports PdfDocumentBackend."
                        )
                        page_backend = backend.load_page(page.page_no)
                        page._backend = page_backend
                        # Size is only known for valid backends; invalid pages
                        # keep size unset and are handled downstream.
                        if page_backend.is_valid():
                            page.size = page_backend.get_size()
                    pages_with_payloads.append((it, page))
                pages = [payload for _, payload in pages_with_payloads]
                processed_pages = list(
                    self.model(good[0].conv_res, pages)  # type: ignore[arg-type]
                )
                if len(processed_pages) != len(pages):
                    raise RuntimeError(
                        "PagePreprocessingModel returned unexpected number of pages"
                    )
                for idx, processed_page in enumerate(processed_pages):
                    result.append(
                        ThreadedItem(
                            payload=processed_page,
                            run_id=rid,
                            page_no=good[idx].page_no,
                            conv_res=good[idx].conv_res,
                        )
                    )
            except Exception as exc:
                # Fix: include the traceback (exc_info) for parity with the
                # base stage's error logging; previously only the message was
                # recorded, losing the failure's origin.
                _log.error(
                    "Stage preprocess failed for run %d: %s", rid, exc, exc_info=True
                )
                for it in items:
                    it.is_failed = True
                    it.error = exc
                result.extend(items)
        return result
@dataclass
class RunContext:
    """Wiring for a single *execute* call."""

    # Stages in execution order; started and stopped together by the pipeline.
    stages: list[ThreadedPipelineStage]
    # Entry stage that receives freshly created page placeholders.
    first_stage: ThreadedPipelineStage
    # Queue written by the last stage; drained by _build_document.
    output_queue: ThreadedQueue
    # Run ids whose document timeout expired; shared with every stage so they
    # fail-fast items for those runs instead of processing them.
    timed_out_run_ids: set[int] = field(default_factory=set)
# ──────────────────────────────────────────────────────────────────────────────
# Main pipeline
# ──────────────────────────────────────────────────────────────────────────────
class StandardPdfPipeline(ConvertPipeline):
"""High-performance PDF pipeline with multi-threaded stages."""
    def __init__(self, pipeline_options: ThreadedPdfPipelineOptions) -> None:
        """Set up the pipeline and eagerly initialise all heavy models."""
        super().__init__(pipeline_options)
        self.pipeline_options: ThreadedPdfPipelineOptions = pipeline_options
        self._run_seq = itertools.count(1)  # deterministic, monotonic run ids
        # initialise heavy models once
        self._init_models()
# ────────────────────────────────────────────────────────────────────────
# Heavy-model initialisation & helpers
# ────────────────────────────────────────────────────────────────────────
    def _init_models(self) -> None:
        """Instantiate every model used by the stages, once per pipeline instance."""
        art_path = self.artifacts_path
        # Page images must be retained if any image output is requested.
        self.keep_images = (
            self.pipeline_options.generate_page_images
            or self.pipeline_options.generate_picture_images
            or self.pipeline_options.generate_table_images
        )
        self.preprocessing_model = PagePreprocessingModel(
            options=PagePreprocessingOptions(
                images_scale=self.pipeline_options.images_scale
            )
        )
        self.ocr_model = self._make_ocr_model(art_path)
        layout_factory = get_layout_factory(
            allow_external_plugins=self.pipeline_options.allow_external_plugins
        )
        self.layout_model = layout_factory.create_instance(
            options=self.pipeline_options.layout_options,
            artifacts_path=art_path,
            accelerator_options=self.pipeline_options.accelerator_options,
        )
        table_factory = get_table_structure_factory(
            allow_external_plugins=self.pipeline_options.allow_external_plugins
        )
        self.table_model = table_factory.create_instance(
            options=self.pipeline_options.table_structure_options,
            enabled=self.pipeline_options.do_table_structure,
            artifacts_path=art_path,
            accelerator_options=self.pipeline_options.accelerator_options,
        )
        self.assemble_model = PageAssembleModel(options=PageAssembleOptions())
        self.reading_order_model = ReadingOrderModel(options=ReadingOrderOptions())
        # --- optional enrichment ------------------------------------------------
        self.enrichment_pipe = [
            # Code Formula Enrichment Model
            CodeFormulaModel(
                enabled=self.pipeline_options.do_code_enrichment
                or self.pipeline_options.do_formula_enrichment,
                artifacts_path=self.artifacts_path,
                options=CodeFormulaModelOptions(
                    do_code_enrichment=self.pipeline_options.do_code_enrichment,
                    do_formula_enrichment=self.pipeline_options.do_formula_enrichment,
                ),
                accelerator_options=self.pipeline_options.accelerator_options,
            ),
            *self.enrichment_pipe,
        ]
        # Enrichment models need the page backend to stay alive after assembly.
        self.keep_backend = any(
            (
                self.pipeline_options.do_formula_enrichment,
                self.pipeline_options.do_code_enrichment,
                self.pipeline_options.do_picture_classification,
                self.pipeline_options.do_picture_description,
            )
        )
# ---------------------------------------------------------------- helpers
def _make_ocr_model(self, art_path: Optional[Path]) -> Any:
factory = get_ocr_factory(
allow_external_plugins=self.pipeline_options.allow_external_plugins
)
return factory.create_instance(
options=self.pipeline_options.ocr_options,
enabled=self.pipeline_options.do_ocr,
artifacts_path=art_path,
accelerator_options=self.pipeline_options.accelerator_options,
)
def _release_page_resources(self, item: ThreadedItem) -> None:
page = item.payload
if page is None:
return
if not self.keep_images:
page._image_cache = {}
if not self.keep_backend and page._backend is not None:
page._backend.unload()
page._backend = None
if not self.pipeline_options.generate_parsed_pages:
page.parsed_page = None
# ────────────────────────────────────────────────────────────────────────
# Build - thread pipeline
# ────────────────────────────────────────────────────────────────────────
    def _create_run_ctx(self) -> RunContext:
        """Build and wire the five-stage pipeline for one execute() call.

        Stage order: preprocess -> ocr -> layout -> table -> assemble.
        A single timed_out_run_ids set is shared by every stage so a document
        timeout aborts work pipeline-wide.
        """
        opts = self.pipeline_options
        timed_out_run_ids: set[int] = set()
        preprocess = PreprocessThreadedStage(
            batch_timeout=opts.batch_polling_interval_seconds,
            queue_max_size=opts.queue_max_size,
            model=self.preprocessing_model,
            timed_out_run_ids=timed_out_run_ids,
        )
        ocr = ThreadedPipelineStage(
            name="ocr",
            model=self.ocr_model,
            batch_size=opts.ocr_batch_size,
            batch_timeout=opts.batch_polling_interval_seconds,
            queue_max_size=opts.queue_max_size,
            timed_out_run_ids=timed_out_run_ids,
        )
        layout = ThreadedPipelineStage(
            name="layout",
            model=self.layout_model,
            batch_size=opts.layout_batch_size,
            batch_timeout=opts.batch_polling_interval_seconds,
            queue_max_size=opts.queue_max_size,
            timed_out_run_ids=timed_out_run_ids,
        )
        table = ThreadedPipelineStage(
            name="table",
            model=self.table_model,
            batch_size=opts.table_batch_size,
            batch_timeout=opts.batch_polling_interval_seconds,
            queue_max_size=opts.queue_max_size,
            timed_out_run_ids=timed_out_run_ids,
        )
        assemble = ThreadedPipelineStage(
            name="assemble",
            model=self.assemble_model,
            batch_size=1,
            batch_timeout=opts.batch_polling_interval_seconds,
            queue_max_size=opts.queue_max_size,
            # Per-page resource release happens as items leave the last stage.
            postprocess=self._release_page_resources,
            timed_out_run_ids=timed_out_run_ids,
        )
        # wire stages
        output_q = ThreadedQueue(opts.queue_max_size)
        preprocess.add_output_queue(ocr.input_queue)
        ocr.add_output_queue(layout.input_queue)
        layout.add_output_queue(table.input_queue)
        table.add_output_queue(assemble.input_queue)
        assemble.add_output_queue(output_q)
        stages = [preprocess, ocr, layout, table, assemble]
        return RunContext(
            stages=stages,
            first_stage=preprocess,
            output_queue=output_q,
            timed_out_run_ids=timed_out_run_ids,
        )
# --------------------------------------------------------------------- build
    def _build_document(self, conv_res: ConversionResult) -> ConversionResult:
        """Stream-build the document while interleaving producer and consumer work.

        The main thread alternates between feeding page placeholders into the
        first stage (non-blocking puts) and draining completed items from the
        output queue, until every page is accounted for or the document
        timeout fires.

        Note: If a worker thread gets stuck in a blocking call (model inference or PDF backend
        load_page/get_size), that thread will be abandoned after a brief wait (15s) during cleanup.
        The thread continues running until the blocking call completes, potentially holding
        resources (e.g., pypdfium2_lock).
        """
        run_id = next(self._run_seq)
        assert isinstance(conv_res.input._backend, PdfDocumentBackend)
        # Collect page placeholders; backends are loaded lazily in preprocess stage
        start_page, end_page = conv_res.input.limits.page_range
        pages: list[Page] = []
        for i in range(conv_res.input.page_count):
            if start_page - 1 <= i <= end_page - 1:
                page = Page(page_no=i)
                conv_res.pages.append(page)
                pages.append(page)
        if not pages:
            conv_res.status = ConversionStatus.FAILURE
            return conv_res
        total_pages: int = len(pages)
        ctx: RunContext = self._create_run_ctx()
        for st in ctx.stages:
            st.start()
        proc = ProcessingResult(total_expected=total_pages)
        fed_idx: int = 0  # number of pages successfully queued
        batch_size: int = 32  # drain chunk
        start_time = time.monotonic()
        timeout_exceeded = False
        input_queue_closed = False
        try:
            while proc.success_count + proc.failure_count < total_pages:
                # Check timeout
                if (
                    self.pipeline_options.document_timeout is not None
                    and not timeout_exceeded
                ):
                    elapsed_time = time.monotonic() - start_time
                    if elapsed_time > self.pipeline_options.document_timeout:
                        _log.warning(
                            f"Document processing time ({elapsed_time:.3f}s) "
                            f"exceeded timeout of {self.pipeline_options.document_timeout:.3f}s"
                        )
                        timeout_exceeded = True
                        # Stages consult this set and fail items of this run.
                        ctx.timed_out_run_ids.add(run_id)
                        if not input_queue_closed:
                            ctx.first_stage.input_queue.close()
                            input_queue_closed = True
                        # Break immediately - don't wait for in-flight work
                        break
                # 1) feed - try to enqueue until the first queue is full
                if not input_queue_closed:
                    while fed_idx < total_pages:
                        ok = ctx.first_stage.input_queue.put(
                            ThreadedItem(
                                payload=pages[fed_idx],
                                run_id=run_id,
                                page_no=pages[fed_idx].page_no,
                                conv_res=conv_res,
                            ),
                            timeout=0.0,  # non-blocking try-put
                        )
                        if ok:
                            fed_idx += 1
                            if fed_idx == total_pages:
                                ctx.first_stage.input_queue.close()
                                input_queue_closed = True
                        else:  # queue full - switch to draining
                            break
                # 2) drain - pull whatever is ready from the output side
                out_batch = ctx.output_queue.get_batch(batch_size, timeout=0.05)
                for itm in out_batch:
                    # Guard against stale items from a previous run.
                    if itm.run_id != run_id:
                        continue
                    if itm.is_failed or itm.error:
                        proc.failed_pages.append(
                            (itm.page_no, itm.error or RuntimeError("unknown error"))
                        )
                    else:
                        assert itm.payload is not None
                        proc.pages.append(itm.payload)
                # 3) failure safety - downstream closed early
                if not out_batch and ctx.output_queue.closed:
                    missing = total_pages - (proc.success_count + proc.failure_count)
                    if missing > 0:
                        proc.failed_pages.extend(
                            [(-1, RuntimeError("pipeline terminated early"))] * missing
                        )
                    break
            # Mark remaining pages as failed if timeout occurred
            if timeout_exceeded:
                completed_page_nos = {p.page_no for p in proc.pages} | {
                    fp for fp, _ in proc.failed_pages
                }
                for page in pages[fed_idx:]:
                    if page.page_no not in completed_page_nos:
                        proc.failed_pages.append(
                            (page.page_no, RuntimeError("document timeout exceeded"))
                        )
        finally:
            # Stop all stage threads and close the output queue regardless of
            # how the loop exited.
            for st in ctx.stages:
                st.stop()
            ctx.output_queue.close()
        self._integrate_results(conv_res, proc, timeout_exceeded=timeout_exceeded)
        return conv_res
# ---------------------------------------------------- integrate_results()
    def _integrate_results(
        self,
        conv_res: ConversionResult,
        proc: ProcessingResult,
        timeout_exceeded: bool = False,
    ) -> None:
        """Fold the per-page processing outcome into *conv_res*.

        Sets conv_res.pages (successful pages only), appends an ErrorItem for
        every failed page, derives the final ConversionStatus, and releases
        per-page resources according to the keep_* flags.
        """
        page_map = {p.page_no: p for p in proc.pages}
        # Only keep pages that successfully completed processing
        conv_res.pages = [
            page_map[p.page_no] for p in conv_res.pages if p.page_no in page_map
        ]
        # Add error details from failed pages
        for page_no, error in proc.failed_pages:
            # page_no of -1 marks a failure not attributable to a single page.
            page_label = f"Page {page_no + 1}" if page_no >= 0 else "Unknown page"
            error_msg = str(error) if error else ""
            error_item = ErrorItem(
                component_type=DoclingComponentType.PIPELINE,
                module_name=self.__class__.__name__,
                error_message=f"{page_label}: {error_msg}" if error_msg else page_label,
            )
            conv_res.errors.append(error_item)
        if timeout_exceeded and proc.total_expected > 0:
            # Timeout exceeded: set PARTIAL_SUCCESS if any pages were attempted
            conv_res.status = ConversionStatus.PARTIAL_SUCCESS
        elif proc.is_complete_failure:
            conv_res.status = ConversionStatus.FAILURE
        elif proc.is_partial_success:
            conv_res.status = ConversionStatus.PARTIAL_SUCCESS
        else:
            conv_res.status = ConversionStatus.SUCCESS
        if not self.keep_images:
            for p in conv_res.pages:
                p._image_cache = {}
        for p in conv_res.pages:
            if not self.keep_backend and p._backend is not None:
                p._backend.unload()
            if not self.pipeline_options.generate_parsed_pages:
                del p.parsed_page
                p.parsed_page = None
# ---------------------------------------------------------------- assemble
def _assemble_document(self, conv_res: ConversionResult) -> ConversionResult:
elements, headers, body = [], [], []
with TimeRecorder(conv_res, "doc_assemble", scope=ProfilingScope.DOCUMENT):
for p in conv_res.pages:
if p.assembled:
elements.extend(p.assembled.elements)
headers.extend(p.assembled.headers)
body.extend(p.assembled.body)
conv_res.assembled = AssembledUnit(
elements=elements, headers=headers, body=body
)
conv_res.document = self.reading_order_model(conv_res)
# Generate page images in the output
if self.pipeline_options.generate_page_images:
for page in conv_res.pages:
assert page.image is not None
page_no = page.page_no + 1
conv_res.document.pages[page_no].image = ImageRef.from_pil(
page.image, dpi=int(72 * self.pipeline_options.images_scale)
)
# Generate images of the requested element types
with warnings.catch_warnings(): # deprecated generate_table_images
warnings.filterwarnings("ignore", category=DeprecationWarning)
if (
self.pipeline_options.generate_picture_images
or self.pipeline_options.generate_table_images
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | true |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/pipeline/threaded_standard_pdf_pipeline.py | docling/pipeline/threaded_standard_pdf_pipeline.py | from docling.pipeline.standard_pdf_pipeline import StandardPdfPipeline
class ThreadedStandardPdfPipeline(StandardPdfPipeline):
"""Backwards compatible import for ThreadedStandardPdfPipeline."""
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/pipeline/vlm_pipeline.py | docling/pipeline/vlm_pipeline.py | import logging
import re
from io import BytesIO
from pathlib import Path
from typing import List, Optional, Union, cast
from docling_core.types.doc import (
BoundingBox,
ContentLayer,
DocItem,
DoclingDocument,
ImageRef,
PictureItem,
ProvenanceItem,
TextItem,
)
from docling_core.types.doc.base import (
BoundingBox,
Size,
)
from docling_core.types.doc.document import DocTagsDocument
from PIL import Image as PILImage
from docling.backend.abstract_backend import AbstractDocumentBackend
from docling.backend.html_backend import HTMLDocumentBackend
from docling.backend.md_backend import MarkdownDocumentBackend
from docling.backend.pdf_backend import PdfDocumentBackend
from docling.datamodel.base_models import InputFormat, Page
from docling.datamodel.document import ConversionResult, InputDocument
from docling.datamodel.pipeline_options import (
VlmPipelineOptions,
)
from docling.datamodel.pipeline_options_vlm_model import (
ApiVlmOptions,
InferenceFramework,
InlineVlmOptions,
ResponseFormat,
)
from docling.datamodel.settings import settings
from docling.models.api_vlm_model import ApiVlmModel
from docling.models.vlm_models_inline.hf_transformers_model import (
HuggingFaceTransformersVlmModel,
)
from docling.models.vlm_models_inline.mlx_model import HuggingFaceMlxModel
from docling.pipeline.base_pipeline import PaginatedPipeline
from docling.utils.profiling import ProfilingScope, TimeRecorder
_log = logging.getLogger(__name__)
class VlmPipeline(PaginatedPipeline):
    def __init__(self, pipeline_options: VlmPipelineOptions):
        """Build the single-model VLM pipeline from *pipeline_options*.

        Selects the VLM implementation (remote API, MLX, Transformers, or
        vLLM) based on the options' type and inference framework.
        """
        super().__init__(pipeline_options)
        self.keep_backend = True
        self.pipeline_options: VlmPipelineOptions
        # force_backend_text = False - use text that is coming from VLM response
        # force_backend_text = True - get text from backend using bounding boxes predicted by SmolDocling doctags
        self.force_backend_text = (
            pipeline_options.force_backend_text
            and pipeline_options.vlm_options.response_format == ResponseFormat.DOCTAGS
        )
        self.keep_images = self.pipeline_options.generate_page_images
        if isinstance(pipeline_options.vlm_options, ApiVlmOptions):
            self.build_pipe = [
                ApiVlmModel(
                    enabled=True,  # must be always enabled for this pipeline to make sense.
                    enable_remote_services=self.pipeline_options.enable_remote_services,
                    vlm_options=cast(ApiVlmOptions, self.pipeline_options.vlm_options),
                ),
            ]
        elif isinstance(self.pipeline_options.vlm_options, InlineVlmOptions):
            vlm_options = cast(InlineVlmOptions, self.pipeline_options.vlm_options)
            if vlm_options.inference_framework == InferenceFramework.MLX:
                self.build_pipe = [
                    HuggingFaceMlxModel(
                        enabled=True,  # must be always enabled for this pipeline to make sense.
                        artifacts_path=self.artifacts_path,
                        accelerator_options=pipeline_options.accelerator_options,
                        vlm_options=vlm_options,
                    ),
                ]
            elif vlm_options.inference_framework == InferenceFramework.TRANSFORMERS:
                self.build_pipe = [
                    HuggingFaceTransformersVlmModel(
                        enabled=True,  # must be always enabled for this pipeline to make sense.
                        artifacts_path=self.artifacts_path,
                        accelerator_options=pipeline_options.accelerator_options,
                        vlm_options=vlm_options,
                    ),
                ]
            elif vlm_options.inference_framework == InferenceFramework.VLLM:
                # Imported lazily so vLLM is only required when selected.
                from docling.models.vlm_models_inline.vllm_model import VllmVlmModel

                self.build_pipe = [
                    VllmVlmModel(
                        enabled=True,  # must be always enabled for this pipeline to make sense.
                        artifacts_path=self.artifacts_path,
                        accelerator_options=pipeline_options.accelerator_options,
                        vlm_options=vlm_options,
                    ),
                ]
            else:
                raise ValueError(
                    f"Could not instantiate the right type of VLM pipeline: {vlm_options.inference_framework}"
                )
        self.enrichment_pipe = [
            # Other models working on `NodeItem` elements in the DoclingDocument
        ]
    def initialize_page(self, conv_res: ConversionResult, page: Page) -> Page:
        """Attach a backend to *page*, set its size, and optionally pre-parse text."""
        with TimeRecorder(conv_res, "page_init"):
            images_scale = self.pipeline_options.images_scale
            if images_scale is not None:
                page._default_image_scale = images_scale
            page._backend = conv_res.input._backend.load_page(page.page_no)  # type: ignore
            if page._backend is not None and page._backend.is_valid():
                page.size = page._backend.get_size()
                if self.force_backend_text:
                    # Pre-compute segmented text so doctags output can later be
                    # replaced with backend-extracted text.
                    page.parsed_page = page._backend.get_segmented_page()
        return page
def extract_text_from_backend(
self, page: Page, bbox: Union[BoundingBox, None]
) -> str:
# Convert bounding box normalized to 0-100 into page coordinates for cropping
text = ""
if bbox:
if page.size:
if page._backend:
text = page._backend.get_text_in_rect(bbox)
return text
    def _assemble_document(self, conv_res: ConversionResult) -> ConversionResult:
        """Convert per-page VLM responses into a DoclingDocument.

        Dispatches on the configured response format (doctags / markdown /
        HTML) and, if requested, crops picture images out of the page images.
        """
        with TimeRecorder(conv_res, "doc_assemble", scope=ProfilingScope.DOCUMENT):
            if (
                self.pipeline_options.vlm_options.response_format
                == ResponseFormat.DOCTAGS
            ):
                conv_res.document = self._turn_dt_into_doc(conv_res)
            elif (
                self.pipeline_options.vlm_options.response_format
                == ResponseFormat.MARKDOWN
            ):
                conv_res.document = self._turn_md_into_doc(conv_res)
            elif (
                self.pipeline_options.vlm_options.response_format == ResponseFormat.HTML
            ):
                conv_res.document = self._turn_html_into_doc(conv_res)
            else:
                raise RuntimeError(
                    f"Unsupported VLM response format {self.pipeline_options.vlm_options.response_format}"
                )
            # Generate images of the requested element types
            if self.pipeline_options.generate_picture_images:
                scale = self.pipeline_options.images_scale
                for element, _level in conv_res.document.iterate_items():
                    if not isinstance(element, DocItem) or len(element.prov) == 0:
                        continue
                    if (
                        isinstance(element, PictureItem)
                        and self.pipeline_options.generate_picture_images
                    ):
                        # Provenance page numbers are 1-based; conv_res.pages is 0-based.
                        page_ix = element.prov[0].page_no - 1
                        page = conv_res.pages[page_ix]
                        assert page.size is not None
                        assert page.image is not None
                        # Scale the bbox into image pixels and flip to a
                        # top-left origin before cropping.
                        crop_bbox = (
                            element.prov[0]
                            .bbox.scaled(scale=scale)
                            .to_top_left_origin(page_height=page.size.height * scale)
                        )
                        cropped_im = page.image.crop(crop_bbox.as_tuple())
                        element.image = ImageRef.from_pil(
                            cropped_im, dpi=int(72 * scale)
                        )
        return conv_res
    def _turn_dt_into_doc(self, conv_res) -> DoclingDocument:
        """Parse per-page doctags responses into a DoclingDocument.

        Missing responses/images fall back to an empty string and a 1x1
        white placeholder so the page count stays aligned.
        """
        doctags_list = []
        image_list = []
        for page in conv_res.pages:
            predicted_doctags = ""
            img = PILImage.new("RGB", (1, 1), "rgb(255,255,255)")
            if page.predictions.vlm_response:
                predicted_doctags = page.predictions.vlm_response.text
            if page.image:
                img = page.image
            image_list.append(img)
            doctags_list.append(predicted_doctags)
        doctags_list_c = cast(List[Union[Path, str]], doctags_list)
        image_list_c = cast(List[Union[Path, PILImage.Image]], image_list)
        doctags_doc = DocTagsDocument.from_doctags_and_image_pairs(
            doctags_list_c, image_list_c
        )
        conv_res.document = DoclingDocument.load_from_doctags(
            doctag_document=doctags_doc
        )
        # If forced backend text, replace model predicted text with backend one
        # NOTE(review): 'page' below is the loop variable left over from the
        # loop above, i.e. the LAST page — its size is applied to every
        # element's crop. Confirm this is intended for multi-page documents.
        if page.size:
            if self.force_backend_text:
                scale = self.pipeline_options.images_scale
                for element, _level in conv_res.document.iterate_items():
                    if not isinstance(element, TextItem) or len(element.prov) == 0:
                        continue
                    crop_bbox = (
                        element.prov[0]
                        .bbox.scaled(scale=scale)
                        .to_top_left_origin(page_height=page.size.height * scale)
                    )
                    txt = self.extract_text_from_backend(page, crop_bbox)
                    element.text = txt
                    element.orig = txt
        return conv_res.document
    def _turn_md_into_doc(self, conv_res):
        """Parse per-page markdown VLM responses into one DoclingDocument.

        Each page's markdown is parsed independently via MarkdownDocumentBackend,
        provenance is patched to the page number, and the per-page documents are
        concatenated at the end.
        """

        def _extract_markdown_code(text):
            """
            Extracts text from markdown code blocks (enclosed in triple backticks).
            If no code blocks are found, returns the original text.

            Args:
                text (str): Input text that may contain markdown code blocks

            Returns:
                str: Extracted code if code blocks exist, otherwise original text
            """
            # Regex pattern to match content between triple backticks
            # This handles multiline content and optional language specifier
            pattern = r"^```(?:\w*\n)?(.*?)```(\n)*$"
            # Search with DOTALL flag to match across multiple lines
            mtch = re.search(pattern, text, re.DOTALL)
            if mtch:
                # Return only the content of the first capturing group
                return mtch.group(1)
            else:
                # No code blocks found, return original text
                return text

        page_docs = []
        for pg_idx, page in enumerate(conv_res.pages):
            predicted_text = ""
            if page.predictions.vlm_response:
                predicted_text = page.predictions.vlm_response.text + "\n\n"
            predicted_text = _extract_markdown_code(text=predicted_text)
            response_bytes = BytesIO(predicted_text.encode("utf8"))
            out_doc = InputDocument(
                path_or_stream=response_bytes,
                filename=conv_res.input.file.name,
                format=InputFormat.MD,
                backend=MarkdownDocumentBackend,
            )
            backend = MarkdownDocumentBackend(
                in_doc=out_doc,
                path_or_stream=response_bytes,
            )
            page_doc = backend.convert()
            # Modify provenance in place for all items in the page document
            for item, level in page_doc.iterate_items(
                with_groups=True,
                traverse_pictures=True,
                included_content_layers=set(ContentLayer),
            ):
                if isinstance(item, DocItem):
                    item.prov = [
                        ProvenanceItem(
                            page_no=pg_idx + 1,
                            bbox=BoundingBox(
                                t=0.0, b=0.0, l=0.0, r=0.0
                            ),  # FIXME: would be nice not to have to "fake" it
                            charspan=[0, 0],
                        )
                    ]
            # Add page metadata to the page document before concatenation
            if page.image is not None:
                pg_width = page.image.width
                pg_height = page.image.height
            else:
                # Placeholder size when no page image is available.
                pg_width = 1
                pg_height = 1
            page_doc.add_page(
                page_no=pg_idx + 1,
                size=Size(width=pg_width, height=pg_height),
                image=ImageRef.from_pil(image=page.image, dpi=72)
                if page.image
                else None,
            )
            page_docs.append(page_doc)
        final_doc = DoclingDocument.concatenate(docs=page_docs)
        return final_doc
    def _turn_html_into_doc(self, conv_res):
        """Parse per-page HTML VLM responses into one DoclingDocument.

        Structure mirrors _turn_md_into_doc, with HTMLDocumentBackend doing
        the per-page parsing.
        """

        def _extract_html_code(text):
            """
            Extracts text from markdown code blocks (enclosed in triple backticks).
            If no code blocks are found, returns the original text.

            Args:
                text (str): Input text that may contain markdown code blocks

            Returns:
                str: Extracted code if code blocks exist, otherwise original text
            """
            # Regex pattern to match content between triple backticks
            # This handles multiline content and optional language specifier
            pattern = r"^```(?:\w*\n)?(.*?)```(\n)*$"
            # Search with DOTALL flag to match across multiple lines
            mtch = re.search(pattern, text, re.DOTALL)
            if mtch:
                # Return only the content of the first capturing group
                return mtch.group(1)
            else:
                # No code blocks found, return original text
                return text

        page_docs = []
        for pg_idx, page in enumerate(conv_res.pages):
            predicted_text = ""
            if page.predictions.vlm_response:
                predicted_text = page.predictions.vlm_response.text + "\n\n"
            predicted_text = _extract_html_code(text=predicted_text)
            response_bytes = BytesIO(predicted_text.encode("utf8"))
            out_doc = InputDocument(
                path_or_stream=response_bytes,
                filename=conv_res.input.file.name,
                format=InputFormat.HTML,
                backend=HTMLDocumentBackend,
            )
            backend = HTMLDocumentBackend(
                in_doc=out_doc,
                path_or_stream=response_bytes,
            )
            page_doc = backend.convert()
            # Modify provenance in place for all items in the page document
            for item, level in page_doc.iterate_items(
                with_groups=True,
                traverse_pictures=True,
                included_content_layers=set(ContentLayer),
            ):
                if isinstance(item, DocItem):
                    item.prov = [
                        ProvenanceItem(
                            page_no=pg_idx + 1,
                            bbox=BoundingBox(
                                t=0.0, b=0.0, l=0.0, r=0.0
                            ),  # FIXME: would be nice not to have to "fake" it
                            charspan=[0, 0],
                        )
                    ]
            # Add page metadata to the page document before concatenation
            if page.image is not None:
                pg_width = page.image.width
                pg_height = page.image.height
            else:
                # Placeholder size when no page image is available.
                pg_width = 1
                pg_height = 1
            page_doc.add_page(
                page_no=pg_idx + 1,
                size=Size(width=pg_width, height=pg_height),
                image=ImageRef.from_pil(image=page.image, dpi=72)
                if page.image
                else None,
            )
            page_docs.append(page_doc)
        # Concatenate all page documents to preserve hierarchy
        final_doc = DoclingDocument.concatenate(docs=page_docs)
        return final_doc
    @classmethod
    def get_default_options(cls) -> VlmPipelineOptions:
        """Return default pipeline options for this pipeline class."""
        return VlmPipelineOptions()
    @classmethod
    def is_backend_supported(cls, backend: AbstractDocumentBackend):
        """Only PDF backends are supported by the VLM pipeline."""
        return isinstance(backend, PdfDocumentBackend)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/pipeline/__init__.py | docling/pipeline/__init__.py | python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false | |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/pipeline/legacy_standard_pdf_pipeline.py | docling/pipeline/legacy_standard_pdf_pipeline.py | import logging
import warnings
from pathlib import Path
from typing import Optional, cast
import numpy as np
from docling_core.types.doc import DocItem, ImageRef, PictureItem, TableItem
from docling.backend.abstract_backend import AbstractDocumentBackend
from docling.backend.pdf_backend import PdfDocumentBackend
from docling.datamodel.base_models import AssembledUnit, Page
from docling.datamodel.document import ConversionResult
from docling.datamodel.layout_model_specs import LayoutModelConfig
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.datamodel.settings import settings
from docling.models.base_ocr_model import BaseOcrModel
from docling.models.code_formula_model import CodeFormulaModel, CodeFormulaModelOptions
from docling.models.factories import (
get_layout_factory,
get_ocr_factory,
get_table_structure_factory,
)
from docling.models.page_assemble_model import PageAssembleModel, PageAssembleOptions
from docling.models.page_preprocessing_model import (
PagePreprocessingModel,
PagePreprocessingOptions,
)
from docling.models.readingorder_model import ReadingOrderModel, ReadingOrderOptions
from docling.pipeline.base_pipeline import PaginatedPipeline
from docling.utils.model_downloader import download_models
from docling.utils.profiling import ProfilingScope, TimeRecorder
_log = logging.getLogger(__name__)
class LegacyStandardPdfPipeline(PaginatedPipeline):
    def __init__(self, pipeline_options: PdfPipelineOptions):
        """Build the sequential (legacy) PDF pipeline: preprocessing, OCR,
        layout, table structure, and page assembly, plus optional enrichment."""
        super().__init__(pipeline_options)
        self.pipeline_options: PdfPipelineOptions

        with warnings.catch_warnings():  # deprecated generate_table_images
            warnings.filterwarnings("ignore", category=DeprecationWarning)
            # Page images must be retained if any image output is requested.
            self.keep_images = (
                self.pipeline_options.generate_page_images
                or self.pipeline_options.generate_picture_images
                or self.pipeline_options.generate_table_images
            )
        self.reading_order_model = ReadingOrderModel(options=ReadingOrderOptions())
        ocr_model = self.get_ocr_model(artifacts_path=self.artifacts_path)
        layout_factory = get_layout_factory(
            allow_external_plugins=self.pipeline_options.allow_external_plugins
        )
        layout_model = layout_factory.create_instance(
            options=pipeline_options.layout_options,
            artifacts_path=self.artifacts_path,
            accelerator_options=pipeline_options.accelerator_options,
        )
        table_factory = get_table_structure_factory(
            allow_external_plugins=self.pipeline_options.allow_external_plugins
        )
        table_model = table_factory.create_instance(
            options=pipeline_options.table_structure_options,
            enabled=pipeline_options.do_table_structure,
            artifacts_path=self.artifacts_path,
            accelerator_options=pipeline_options.accelerator_options,
        )
        self.build_pipe = [
            # Pre-processing
            PagePreprocessingModel(
                options=PagePreprocessingOptions(
                    images_scale=pipeline_options.images_scale,
                )
            ),
            # OCR
            ocr_model,
            # Layout model
            layout_model,
            # Table structure model
            table_model,
            # Page assemble
            PageAssembleModel(options=PageAssembleOptions()),
        ]
        self.enrichment_pipe = [
            # Code Formula Enrichment Model
            CodeFormulaModel(
                enabled=pipeline_options.do_code_enrichment
                or pipeline_options.do_formula_enrichment,
                artifacts_path=self.artifacts_path,
                options=CodeFormulaModelOptions(
                    do_code_enrichment=pipeline_options.do_code_enrichment,
                    do_formula_enrichment=pipeline_options.do_formula_enrichment,
                ),
                accelerator_options=pipeline_options.accelerator_options,
            ),
            *self.enrichment_pipe,
        ]
        # Enrichment models need the page backend to stay alive after build.
        if (
            self.pipeline_options.do_formula_enrichment
            or self.pipeline_options.do_code_enrichment
            or self.pipeline_options.do_picture_classification
            or self.pipeline_options.do_picture_description
        ):
            self.keep_backend = True
@staticmethod
def download_models_hf(
local_dir: Optional[Path] = None, force: bool = False
) -> Path:
warnings.warn(
"The usage of LegacyStandardPdfPipeline.download_models_hf() is deprecated "
"use instead the utility `docling-tools models download`, or "
"the upstream method docling.utils.models_downloader.download_all()",
DeprecationWarning,
stacklevel=3,
)
output_dir = download_models(output_dir=local_dir, force=force, progress=False)
return output_dir
def get_ocr_model(self, artifacts_path: Optional[Path] = None) -> BaseOcrModel:
factory = get_ocr_factory(
allow_external_plugins=self.pipeline_options.allow_external_plugins
)
return factory.create_instance(
options=self.pipeline_options.ocr_options,
enabled=self.pipeline_options.do_ocr,
artifacts_path=artifacts_path,
accelerator_options=self.pipeline_options.accelerator_options,
)
def initialize_page(self, conv_res: ConversionResult, page: Page) -> Page:
with TimeRecorder(conv_res, "page_init"):
page._backend = conv_res.input._backend.load_page(page.page_no) # type: ignore
if page._backend is not None and page._backend.is_valid():
page.size = page._backend.get_size()
return page
    def _assemble_document(self, conv_res: ConversionResult) -> ConversionResult:
        """Assemble per-page results into the final DoclingDocument.

        Concatenates the assembled elements of all pages, runs the reading
        order model to build the document, optionally attaches page/element
        images, and aggregates per-page confidence scores document-wide.
        """
        all_elements = []
        all_headers = []
        all_body = []

        with TimeRecorder(conv_res, "doc_assemble", scope=ProfilingScope.DOCUMENT):
            # Flatten the per-page assembled units into document-wide lists.
            for p in conv_res.pages:
                if p.assembled is not None:
                    for el in p.assembled.body:
                        all_body.append(el)
                    for el in p.assembled.headers:
                        all_headers.append(el)
                    for el in p.assembled.elements:
                        all_elements.append(el)

            conv_res.assembled = AssembledUnit(
                elements=all_elements, headers=all_headers, body=all_body
            )

            conv_res.document = self.reading_order_model(conv_res)

            # Generate page images in the output
            if self.pipeline_options.generate_page_images:
                for page in conv_res.pages:
                    assert page.image is not None
                    # Document pages are 1-based; pipeline pages are 0-based.
                    page_no = page.page_no + 1
                    conv_res.document.pages[page_no].image = ImageRef.from_pil(
                        page.image, dpi=int(72 * self.pipeline_options.images_scale)
                    )

            # Generate images of the requested element types
            with warnings.catch_warnings():  # deprecated generate_table_images
                warnings.filterwarnings("ignore", category=DeprecationWarning)
                if (
                    self.pipeline_options.generate_picture_images
                    or self.pipeline_options.generate_table_images
                ):
                    scale = self.pipeline_options.images_scale
                    for element, _level in conv_res.document.iterate_items():
                        if not isinstance(element, DocItem) or len(element.prov) == 0:
                            continue
                        if (
                            isinstance(element, PictureItem)
                            and self.pipeline_options.generate_picture_images
                        ) or (
                            isinstance(element, TableItem)
                            and self.pipeline_options.generate_table_images
                        ):
                            # Locate the source page of the element's first provenance.
                            page_ix = element.prov[0].page_no - 1
                            page = next(
                                (p for p in conv_res.pages if p.page_no == page_ix),
                                cast("Page", None),
                            )
                            assert page is not None
                            assert page.size is not None
                            assert page.image is not None

                            # Crop the element out of the (scaled) page image.
                            crop_bbox = (
                                element.prov[0]
                                .bbox.scaled(scale=scale)
                                .to_top_left_origin(
                                    page_height=page.size.height * scale
                                )
                            )

                            cropped_im = page.image.crop(crop_bbox.as_tuple())
                            element.image = ImageRef.from_pil(
                                cropped_im, dpi=int(72 * scale)
                            )

        # Aggregate confidence values for document:
        if len(conv_res.pages) > 0:
            # NaN-aware reductions warn on all-NaN inputs; those warnings are
            # expected here and silenced.
            with warnings.catch_warnings():
                warnings.filterwarnings(
                    "ignore",
                    category=RuntimeWarning,
                    message="Mean of empty slice|All-NaN slice encountered",
                )
                conv_res.confidence.layout_score = float(
                    np.nanmean(
                        [c.layout_score for c in conv_res.confidence.pages.values()]
                    )
                )
                conv_res.confidence.parse_score = float(
                    np.nanquantile(
                        [c.parse_score for c in conv_res.confidence.pages.values()],
                        q=0.1,  # parse score should relate to worst 10% of pages.
                    )
                )
                conv_res.confidence.table_score = float(
                    np.nanmean(
                        [c.table_score for c in conv_res.confidence.pages.values()]
                    )
                )
                conv_res.confidence.ocr_score = float(
                    np.nanmean(
                        [c.ocr_score for c in conv_res.confidence.pages.values()]
                    )
                )

        return conv_res
@classmethod
def get_default_options(cls) -> PdfPipelineOptions:
return PdfPipelineOptions()
@classmethod
def is_backend_supported(cls, backend: AbstractDocumentBackend):
return isinstance(backend, PdfDocumentBackend)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/pipeline/asr_pipeline.py | docling/pipeline/asr_pipeline.py | import logging
import os
import re
import sys
import tempfile
from io import BytesIO
from pathlib import Path
from typing import TYPE_CHECKING, List, Optional, Union, cast
from docling_core.types.doc import DoclingDocument, DocumentOrigin
# import whisper # type: ignore
# import librosa
# import numpy as np
# import soundfile as sf # type: ignore
from docling_core.types.doc.labels import DocItemLabel
from pydantic import BaseModel, Field, validator
from docling.backend.abstract_backend import AbstractDocumentBackend
from docling.backend.noop_backend import NoOpBackend
# from pydub import AudioSegment # type: ignore
# from transformers import WhisperForConditionalGeneration, WhisperProcessor, pipeline
from docling.datamodel.accelerator_options import (
AcceleratorOptions,
)
from docling.datamodel.base_models import (
ConversionStatus,
FormatToMimeType,
)
from docling.datamodel.document import ConversionResult, InputDocument
from docling.datamodel.pipeline_options import (
AsrPipelineOptions,
)
from docling.datamodel.pipeline_options_asr_model import (
InlineAsrMlxWhisperOptions,
InlineAsrNativeWhisperOptions,
# AsrResponseFormat,
InlineAsrOptions,
)
from docling.datamodel.pipeline_options_vlm_model import (
InferenceFramework,
)
from docling.datamodel.settings import settings
from docling.pipeline.base_pipeline import BasePipeline
from docling.utils.accelerator_utils import decide_device
from docling.utils.profiling import ProfilingScope, TimeRecorder
_log = logging.getLogger(__name__)
class _ConversationWord(BaseModel):
    """A single transcribed word with optional start/end timestamps (seconds)."""

    # The transcribed word text.
    text: str
    start_time: Optional[float] = Field(
        None, description="Start time in seconds from video start"
    )
    end_time: Optional[float] = Field(
        None, ge=0, description="End time in seconds from video start"
    )
class _ConversationItem(BaseModel):
    """One transcription segment, with optional timing, speaker and word details.

    NOTE(review): defining ``__eq__`` without ``__hash__`` makes instances
    unhashable (Python sets ``__hash__`` to None); fine as long as items are
    never used as set members or dict keys — confirm. ``__lt__``/``__eq__``
    also assume ``start_time`` is populated when items are compared/sorted.
    """

    # Segment text as produced by the ASR engine.
    text: str
    start_time: Optional[float] = Field(
        None, description="Start time in seconds from video start"
    )
    end_time: Optional[float] = Field(
        None, ge=0, description="End time in seconds from video start"
    )
    speaker_id: Optional[int] = Field(None, description="Numeric speaker identifier")
    speaker: Optional[str] = Field(
        None, description="Speaker name, defaults to speaker-{speaker_id}"
    )
    words: Optional[list[_ConversationWord]] = Field(
        None, description="Individual words with time-stamps"
    )

    def __lt__(self, other):
        # Order segments chronologically by start time.
        if not isinstance(other, _ConversationItem):
            return NotImplemented
        return self.start_time < other.start_time

    def __eq__(self, other):
        # Segments are considered equal when they start at the same time.
        if not isinstance(other, _ConversationItem):
            return NotImplemented
        return self.start_time == other.start_time

    def to_string(self) -> str:
        """Format the conversation entry as a string"""
        result = ""
        if (self.start_time is not None) and (self.end_time is not None):
            result += f"[time: {self.start_time}-{self.end_time}] "

        if self.speaker is not None:
            result += f"[speaker:{self.speaker}] "

        result += self.text
        return result
class _NativeWhisperModel:
    """Transcriber backed by the native ``openai-whisper`` package."""

    def __init__(
        self,
        enabled: bool,
        artifacts_path: Optional[Path],
        accelerator_options: AcceleratorOptions,
        asr_options: InlineAsrNativeWhisperOptions,
    ):
        """
        Transcriber using native Whisper.

        Raises:
            ImportError: when ``enabled`` is True but openai-whisper is missing.
        """
        self.enabled = enabled

        _log.info(f"artifacts-path: {artifacts_path}")
        _log.info(f"accelerator_options: {accelerator_options}")

        if self.enabled:
            try:
                import whisper  # type: ignore
            except ImportError:
                if sys.version_info < (3, 14):
                    raise ImportError(
                        "whisper is not installed. Please install it via `pip install openai-whisper` or do `uv sync --extra asr`."
                    )
                else:
                    raise ImportError(
                        "whisper is not installed. Unfortunately its dependencies are not yet available for Python 3.14."
                    )
            self.asr_options = asr_options
            self.max_tokens = asr_options.max_new_tokens
            self.temperature = asr_options.temperature

            self.device = decide_device(
                accelerator_options.device,
                supported_devices=asr_options.supported_devices,
            )
            _log.info(f"Available device for Whisper: {self.device}")

            self.model_name = asr_options.repo_id
            _log.info(f"loading _NativeWhisperModel({self.model_name})")
            if artifacts_path is not None:
                # Honor a local model cache when one is configured.
                _log.info(f"loading {self.model_name} from {artifacts_path}")
                self.model = whisper.load_model(
                    name=self.model_name,
                    device=self.device,
                    download_root=str(artifacts_path),
                )
            else:
                self.model = whisper.load_model(
                    name=self.model_name, device=self.device
                )

            self.verbose = asr_options.verbose
            self.timestamps = asr_options.timestamps
            self.word_timestamps = asr_options.word_timestamps

    def run(self, conv_res: ConversionResult) -> ConversionResult:
        """Transcribe the input audio and populate ``conv_res.document``.

        Accepts either a file path or a BytesIO stream; streams are spilled
        to a temporary file (removed afterwards) because whisper only accepts
        file paths. On any error the status is set to FAILURE.
        """
        # Access the file path from the backend, similar to how other pipelines handle it
        path_or_stream = conv_res.input._backend.path_or_stream

        # Handle both Path and BytesIO inputs
        temp_file_path: Optional[Path] = None
        if isinstance(path_or_stream, BytesIO):
            # For BytesIO, write to a temporary file since whisper requires a file path
            suffix = Path(conv_res.input.file.name).suffix or ".wav"
            with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp_file:
                tmp_file.write(path_or_stream.getvalue())
                temp_file_path = Path(tmp_file.name)
            audio_path = temp_file_path
        elif isinstance(path_or_stream, Path):
            audio_path = path_or_stream
        else:
            raise RuntimeError(
                f"ASR pipeline requires a file path or BytesIO stream, but got {type(path_or_stream)}"
            )

        try:
            conversation = self.transcribe(audio_path)

            # Ensure we have a proper DoclingDocument
            origin = DocumentOrigin(
                filename=conv_res.input.file.name or "audio.wav",
                mimetype="audio/x-wav",
                binary_hash=conv_res.input.document_hash,
            )
            conv_res.document = DoclingDocument(
                name=conv_res.input.file.stem or "audio.wav", origin=origin
            )

            for citem in conversation:
                conv_res.document.add_text(
                    label=DocItemLabel.TEXT, text=citem.to_string()
                )

            return conv_res
        except Exception as exc:
            # Fixed typo in log message ("tranciption" -> "transcription").
            _log.error(f"Audio transcription has an error: {exc}")
            conv_res.status = ConversionStatus.FAILURE
            return conv_res
        finally:
            # Clean up temporary file if created
            if temp_file_path is not None and temp_file_path.exists():
                try:
                    temp_file_path.unlink()
                except Exception as e:
                    _log.warning(
                        f"Failed to delete temporary file {temp_file_path}: {e}"
                    )

    def transcribe(self, fpath: Path) -> list[_ConversationItem]:
        """Run Whisper on *fpath* and convert its segments into conversation items."""
        result = self.model.transcribe(
            str(fpath), verbose=self.verbose, word_timestamps=self.word_timestamps
        )

        convo: list[_ConversationItem] = []
        # Descriptive loop variables instead of `_`/`__` (consistent with
        # _MlxWhisperModel.transcribe).
        for segment in result["segments"]:
            item = _ConversationItem(
                start_time=segment["start"],
                end_time=segment["end"],
                text=segment["text"],
                words=[],
            )
            if "words" in segment and self.word_timestamps:
                item.words = []
                for word_data in segment["words"]:
                    item.words.append(
                        _ConversationWord(
                            start_time=word_data["start"],
                            end_time=word_data["end"],
                            text=word_data["word"],
                        )
                    )
            convo.append(item)

        return convo
class _MlxWhisperModel:
    """Transcriber backed by MLX Whisper (Apple Silicon optimized)."""

    def __init__(
        self,
        enabled: bool,
        artifacts_path: Optional[Path],
        accelerator_options: AcceleratorOptions,
        asr_options: InlineAsrMlxWhisperOptions,
    ):
        """
        Transcriber using MLX Whisper for Apple Silicon optimization.

        Raises:
            ImportError: when ``enabled`` is True but mlx-whisper is missing.
        """
        self.enabled = enabled

        _log.info(f"artifacts-path: {artifacts_path}")
        _log.info(f"accelerator_options: {accelerator_options}")

        if self.enabled:
            try:
                import mlx_whisper  # type: ignore
            except ImportError:
                raise ImportError(
                    "mlx-whisper is not installed. Please install it via `pip install mlx-whisper` or do `uv sync --extra asr`."
                )
            self.asr_options = asr_options
            self.mlx_whisper = mlx_whisper

            self.device = decide_device(
                accelerator_options.device,
                supported_devices=asr_options.supported_devices,
            )
            _log.info(f"Available device for MLX Whisper: {self.device}")

            self.model_name = asr_options.repo_id
            _log.info(f"loading _MlxWhisperModel({self.model_name})")

            # MLX Whisper models are loaded differently - they use HuggingFace repos
            self.model_path = self.model_name

            # Store MLX-specific options
            self.language = asr_options.language
            self.task = asr_options.task
            self.word_timestamps = asr_options.word_timestamps
            self.no_speech_threshold = asr_options.no_speech_threshold
            self.logprob_threshold = asr_options.logprob_threshold
            self.compression_ratio_threshold = asr_options.compression_ratio_threshold

    def run(self, conv_res: ConversionResult) -> ConversionResult:
        """Transcribe the input audio and populate ``conv_res.document``.

        Mirrors _NativeWhisperModel.run: accepts a file path or a BytesIO
        stream (previously only paths worked); streams are spilled to a
        temporary file, which is removed afterwards.
        """
        # Access the file path from the backend, like the native Whisper model.
        path_or_stream = conv_res.input._backend.path_or_stream

        temp_file_path: Optional[Path] = None
        if isinstance(path_or_stream, BytesIO):
            # MLX whisper requires a file path, so spill the stream to disk.
            suffix = Path(conv_res.input.file.name).suffix or ".wav"
            with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp_file:
                tmp_file.write(path_or_stream.getvalue())
                temp_file_path = Path(tmp_file.name)
            audio_path = temp_file_path
        elif isinstance(path_or_stream, Path):
            audio_path = path_or_stream.resolve()
        else:
            raise RuntimeError(
                f"ASR pipeline requires a file path or BytesIO stream, but got {type(path_or_stream)}"
            )

        try:
            conversation = self.transcribe(audio_path)

            # Ensure we have a proper DoclingDocument
            origin = DocumentOrigin(
                filename=conv_res.input.file.name or "audio.wav",
                mimetype="audio/x-wav",
                binary_hash=conv_res.input.document_hash,
            )
            conv_res.document = DoclingDocument(
                name=conv_res.input.file.stem or "audio.wav", origin=origin
            )

            for citem in conversation:
                conv_res.document.add_text(
                    label=DocItemLabel.TEXT, text=citem.to_string()
                )

            conv_res.status = ConversionStatus.SUCCESS
            return conv_res

        except Exception as exc:
            _log.error(f"MLX Audio transcription has an error: {exc}")
            conv_res.status = ConversionStatus.FAILURE
            return conv_res
        finally:
            # Clean up temporary file if created
            if temp_file_path is not None and temp_file_path.exists():
                try:
                    temp_file_path.unlink()
                except Exception as e:
                    _log.warning(
                        f"Failed to delete temporary file {temp_file_path}: {e}"
                    )

    def transcribe(self, fpath: Path) -> list[_ConversationItem]:
        """
        Transcribe audio using MLX Whisper.

        Args:
            fpath: Path to audio file

        Returns:
            List of conversation items with timestamps
        """
        result = self.mlx_whisper.transcribe(
            str(fpath),
            path_or_hf_repo=self.model_path,
            language=self.language,
            task=self.task,
            word_timestamps=self.word_timestamps,
            no_speech_threshold=self.no_speech_threshold,
            logprob_threshold=self.logprob_threshold,
            compression_ratio_threshold=self.compression_ratio_threshold,
        )

        convo: list[_ConversationItem] = []

        # MLX Whisper returns segments similar to native Whisper
        for segment in result.get("segments", []):
            item = _ConversationItem(
                start_time=segment.get("start"),
                end_time=segment.get("end"),
                text=segment.get("text", "").strip(),
                words=[],
            )

            # Add word-level timestamps if available
            if self.word_timestamps and "words" in segment:
                item.words = []
                for word_data in segment["words"]:
                    item.words.append(
                        _ConversationWord(
                            start_time=word_data.get("start"),
                            end_time=word_data.get("end"),
                            text=word_data.get("word", ""),
                        )
                    )
            convo.append(item)

        return convo
class AsrPipeline(BasePipeline):
    """Pipeline that transcribes audio inputs into a DoclingDocument.

    Chooses a Whisper implementation (native or MLX) from the configured
    ``asr_options`` and delegates transcription to it.
    """

    def __init__(self, pipeline_options: AsrPipelineOptions):
        super().__init__(pipeline_options)
        # Keep the backend loaded: the model reads the audio from it in run().
        self.keep_backend = True

        self.pipeline_options: AsrPipelineOptions = pipeline_options
        self._model: Union[_NativeWhisperModel, _MlxWhisperModel]

        if isinstance(self.pipeline_options.asr_options, InlineAsrNativeWhisperOptions):
            native_asr_options: InlineAsrNativeWhisperOptions = (
                self.pipeline_options.asr_options
            )
            self._model = _NativeWhisperModel(
                enabled=True,  # must be always enabled for this pipeline to make sense.
                artifacts_path=self.artifacts_path,
                accelerator_options=pipeline_options.accelerator_options,
                asr_options=native_asr_options,
            )
        elif isinstance(self.pipeline_options.asr_options, InlineAsrMlxWhisperOptions):
            mlx_asr_options: InlineAsrMlxWhisperOptions = (
                self.pipeline_options.asr_options
            )
            self._model = _MlxWhisperModel(
                enabled=True,  # must be always enabled for this pipeline to make sense.
                artifacts_path=self.artifacts_path,
                accelerator_options=pipeline_options.accelerator_options,
                asr_options=mlx_asr_options,
            )
        else:
            # NOTE(review): this branch leaves self._model unset, so
            # _build_document will later fail with AttributeError — consider
            # raising here instead of only logging.
            _log.error(f"No model support for {self.pipeline_options.asr_options}")

    def _has_text(self, document: "DoclingDocument") -> bool:
        """
        Helper method to check if the document contains any transcribed text.
        A transcription is considered non-empty if the .texts list contains items with actual, non whitespace content.
        """
        if not document or not document.texts:
            return False
        for item in document.texts:
            if item.text and item.text.strip():
                return True
        return False

    def _determine_status(self, conv_res: ConversionResult) -> ConversionStatus:
        """Determines the final status of ASR Conversion based on its result."""
        if conv_res.status == ConversionStatus.FAILURE or conv_res.errors:
            return ConversionStatus.FAILURE
        if not self._has_text(conv_res.document):
            _log.warning(
                "ASR conversion resulted in an empty document."
                f"File: {conv_res.input.file.name}"
            )
            return ConversionStatus.PARTIAL_SUCCESS
        return ConversionStatus.SUCCESS

    @classmethod
    def get_default_options(cls) -> AsrPipelineOptions:
        """Return the default options for this pipeline."""
        return AsrPipelineOptions()

    def _build_document(self, conv_res: ConversionResult) -> ConversionResult:
        """Run the selected ASR model inside the doc_build profiling scope."""
        _log.info(f"start _build_document in AsrPipeline: {conv_res.input.file}")
        with TimeRecorder(conv_res, "doc_build", scope=ProfilingScope.DOCUMENT):
            self._model.run(conv_res=conv_res)
        return conv_res

    @classmethod
    def is_backend_supported(cls, backend: AbstractDocumentBackend):
        # Audio inputs carry no parseable structure, hence the no-op backend.
        return isinstance(backend, NoOpBackend)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/pipeline/base_extraction_pipeline.py | docling/pipeline/base_extraction_pipeline.py | import logging
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Optional
from docling.datamodel.base_models import ConversionStatus, ErrorItem
from docling.datamodel.document import InputDocument
from docling.datamodel.extraction import ExtractionResult, ExtractionTemplateType
from docling.datamodel.pipeline_options import BaseOptions, PipelineOptions
from docling.datamodel.settings import settings
_log = logging.getLogger(__name__)
class BaseExtractionPipeline(ABC):
    """Abstract base for data-extraction pipelines.

    Handles option plumbing, artifacts-path resolution, and uniform error
    handling around :meth:`execute`; subclasses implement the extraction
    itself and the final status decision.
    """

    def __init__(self, pipeline_options: PipelineOptions):
        self.pipeline_options = pipeline_options

        # Resolve the artifacts folder: the explicit pipeline option wins over
        # the global settings value; both are user-expanded.
        self.artifacts_path: Optional[Path] = None
        if pipeline_options.artifacts_path is not None:
            self.artifacts_path = Path(pipeline_options.artifacts_path).expanduser()
        elif settings.artifacts_path is not None:
            self.artifacts_path = Path(settings.artifacts_path).expanduser()

        if self.artifacts_path is not None and not self.artifacts_path.is_dir():
            raise RuntimeError(
                f"The value of {self.artifacts_path=} is not valid. "
                "When defined, it must point to a folder containing all models required by the pipeline."
            )

    def execute(
        self,
        in_doc: InputDocument,
        raises_on_error: bool,
        template: Optional[ExtractionTemplateType] = None,
    ) -> ExtractionResult:
        """Extract data from *in_doc*, optionally guided by *template*.

        On error the result is marked FAILURE and an ErrorItem is recorded;
        when *raises_on_error* is set, the exception is additionally re-raised.
        """
        ext_res = ExtractionResult(input=in_doc)
        try:
            ext_res = self._extract_data(ext_res, template)
            ext_res.status = self._determine_status(ext_res)
        except Exception as e:
            ext_res.status = ConversionStatus.FAILURE
            error_item = ErrorItem(
                component_type="extraction_pipeline",
                module_name=self.__class__.__name__,
                error_message=str(e),
            )
            ext_res.errors.append(error_item)
            if raises_on_error:
                # Bare `raise` re-raises the active exception without adding a
                # redundant traceback frame (idiomatic vs. `raise e`).
                raise
        return ext_res

    @abstractmethod
    def _extract_data(
        self,
        ext_res: ExtractionResult,
        template: Optional[ExtractionTemplateType] = None,
    ) -> ExtractionResult:
        """Subclass must populate ext_res.pages/errors and return the result."""
        raise NotImplementedError

    @abstractmethod
    def _determine_status(self, ext_res: ExtractionResult) -> ConversionStatus:
        """Subclass must decide SUCCESS/PARTIAL_SUCCESS/FAILURE based on ext_res."""
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def get_default_options(cls) -> PipelineOptions:
        """Return the default options instance for this pipeline type."""
        pass
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/pipeline/base_pipeline.py | docling/pipeline/base_pipeline.py | import functools
import logging
import time
import traceback
from abc import ABC, abstractmethod
from collections.abc import Iterable
from pathlib import Path
from typing import Any, Callable, List, Optional
from docling_core.types.doc import NodeItem
from docling.backend.abstract_backend import (
AbstractDocumentBackend,
PaginatedDocumentBackend,
)
from docling.backend.pdf_backend import PdfDocumentBackend
from docling.datamodel.base_models import (
ConversionStatus,
DoclingComponentType,
ErrorItem,
Page,
)
from docling.datamodel.document import ConversionResult, InputDocument
from docling.datamodel.pipeline_options import (
ConvertPipelineOptions,
PdfPipelineOptions,
PipelineOptions,
)
from docling.datamodel.settings import settings
from docling.models.base_model import GenericEnrichmentModel
from docling.models.document_picture_classifier import (
DocumentPictureClassifier,
DocumentPictureClassifierOptions,
)
from docling.models.factories import get_picture_description_factory
from docling.models.picture_description_base_model import PictureDescriptionBaseModel
from docling.utils.profiling import ProfilingScope, TimeRecorder
from docling.utils.utils import chunkify
_log = logging.getLogger(__name__)
class BasePipeline(ABC):
    """Abstract base for all conversion pipelines.

    Provides option plumbing, artifacts-path resolution, the enrichment
    stage, and error/profiling bookkeeping around :meth:`execute`; subclasses
    implement document building and status determination.
    """

    def __init__(self, pipeline_options: PipelineOptions):
        self.pipeline_options = pipeline_options
        # When False, per-page image caches are dropped after processing.
        self.keep_images = False
        # Models applied per page batch while building the document.
        self.build_pipe: List[Callable] = []
        # Models applied to the finished DoclingDocument (enrichment stage).
        self.enrichment_pipe: List[GenericEnrichmentModel[Any]] = []

        # Resolve the artifacts folder: the explicit pipeline option wins over
        # the global settings value; both are user-expanded.
        self.artifacts_path: Optional[Path] = None
        if pipeline_options.artifacts_path is not None:
            self.artifacts_path = Path(pipeline_options.artifacts_path).expanduser()
        elif settings.artifacts_path is not None:
            self.artifacts_path = Path(settings.artifacts_path).expanduser()

        if self.artifacts_path is not None and not self.artifacts_path.is_dir():
            raise RuntimeError(
                f"The value of {self.artifacts_path=} is not valid. "
                "When defined, it must point to a folder containing all models required by the pipeline."
            )

    def execute(self, in_doc: InputDocument, raises_on_error: bool) -> ConversionResult:
        """Run build, assemble, and enrich stages on *in_doc*.

        On failure, either records an ErrorItem (raises_on_error=False) or
        re-raises wrapped in RuntimeError. Resources are unloaded either way.
        """
        conv_res = ConversionResult(input=in_doc)

        _log.info(f"Processing document {in_doc.file.name}")
        try:
            with TimeRecorder(
                conv_res, "pipeline_total", scope=ProfilingScope.DOCUMENT
            ):
                # These steps are building and assembling the structure of the
                # output DoclingDocument.
                conv_res = self._build_document(conv_res)
                conv_res = self._assemble_document(conv_res)
                # From this stage, all operations should rely only on conv_res.output
                conv_res = self._enrich_document(conv_res)
                conv_res.status = self._determine_status(conv_res)
        except Exception as e:
            conv_res.status = ConversionStatus.FAILURE
            if not raises_on_error:
                error_item = ErrorItem(
                    component_type=DoclingComponentType.PIPELINE,
                    module_name=self.__class__.__name__,
                    error_message=str(e),
                )
                conv_res.errors.append(error_item)
            else:
                raise RuntimeError(f"Pipeline {self.__class__.__name__} failed") from e
        finally:
            self._unload(conv_res)

        return conv_res

    @abstractmethod
    def _build_document(self, conv_res: ConversionResult) -> ConversionResult:
        """Produce the document structure (pages/elements) on conv_res."""
        pass

    def _assemble_document(self, conv_res: ConversionResult) -> ConversionResult:
        # Default: nothing to assemble; page-based pipelines override this.
        return conv_res

    def _enrich_document(self, conv_res: ConversionResult) -> ConversionResult:
        """Apply each enrichment model, batched, to the elements it accepts."""

        def _prepare_elements(
            conv_res: ConversionResult, model: GenericEnrichmentModel[Any]
        ) -> Iterable[NodeItem]:
            # Yield only the elements this model declares interest in.
            for doc_element, _level in conv_res.document.iterate_items():
                prepared_element = model.prepare_element(
                    conv_res=conv_res, element=doc_element
                )
                if prepared_element is not None:
                    yield prepared_element

        with TimeRecorder(conv_res, "doc_enrich", scope=ProfilingScope.DOCUMENT):
            for model in self.enrichment_pipe:
                for element_batch in chunkify(
                    _prepare_elements(conv_res, model),
                    model.elements_batch_size,
                ):
                    for element in model(
                        doc=conv_res.document, element_batch=element_batch
                    ):  # Must exhaust!
                        pass

        return conv_res

    @abstractmethod
    def _determine_status(self, conv_res: ConversionResult) -> ConversionStatus:
        """Decide the final ConversionStatus for conv_res."""
        pass

    def _unload(self, conv_res: ConversionResult):
        # Hook for releasing backends/resources; default is a no-op.
        pass

    @classmethod
    @abstractmethod
    def get_default_options(cls) -> PipelineOptions:
        """Return the default options instance for this pipeline type."""
        pass

    @classmethod
    @abstractmethod
    def is_backend_supported(cls, backend: AbstractDocumentBackend):
        """Return True when this pipeline can process *backend*."""
        pass
class ConvertPipeline(BasePipeline):
    """Base for pipelines that convert documents (as opposed to extracting data).

    Adds the enrichment models common to all backends: document picture
    classification and picture description.
    """

    def __init__(self, pipeline_options: ConvertPipelineOptions):
        super().__init__(pipeline_options)
        self.pipeline_options: ConvertPipelineOptions

        # ------ Common enrichment models working on all backends
        # Picture description model
        if (
            picture_description_model := self._get_picture_description_model(
                artifacts_path=self.artifacts_path
            )
        ) is None:
            raise RuntimeError(
                f"The specified picture description kind is not supported: {pipeline_options.picture_description_options.kind}."
            )

        self.enrichment_pipe = [
            # Document Picture Classifier
            DocumentPictureClassifier(
                enabled=pipeline_options.do_picture_classification,
                artifacts_path=self.artifacts_path,
                options=DocumentPictureClassifierOptions(),
                accelerator_options=pipeline_options.accelerator_options,
            ),
            # Document Picture description
            picture_description_model,
        ]

    def _get_picture_description_model(
        self, artifacts_path: Optional[Path] = None
    ) -> Optional[PictureDescriptionBaseModel]:
        """Resolve the configured picture-description model via the plugin factory.

        Returns None when the requested kind has no registered implementation.
        """
        factory = get_picture_description_factory(
            allow_external_plugins=self.pipeline_options.allow_external_plugins
        )
        return factory.create_instance(
            options=self.pipeline_options.picture_description_options,
            enabled=self.pipeline_options.do_picture_description,
            enable_remote_services=self.pipeline_options.enable_remote_services,
            artifacts_path=artifacts_path,
            accelerator_options=self.pipeline_options.accelerator_options,
        )

    @classmethod
    @abstractmethod
    def get_default_options(cls) -> ConvertPipelineOptions:
        """Return the default options instance for this pipeline type."""
        pass
class PaginatedPipeline(ConvertPipeline):  # TODO this is a bad name.
    """Pipeline that processes paginated backends page-batch by page-batch."""

    def __init__(self, pipeline_options: ConvertPipelineOptions):
        super().__init__(pipeline_options)
        # Page backends are unloaded after processing unless a subclass opts in.
        self.keep_backend = False

    def _apply_on_pages(
        self, conv_res: ConversionResult, page_batch: Iterable[Page]
    ) -> Iterable[Page]:
        """Chain every build-pipe model over the (lazy) page batch."""
        for model in self.build_pipe:
            page_batch = model(conv_res, page_batch)

        yield from page_batch

    def _build_document(self, conv_res: ConversionResult) -> ConversionResult:
        """Initialize pages within the configured range and run the build pipe.

        Enforces the optional document timeout between batches and filters out
        pages that never got initialized (size is None) before returning.

        Raises:
            RuntimeError: if the input backend is not paginated.
        """
        if not isinstance(conv_res.input._backend, PaginatedDocumentBackend):
            raise RuntimeError(
                f"The selected backend {type(conv_res.input._backend).__name__} for {conv_res.input.file} is not a paginated backend. "
                f"Can not convert this with a paginated PDF pipeline. "
                f"Please check your format configuration on DocumentConverter."
            )
            # conv_res.status = ConversionStatus.FAILURE
            # return conv_res

        total_elapsed_time = 0.0
        with TimeRecorder(conv_res, "doc_build", scope=ProfilingScope.DOCUMENT):
            # Create Page stubs only for pages inside the user's page_range
            # (page_range is 1-based, page_no is 0-based).
            for i in range(conv_res.input.page_count):
                start_page, end_page = conv_res.input.limits.page_range
                if (start_page - 1) <= i <= (end_page - 1):
                    conv_res.pages.append(Page(page_no=i))

            try:
                total_pages_processed = 0
                # Iterate batches of pages (page_batch_size) in the doc
                for page_batch in chunkify(
                    conv_res.pages, settings.perf.page_batch_size
                ):
                    start_batch_time = time.monotonic()

                    # 1. Initialise the page resources
                    init_pages = map(
                        functools.partial(self.initialize_page, conv_res), page_batch
                    )

                    # 2. Run pipeline stages
                    pipeline_pages = self._apply_on_pages(conv_res, init_pages)

                    for p in pipeline_pages:  # Must exhaust!
                        # Cleanup cached images
                        if not self.keep_images:
                            p._image_cache = {}

                        # Cleanup page backends
                        if not self.keep_backend and p._backend is not None:
                            p._backend.unload()

                        if (
                            isinstance(self.pipeline_options, PdfPipelineOptions)
                            and not self.pipeline_options.generate_parsed_pages
                        ):
                            del p.parsed_page
                            p.parsed_page = None

                    end_batch_time = time.monotonic()
                    total_elapsed_time += end_batch_time - start_batch_time
                    # Stop early (partial success) when the document-level
                    # timeout budget has been exhausted.
                    if (
                        self.pipeline_options.document_timeout is not None
                        and total_elapsed_time > self.pipeline_options.document_timeout
                    ):
                        _log.warning(
                            f"Document processing time ({total_elapsed_time:.3f} seconds) exceeded the specified timeout of {self.pipeline_options.document_timeout:.3f} seconds"
                        )
                        conv_res.status = ConversionStatus.PARTIAL_SUCCESS
                        break

                    total_pages_processed += len(page_batch)
                    _log.debug(
                        f"Finished converting pages {total_pages_processed}/{len(conv_res.pages)} time={end_batch_time:.3f}"
                    )

            except Exception as e:
                conv_res.status = ConversionStatus.FAILURE
                trace = "\n".join(
                    traceback.format_exception(type(e), e, e.__traceback__)
                )
                _log.warning(
                    f"Encountered an error during conversion of document {conv_res.input.document_hash}:\n"
                    f"{trace}"
                )
                raise e

            # Filter out uninitialized pages (those with size=None) that may remain
            # after timeout or processing failures to prevent assertion errors downstream
            initial_page_count = len(conv_res.pages)
            conv_res.pages = [page for page in conv_res.pages if page.size is not None]
            if len(conv_res.pages) < initial_page_count:
                _log.info(
                    f"Filtered out {initial_page_count - len(conv_res.pages)} uninitialized pages "
                    f"due to timeout or processing failures"
                )

        return conv_res

    def _unload(self, conv_res: ConversionResult) -> ConversionResult:
        """Unload every page backend and the input document backend."""
        for page in conv_res.pages:
            if page._backend is not None:
                page._backend.unload()

        if conv_res.input._backend:
            conv_res.input._backend.unload()

        return conv_res

    def _determine_status(self, conv_res: ConversionResult) -> ConversionStatus:
        """Promote PENDING/STARTED to SUCCESS; downgrade on invalid page backends."""
        status = conv_res.status
        if status in [
            ConversionStatus.PENDING,
            ConversionStatus.STARTED,
        ]:  # preserves ConversionStatus.PARTIAL_SUCCESS
            status = ConversionStatus.SUCCESS
        for page in conv_res.pages:
            if page._backend is None or not page._backend.is_valid():
                conv_res.errors.append(
                    ErrorItem(
                        component_type=DoclingComponentType.DOCUMENT_BACKEND,
                        module_name=type(page._backend).__name__,
                        error_message=f"Page {page.page_no} failed to parse.",
                    )
                )
                status = ConversionStatus.PARTIAL_SUCCESS

        return status

    # Initialise and load resources for a page
    @abstractmethod
    def initialize_page(self, conv_res: ConversionResult, page: Page) -> Page:
        """Attach backend/resources to *page* and return it."""
        pass
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/pipeline/simple_pipeline.py | docling/pipeline/simple_pipeline.py | import logging
from docling.backend.abstract_backend import (
AbstractDocumentBackend,
DeclarativeDocumentBackend,
)
from docling.datamodel.base_models import ConversionStatus
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import ConvertPipelineOptions
from docling.pipeline.base_pipeline import ConvertPipeline
from docling.utils.profiling import ProfilingScope, TimeRecorder
_log = logging.getLogger(__name__)
class SimplePipeline(ConvertPipeline):
    """SimpleModelPipeline.

    This class is used at the moment for formats / backends
    which produce straight DoclingDocument output.
    """

    # The previous `__init__` override only delegated to super() with the same
    # signature; it was redundant and has been removed (construction behavior
    # is unchanged).

    def _build_document(self, conv_res: ConversionResult) -> ConversionResult:
        """Convert the document in one shot via the declarative backend.

        Raises:
            RuntimeError: if the input backend is not declarative.
        """
        if not isinstance(conv_res.input._backend, DeclarativeDocumentBackend):
            raise RuntimeError(
                f"The selected backend {type(conv_res.input._backend).__name__} for {conv_res.input.file} is not a declarative backend. "
                f"Can not convert this with simple pipeline. "
                f"Please check your format configuration on DocumentConverter."
            )

        # Instead of running a page-level pipeline to build up the document structure,
        # the backend is expected to be of type DeclarativeDocumentBackend, which can output
        # a DoclingDocument straight.
        with TimeRecorder(conv_res, "doc_build", scope=ProfilingScope.DOCUMENT):
            conv_res.document = conv_res.input._backend.convert()
        return conv_res

    def _determine_status(self, conv_res: ConversionResult) -> ConversionStatus:
        # This is called only if the previous steps didn't raise.
        # Since we don't have anything else to evaluate, we can
        # safely return SUCCESS.
        return ConversionStatus.SUCCESS

    @classmethod
    def get_default_options(cls) -> ConvertPipelineOptions:
        """Return the default options for this pipeline."""
        return ConvertPipelineOptions()

    @classmethod
    def is_backend_supported(cls, backend: AbstractDocumentBackend):
        """Return True when *backend* can emit a DoclingDocument directly."""
        return isinstance(backend, DeclarativeDocumentBackend)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
psf/black | https://github.com/psf/black/blob/c3cc5a95d4f72e6ccc27ebae23344fce8cc70786/scripts/make_width_table.py | scripts/make_width_table.py | """Generates a width table for Unicode characters.
This script generates a width table for Unicode characters that are not
narrow (width 1). The table is written to src/black/_width_table.py (note
that although this file is generated, it is checked into Git) and is used
by the char_width() function in src/black/strings.py.
You should run this script when you upgrade wcwidth, which is expected to
happen when a new Unicode version is released. The generated table contains
the version of wcwidth and Unicode that it was generated for.
In order to run this script, you need to install the latest version of wcwidth.
You can do this by running:
pip install -U wcwidth
"""
import sys
from collections.abc import Iterable
from os.path import basename, dirname, join
import wcwidth # type: ignore[import-not-found]
def make_width_table() -> Iterable[tuple[int, int, int]]:
    """Yield ``(start, end, width)`` triples covering every codepoint whose
    wcwidth is greater than 1, merged into maximal contiguous runs of equal
    width.

    Narrow and zero-width characters are skipped so that they are treated as
    single-width by the consumer (consistent with the str.isascii()-based
    heuristics in strings.py).
    """
    # Accumulator for the run being built: (start_codepoint, end_codepoint, width),
    # or None before the first non-narrow codepoint is seen.
    current: tuple[int, int, int] | None = None

    for codepoint in range(sys.maxunicode + 1):
        width = wcwidth.wcwidth(chr(codepoint))
        if width <= 1:
            # Narrow / zero-width: not recorded in the table.
            continue

        if (
            current is not None
            and codepoint == current[1] + 1
            and width == current[2]
        ):
            # Contiguous codepoint with the same width: extend the open run.
            current = (current[0], codepoint, width)
        else:
            # Gap or width change: emit the finished run and start a new one.
            if current is not None:
                yield current
            current = (codepoint, codepoint, width)

    # Flush the final run, if any non-narrow codepoint was ever seen.
    if current is not None:
        yield current
def main() -> None:
    """Regenerate ``src/black/_width_table.py`` from the installed wcwidth data."""
    # The generated module lives in the source tree relative to this script;
    # although generated, it is checked into Git (see module docstring).
    table_path = join(dirname(__file__), "..", "src", "black", "_width_table.py")
    with open(table_path, "w") as f:
        # Header records which wcwidth and Unicode versions produced the table.
        f.write(f"""# Generated by {basename(__file__)}
# wcwidth {wcwidth.__version__}
# Unicode {wcwidth.list_versions()[-1]}
from typing import Final
WIDTH_TABLE: Final[list[tuple[int, int, int]]] = [
""")
        # One "(start, end, width)" literal per contiguous equal-width range.
        for triple in make_width_table():
            f.write(f"    {triple!r},\n")
        f.write("]\n")
# Script entry point: regenerate the width table in place.
if __name__ == "__main__":
    main()
| python | MIT | c3cc5a95d4f72e6ccc27ebae23344fce8cc70786 | 2026-01-04T14:40:23.735327Z | false |
psf/black | https://github.com/psf/black/blob/c3cc5a95d4f72e6ccc27ebae23344fce8cc70786/scripts/migrate-black.py | scripts/migrate-black.py | #!/usr/bin/env python3
# check out every commit added by the current branch, blackify them,
# and generate diffs to reconstruct the original commits, but then
# blackified
import logging
import os
import sys
from subprocess import PIPE, Popen, check_output, run
def git(*args: str) -> str:
    """Run a git subcommand and return its stdout as stripped UTF-8 text."""
    output = check_output(["git", *args])
    return output.decode("utf8").strip()
def blackify(base_branch: str, black_command: str, logger: logging.Logger) -> int:
    """Rewrite every commit on the current feature branch with *black_command*
    applied, reconstructing the branch history in blackified form.

    Strategy: check out each commit since the merge-base with *base_branch*,
    run black on it, and commit the snapshot to a throwaway ``<sha>-black``
    branch. Then rebuild the feature branch on top of *base_branch* by piping
    the diff between consecutive blackified snapshots into ``git apply`` and
    committing it with the original commit's metadata (``-C``).

    Returns a process exit code: 0 on success, 1 on a precondition failure
    (not on a feature branch, not at the repo root, or no common commit).
    """
    current_branch = git("branch", "--show-current")

    # Preconditions: must be on a feature branch, at the repository root.
    if not current_branch or base_branch == current_branch:
        logger.error("You need to check out a feature branch to work on")
        return 1

    if not os.path.exists(".git"):
        logger.error("Run me in the root of your repo")
        return 1

    merge_base = git("merge-base", "HEAD", base_branch)
    if not merge_base:
        logger.error(
            f"Could not find a common commit for current head and {base_branch}"
        )
        return 1

    # Oldest-first commit list; ``~1..HEAD`` includes the merge-base itself so
    # its blackified snapshot serves as the diff baseline for the first commit.
    commits = git(
        "log", "--reverse", "--pretty=format:%H", f"{merge_base}~1..HEAD"
    ).split()

    # Phase 1: snapshot each commit with black applied, on a throwaway branch.
    for commit in commits:
        git("checkout", commit, f"-b{commit}-black")
        check_output(black_command, shell=True)
        git("commit", "-aqm", "blackify")

    git("checkout", base_branch, f"-b{current_branch}-black")

    # Feature-detect optional ``git apply`` flags once, before the loop —
    # probing ``git apply -h`` per commit was loop-invariant subprocess work.
    apply_help = run(["git", "apply", "-h"], stdout=PIPE).stdout
    allow_empty = b"--allow-empty" in apply_help
    quiet = b"--quiet" in apply_help

    # Phase 2: replay the deltas between consecutive blackified snapshots.
    for last_commit, commit in zip(commits, commits[1:], strict=False):
        git_diff = Popen(
            [
                "git",
                "diff",
                "--binary",
                "--find-copies",
                f"{last_commit}-black..{commit}-black",
            ],
            stdout=PIPE,
        )
        # Apply the streamed diff with a 3-way merge fallback.
        git_apply = Popen(
            [
                "git",
                "apply",
            ]
            + (["--quiet"] if quiet else [])
            + [
                "-3",
                "--intent-to-add",
            ]
            + (["--allow-empty"] if allow_empty else [])
            + [
                "-",
            ],
            stdin=git_diff.stdout,
        )
        # Close our copy of the pipe so git_apply sees EOF when git_diff exits.
        if git_diff.stdout is not None:
            git_diff.stdout.close()
        git_apply.communicate()
        # Recommit with the original commit's author and message.
        git("commit", "--allow-empty", "-aqC", commit)

    # Phase 3: delete the throwaway per-commit snapshot branches.
    for commit in commits:
        git("branch", "-qD", f"{commit}-black")
    return 0
# Script entry point: parse CLI arguments, wire up logging, and exit with
# blackify()'s return code.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    # Positional: the branch the feature branch diverged from (e.g. "main").
    parser.add_argument("base_branch")
    # Command run in each checked-out commit to reformat the tree.
    parser.add_argument("--black_command", default="black -q .")
    # Where log output goes; defaults to stdout.
    parser.add_argument("--logfile", type=argparse.FileType("w"), default=sys.stdout)
    args = parser.parse_args()
    logger = logging.getLogger(__name__)
    logger.addHandler(logging.StreamHandler(args.logfile))
    logger.setLevel(logging.INFO)
    sys.exit(blackify(args.base_branch, args.black_command, logger))
| python | MIT | c3cc5a95d4f72e6ccc27ebae23344fce8cc70786 | 2026-01-04T14:40:23.735327Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.