repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/platform/pf.py | mitmproxy/platform/pf.py | import re
import sys
def lookup(address, port, s):
    """
    Parse the pfctl state output ``s`` to look up the destination host
    matching the client (address, port).

    Returns an (address, port) tuple.
    Raises RuntimeError if no matching state entry is found.
    """
    # We may get an ipv4-mapped ipv6 address here, e.g. ::ffff:127.0.0.1.
    # Those still appear as "127.0.0.1" in the table, so we need to strip the prefix.
    # The dots must be escaped: an unescaped "." matches any character, so the
    # prefix could otherwise be stripped from non-IPv4-mapped addresses too.
    address = re.sub(r"^::ffff:(?=\d+\.\d+\.\d+\.\d+$)", "", address)
    s = s.decode()
    # ALL tcp 192.168.1.13:57474 -> 23.205.82.58:443 ESTABLISHED:ESTABLISHED
    specv4 = f"{address}:{port}"
    # ALL tcp 2a01:e35:8bae:50f0:9d9b:ef0d:2de3:b733[58505] -> 2606:4700:30::681f:4ad0[443] ESTABLISHED:ESTABLISHED
    specv6 = f"{address}[{port}]"
    for line in s.split("\n"):
        if "ESTABLISHED:ESTABLISHED" in line and specv4 in line:
            fields = line.split()
            if len(fields) > 4:
                if sys.platform.startswith("freebsd"):
                    # strip parentheses for FreeBSD pfctl
                    dest = fields[3][1:-1].split(":")
                else:
                    dest = fields[4].split(":")
                if len(dest) == 2:
                    return dest[0], int(dest[1])
        elif "ESTABLISHED:ESTABLISHED" in line and specv6 in line:
            fields = line.split()
            if len(fields) > 4:
                # IPv6 destinations look like "2606:4700::4ad0[443]".
                host, _, rest = fields[4].partition("[")
                return host, int(rest.split("]")[0])
    raise RuntimeError("Could not resolve original destination.")
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/platform/linux.py | mitmproxy/platform/linux.py | import socket
import struct
# Python's socket module does not have these constants
SO_ORIGINAL_DST = 80
SOL_IPV6 = 41
def original_addr(csock: socket.socket) -> tuple[str, int]:
    """Return the original (pre-redirect) destination (ip, port) of csock.

    Uses netfilter's SO_ORIGINAL_DST socket option to recover the address the
    client actually dialed before iptables redirected it to the proxy.
    """
    # Get the original destination on Linux.
    # In theory, this can be done using the following syscalls:
    #     sock.getsockopt(socket.SOL_IP, SO_ORIGINAL_DST, 16)
    #     sock.getsockopt(SOL_IPV6, SO_ORIGINAL_DST, 28)
    #
    # In practice, it is a bit more complex:
    #  1. We cannot rely on sock.family to decide which syscall to use because of IPv4-mapped
    #     IPv6 addresses. If sock.family is AF_INET6 while sock.getsockname() is ::ffff:127.0.0.1,
    #     we need to call the IPv4 version to get a result.
    #  2. We can't just try the IPv4 syscall and then do IPv6 if that doesn't work,
    #     because doing the wrong syscall can apparently crash the whole Python runtime.
    # As such, we use a heuristic to check which syscall to do.
    is_ipv4 = "." in csock.getsockname()[0]  # either 127.0.0.1 or ::ffff:127.0.0.1
    if is_ipv4:
        # the struct returned here should only have 8 bytes, but invoking sock.getsockopt
        # with buflen=8 doesn't work.
        dst = csock.getsockopt(socket.SOL_IP, SO_ORIGINAL_DST, 16)
        # sockaddr_in layout: skip the 2-byte family, read the big-endian
        # port, then the 4 raw address bytes.
        port, raw_ip = struct.unpack_from("!2xH4s", dst)
        ip = socket.inet_ntop(socket.AF_INET, raw_ip)
    else:
        dst = csock.getsockopt(SOL_IPV6, SO_ORIGINAL_DST, 28)
        # sockaddr_in6 layout: skip family, read port, skip the 4-byte
        # flowinfo field, then the 16 raw address bytes.
        port, raw_ip = struct.unpack_from("!2xH4x16s", dst)
        ip = socket.inet_ntop(socket.AF_INET6, raw_ip)
    return ip, port
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/platform/osx.py | mitmproxy/platform/osx.py | import subprocess
from . import pf
"""
Doing this the "right" way by using DIOCNATLOOK on the pf device turns out
to be a pain. Apple has made a number of modifications to the data
structures returned, and compiling userspace tools to test and work with
this turns out to be a pain in the ass. Parsing pfctl output is short,
simple, and works.
Note: Also Tested with FreeBSD 10 pkgng Python 2.7.x.
Should work almost exactly as on Mac OS X and except with some changes to
the output processing of pfctl (see pf.py).
"""
STATECMD = ("sudo", "-n", "/sbin/pfctl", "-s", "state")
def original_addr(csock):
    """Resolve the original destination of csock via ``pfctl -s state``.

    Runs the (passwordless) sudo pfctl command and delegates output parsing
    to pf.lookup(). Raises RuntimeError if pfctl fails or if sudo privileges
    are insufficient.
    """
    sudo_hint = "sudo: a password is required"
    client_ip, client_port = csock.getpeername()[:2]
    try:
        stxt = subprocess.check_output(STATECMD, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        if sudo_hint not in e.output.decode(errors="replace"):
            raise RuntimeError("Error getting pfctl state: " + repr(e))
        insufficient_priv = True
    else:
        # pfctl may also print the sudo complaint on stdout without failing.
        insufficient_priv = sudo_hint in stxt.decode(errors="replace")
    if insufficient_priv:
        raise RuntimeError(
            "Insufficient privileges to access pfctl. "
            "See https://mitmproxy.org/docs/latest/howto-transparent/#macos for details."
        )
    return pf.lookup(client_ip, client_port, stxt)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/platform/windows.py | mitmproxy/platform/windows.py | from __future__ import annotations
import collections.abc
import contextlib
import ctypes.wintypes
import json
import logging
import os
import re
import socket
import socketserver
import threading
import time
from collections.abc import Callable
from io import BufferedIOBase
from typing import Any
from typing import cast
from typing import ClassVar
import pydivert.consts
from mitmproxy.net.local_ip import get_local_ip
from mitmproxy.net.local_ip import get_local_ip6
REDIRECT_API_HOST = "127.0.0.1"
REDIRECT_API_PORT = 8085
logger = logging.getLogger(__name__)
##########################
# Resolver
def read(rfile: BufferedIOBase) -> Any:
    """Read one newline-delimited JSON message from rfile.

    Returns None on a blank line or EOF, otherwise the decoded value.
    """
    line = rfile.readline().strip()
    return json.loads(line) if line else None
def write(data, wfile: BufferedIOBase) -> None:
    """Serialize data as one newline-terminated JSON message and flush."""
    payload = json.dumps(data).encode() + b"\n"
    wfile.write(payload)
    wfile.flush()
class Resolver:
    """Client for the TransparentProxy redirect API.

    Connects to the local API server and translates a redirected client
    address into the connection's original destination.
    """

    sock: socket.socket | None
    lock: threading.RLock

    def __init__(self):
        self.sock = None
        self.lock = threading.RLock()

    def setup(self):
        """Ensure the redirect machinery is running and connect to its API."""
        with self.lock:
            TransparentProxy.setup()
            self._connect()

    def _connect(self):
        # (Re-)establish the API connection and announce our own PID so the
        # proxifier exempts this process from redirection.
        if self.sock:
            self.sock.close()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((REDIRECT_API_HOST, REDIRECT_API_PORT))
        self.wfile = self.sock.makefile("wb")
        self.rfile = self.sock.makefile("rb")
        write(os.getpid(), self.wfile)

    def original_addr(self, csock: socket.socket):
        """Return the original (ip, port) destination for a redirected socket.

        Raises RuntimeError when the API has no mapping for the client.
        """
        ip, port = csock.getpeername()[:2]
        # Strip the IPv4-mapped IPv6 prefix (::ffff:1.2.3.4 -> 1.2.3.4).
        # The dots must be escaped; an unescaped "." would match any character.
        ip = re.sub(r"^::ffff:(?=\d+\.\d+\.\d+\.\d+$)", "", ip)
        # Drop any IPv6 zone index (e.g. "fe80::1%eth0" -> "fe80::1").
        ip = ip.split("%", 1)[0]
        with self.lock:
            try:
                write((ip, port), self.wfile)
                addr = read(self.rfile)
                if addr is None:
                    raise RuntimeError("Cannot resolve original destination.")
                return tuple(addr)
            except (EOFError, OSError, AttributeError):
                # The API connection broke (or was never set up): reconnect
                # and retry the lookup.
                self._connect()
                return self.original_addr(csock)
class APIRequestHandler(socketserver.StreamRequestHandler):
    """
    TransparentProxy API: returns the JSON-encoded server (address, port)
    tuple for each received client (address, port) tuple.

    Protocol: the first message a client sends is its own PID (that process
    is exempted from redirection for the lifetime of the connection); each
    following message is a client address to look up.
    """

    server: APIServer

    def handle(self) -> None:
        proxifier: TransparentProxy = self.server.proxifier
        try:
            pid: int = read(self.rfile)
            if pid is None:
                return
            with proxifier.exempt(pid):
                while True:
                    c = read(self.rfile)
                    if c is None:
                        # Clean disconnect: the peer closed the stream.
                        return
                    try:
                        server = proxifier.client_server_map[
                            cast(tuple[str, int], tuple(c))
                        ]
                    except KeyError:
                        # Unknown client connection; report "no mapping".
                        server = None
                    write(server, self.wfile)
        except (EOFError, OSError):
            # Abrupt disconnect / broken pipe: nothing sensible left to do.
            pass
class APIServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """Threaded TCP server exposing the redirect lookup API."""

    def __init__(self, proxifier, *args, **kwargs):
        self.proxifier = proxifier
        super().__init__(*args, **kwargs)
        # Worker threads must not keep the process alive on shutdown.
        self.daemon_threads = True
##########################
# Windows API
# from Windows' error.h
ERROR_INSUFFICIENT_BUFFER = 0x7A
IN6_ADDR = ctypes.c_ubyte * 16
IN4_ADDR = ctypes.c_ubyte * 4
#
# IPv6
#
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa366896(v=vs.85).aspx
class MIB_TCP6ROW_OWNER_PID(ctypes.Structure):
    """One row of the IPv6 TCP connection table (MIB_TCP6ROW_OWNER_PID).

    Field order and widths mirror the Windows ABI exactly; do not reorder.
    """
    _fields_ = [
        ("ucLocalAddr", IN6_ADDR),  # raw 16-byte local IPv6 address
        ("dwLocalScopeId", ctypes.wintypes.DWORD),
        ("dwLocalPort", ctypes.wintypes.DWORD),
        ("ucRemoteAddr", IN6_ADDR),  # raw 16-byte remote IPv6 address
        ("dwRemoteScopeId", ctypes.wintypes.DWORD),
        ("dwRemotePort", ctypes.wintypes.DWORD),
        ("dwState", ctypes.wintypes.DWORD),
        ("dwOwningPid", ctypes.wintypes.DWORD),  # PID owning this connection
    ]
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa366905(v=vs.85).aspx
def MIB_TCP6TABLE_OWNER_PID(size):
    """Create a MIB_TCP6TABLE_OWNER_PID instance with room for `size` rows.

    ctypes array fields are fixed-size, so the struct type has to be built
    dynamically for the row count needed at call time.
    """
    class _MIB_TCP6TABLE_OWNER_PID(ctypes.Structure):
        _fields_ = [
            ("dwNumEntries", ctypes.wintypes.DWORD),
            ("table", MIB_TCP6ROW_OWNER_PID * size),
        ]
    return _MIB_TCP6TABLE_OWNER_PID()
#
# IPv4
#
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa366913(v=vs.85).aspx
class MIB_TCPROW_OWNER_PID(ctypes.Structure):
    """One row of the IPv4 TCP connection table (MIB_TCPROW_OWNER_PID).

    Field order and widths mirror the Windows ABI exactly; do not reorder.
    Note the IPv4 layout differs from the IPv6 row: dwState comes first.
    """
    _fields_ = [
        ("dwState", ctypes.wintypes.DWORD),
        ("ucLocalAddr", IN4_ADDR),  # raw 4-byte local IPv4 address
        ("dwLocalPort", ctypes.wintypes.DWORD),
        ("ucRemoteAddr", IN4_ADDR),  # raw 4-byte remote IPv4 address
        ("dwRemotePort", ctypes.wintypes.DWORD),
        ("dwOwningPid", ctypes.wintypes.DWORD),  # PID owning this connection
    ]
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa366921(v=vs.85).aspx
def MIB_TCPTABLE_OWNER_PID(size):
    """Create a MIB_TCPTABLE_OWNER_PID instance with room for `size` rows.

    ctypes array fields are fixed-size, so the struct type has to be built
    dynamically for the row count needed at call time.
    """
    class _MIB_TCPTABLE_OWNER_PID(ctypes.Structure):
        _fields_ = [
            ("dwNumEntries", ctypes.wintypes.DWORD),
            ("table", MIB_TCPROW_OWNER_PID * size),
        ]
    return _MIB_TCPTABLE_OWNER_PID()
TCP_TABLE_OWNER_PID_CONNECTIONS = 4
class TcpConnectionTable(collections.abc.Mapping):
    """Read-only mapping of (local_ip, local_port) -> owning process PID.

    Populated on demand via refresh() from Windows' GetExtendedTcpTable,
    covering both the IPv4 and IPv6 TCP connection tables.
    """

    # Initial row capacity for the ctypes buffers; grown automatically when
    # the syscall reports ERROR_INSUFFICIENT_BUFFER.
    DEFAULT_TABLE_SIZE = 4096

    def __init__(self):
        self._tcp = MIB_TCPTABLE_OWNER_PID(self.DEFAULT_TABLE_SIZE)
        self._tcp_size = ctypes.wintypes.DWORD(self.DEFAULT_TABLE_SIZE)
        self._tcp6 = MIB_TCP6TABLE_OWNER_PID(self.DEFAULT_TABLE_SIZE)
        self._tcp6_size = ctypes.wintypes.DWORD(self.DEFAULT_TABLE_SIZE)
        self._map = {}

    def __getitem__(self, item):
        return self._map[item]

    def __iter__(self):
        return self._map.__iter__()

    def __len__(self):
        return self._map.__len__()

    def refresh(self):
        """Rebuild the mapping from the current system connection tables."""
        self._map = {}
        self._refresh_ipv4()
        self._refresh_ipv6()

    def _refresh_ipv4(self):
        ret = ctypes.windll.iphlpapi.GetExtendedTcpTable(  # type: ignore
            ctypes.byref(self._tcp),
            ctypes.byref(self._tcp_size),
            False,
            socket.AF_INET,
            TCP_TABLE_OWNER_PID_CONNECTIONS,
            0,
        )
        if ret == 0:
            for row in self._tcp.table[: self._tcp.dwNumEntries]:
                local_ip = socket.inet_ntop(socket.AF_INET, bytes(row.ucLocalAddr))
                # dwLocalPort is documented to hold the port in network byte
                # order (low 16 bits); convert to host order.
                local_port = socket.htons(row.dwLocalPort)
                self._map[(local_ip, local_port)] = row.dwOwningPid
        elif ret == ERROR_INSUFFICIENT_BUFFER:
            # Grow the buffer to the size the syscall reported and retry.
            self._tcp = MIB_TCPTABLE_OWNER_PID(self._tcp_size.value)
            # no need to update size, that's already done.
            self._refresh_ipv4()
        else:
            raise RuntimeError(
                "[IPv4] Unknown GetExtendedTcpTable return code: %s" % ret
            )

    def _refresh_ipv6(self):
        ret = ctypes.windll.iphlpapi.GetExtendedTcpTable(  # type: ignore
            ctypes.byref(self._tcp6),
            ctypes.byref(self._tcp6_size),
            False,
            socket.AF_INET6,
            TCP_TABLE_OWNER_PID_CONNECTIONS,
            0,
        )
        if ret == 0:
            for row in self._tcp6.table[: self._tcp6.dwNumEntries]:
                local_ip = socket.inet_ntop(socket.AF_INET6, bytes(row.ucLocalAddr))
                local_port = socket.htons(row.dwLocalPort)
                self._map[(local_ip, local_port)] = row.dwOwningPid
        elif ret == ERROR_INSUFFICIENT_BUFFER:
            # Grow the buffer to the size the syscall reported and retry.
            self._tcp6 = MIB_TCP6TABLE_OWNER_PID(self._tcp6_size.value)
            # no need to update size, that's already done.
            self._refresh_ipv6()
        else:
            raise RuntimeError(
                "[IPv6] Unknown GetExtendedTcpTable return code: %s" % ret
            )
class Redirect(threading.Thread):
    """Daemon thread that pulls packets from a WinDivert handle and passes
    each one to the supplied callback."""

    daemon = True
    windivert: pydivert.WinDivert

    def __init__(
        self,
        handle: Callable[[pydivert.Packet], None],
        filter: str,
        layer: pydivert.Layer = pydivert.Layer.NETWORK,
        flags: pydivert.Flag = 0,
    ) -> None:
        self.handle = handle
        self.windivert = pydivert.WinDivert(filter, layer, flags=flags)
        super().__init__()

    def start(self):
        # Open the WinDivert handle before the thread starts reading from it.
        self.windivert.open()
        super().start()

    def run(self):
        while True:
            try:
                packet = self.windivert.recv()
            except OSError as e:
                # winerror 995 (ERROR_OPERATION_ABORTED) is raised when the
                # handle is closed (see shutdown()); exit the loop quietly.
                if getattr(e, "winerror", None) == 995:
                    return
                else:
                    raise
            else:
                self.handle(packet)

    def shutdown(self):
        self.windivert.close()

    def recv(self) -> pydivert.Packet | None:
        """
        Convenience function that receives a packet from the passed handler and handles error codes.
        If the process has been shut down, None is returned.
        """
        try:
            return self.windivert.recv()
        except OSError as e:
            if e.winerror == 995:  # type: ignore
                return None
            else:
                raise
class RedirectLocal(Redirect):
    """Redirect variant for traffic originating on the local machine.

    Packets from PIDs in trusted_pids (i.e. the proxy process itself) are
    re-injected unchanged to avoid a redirect loop; everything else is handed
    to redirect_request.
    """

    trusted_pids: set[int]

    def __init__(
        self, redirect_request: Callable[[pydivert.Packet], None], filter: str
    ) -> None:
        self.tcp_connections = TcpConnectionTable()
        self.trusted_pids = set()
        self.redirect_request = redirect_request
        super().__init__(self.handle, filter)

    def handle(self, packet):
        client = (packet.src_addr, packet.src_port)
        if client not in self.tcp_connections:
            # Unknown local endpoint: refresh the PID table once before deciding.
            self.tcp_connections.refresh()
        # If this fails, we most likely have a connection from an external client.
        # In this, case we always want to proxy the request.
        pid = self.tcp_connections.get(client, None)
        if pid not in self.trusted_pids:
            self.redirect_request(packet)
        else:
            # It's not really clear why we need to recalculate the checksum here,
            # but this was identified as necessary in https://github.com/mitmproxy/mitmproxy/pull/3174.
            self.windivert.send(packet, recalculate_checksum=True)
TConnection = tuple[str, int]


class ClientServerMap:
    """Thread-safe client -> server address mapping with LRU eviction."""

    # Upper bound on remembered connections; least-recently-used entries
    # are dropped first once the bound is exceeded.
    connection_cache_size: ClassVar[int] = 65536

    def __init__(self):
        self._lock = threading.Lock()
        self._entries = collections.OrderedDict()

    def __getitem__(self, item: TConnection) -> TConnection:
        with self._lock:
            return self._entries[item]

    def __setitem__(self, key: TConnection, value: TConnection) -> None:
        with self._lock:
            self._entries[key] = value
            # Refresh recency, then trim back down to capacity.
            self._entries.move_to_end(key)
            while len(self._entries) > self.connection_cache_size:
                self._entries.popitem(last=False)
class TransparentProxy:
    """
    Transparent Windows Proxy for mitmproxy based on WinDivert/PyDivert. This module can be used to
    redirect both traffic that is forwarded by the host and traffic originating from the host itself.
    Requires elevated (admin) privileges. Can be started separately by manually running the file.

    How it works:

    (1) First, we intercept all packets that match our filter.
    We both consider traffic that is forwarded by the OS (WinDivert's NETWORK_FORWARD layer) as well
    as traffic sent from the local machine (WinDivert's NETWORK layer). In the case of traffic from
    the local machine, we need to exempt packets sent from the proxy to not create a redirect loop.
    To accomplish this, we use Windows' GetExtendedTcpTable syscall and determine the source
    application's PID.

    For each intercepted packet, we
        1. Store the source -> destination mapping (address and port)
        2. Remove the packet from the network (by not reinjecting it).
        3. Re-inject the packet into the local network stack, but with the destination address
           changed to the proxy.

    (2) Next, the proxy receives the forwarded packet, but does not know the real destination yet
    (which we overwrote with the proxy's address). On Linux, we would now call
    getsockopt(SO_ORIGINAL_DST). We now access the redirect module's API (see APIRequestHandler),
    submit the source information and get the actual destination back (which we stored in 1.1).

    (3) The proxy now establishes the upstream connection as usual.

    (4) Finally, the proxy sends the response back to the client. To make it work, we need to change
    the packet's source address back to the original destination (using the mapping from 1.1),
    which the client believes it is talking to.

    Limitations:

    - We assume that ephemeral TCP ports are not re-used for multiple connections at the same time.
    The proxy will fail if an application connects to example.com and example.org from
    192.168.0.42:4242 simultaneously. This could be mitigated by introducing unique "meta-addresses"
    which mitmproxy sees, but this would remove the correct client info from mitmproxy.
    """

    local: RedirectLocal | None = None
    # really weird linting error here.
    forward: Redirect | None = None
    response: Redirect
    icmp: Redirect
    proxy_port: int
    filter: str
    client_server_map: ClientServerMap

    def __init__(
        self,
        local: bool = True,
        forward: bool = True,
        proxy_port: int = 8080,
        filter: str | None = "tcp.DstPort == 80 or tcp.DstPort == 443",
    ) -> None:
        self.proxy_port = proxy_port
        self.filter = (
            filter
            or f"tcp.DstPort != {proxy_port} and tcp.DstPort != {REDIRECT_API_PORT} and tcp.DstPort < 49152"
        )
        self.ipv4_address = get_local_ip()
        self.ipv6_address = get_local_ip6()
        # print(f"IPv4: {self.ipv4_address}, IPv6: {self.ipv6_address}")
        self.client_server_map = ClientServerMap()
        self.api = APIServer(
            self, (REDIRECT_API_HOST, REDIRECT_API_PORT), APIRequestHandler
        )
        self.api_thread = threading.Thread(target=self.api.serve_forever)
        self.api_thread.daemon = True
        if forward:
            self.forward = Redirect(
                self.redirect_request, self.filter, pydivert.Layer.NETWORK_FORWARD
            )
        if local:
            self.local = RedirectLocal(self.redirect_request, self.filter)
        # The proxy server responds to the client. To the client,
        # this response should look like it has been sent by the real target
        self.response = Redirect(
            self.redirect_response,
            f"outbound and tcp.SrcPort == {proxy_port}",
        )
        # Block all ICMP requests (which are sent on Windows by default).
        # If we don't do this, our proxy machine may send an ICMP redirect to the client,
        # which instructs the client to directly connect to the real gateway
        # if they are on the same network.
        self.icmp = Redirect(lambda _: None, "icmp", flags=pydivert.Flag.DROP)

    @classmethod
    def setup(cls):
        """Start a TransparentProxy unless a redirect API server already runs."""
        # TODO: Make sure that server can be killed cleanly. That's a bit difficult as we don't have access to
        # controller.should_exit when this is called.
        logger.warning(
            "Transparent mode on Windows is unsupported, flaky, and deprecated. "
            "Consider using local redirect mode or WireGuard mode instead."
        )
        # Probe whether an API server is already listening. The probe socket
        # is closed either way so it doesn't leak a file descriptor.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            server_unavailable = s.connect_ex((REDIRECT_API_HOST, REDIRECT_API_PORT))
        if server_unavailable:
            proxifier = TransparentProxy()
            proxifier.start()

    def start(self):
        """Start the API thread and all packet redirection handles."""
        self.api_thread.start()
        self.icmp.start()
        self.response.start()
        if self.forward:
            self.forward.start()
        if self.local:
            self.local.start()

    def shutdown(self):
        """Stop all redirection handles and the API server."""
        if self.local:
            self.local.shutdown()
        if self.forward:
            self.forward.shutdown()
        self.response.shutdown()
        self.icmp.shutdown()
        self.api.shutdown()

    def redirect_request(self, packet: pydivert.Packet):
        """Divert a client->server packet to the local proxy port.

        Records the original destination in client_server_map so that
        redirect_response() can later restore it on the way back.
        """
        # print(" * Redirect client -> server to proxy")
        # print(f"{packet.src_addr}:{packet.src_port} -> {packet.dst_addr}:{packet.dst_port}")
        client = (packet.src_addr, packet.src_port)
        self.client_server_map[client] = (packet.dst_addr, packet.dst_port)
        # We do need to inject to an external IP here, 127.0.0.1 does not work.
        if packet.address_family == socket.AF_INET:
            assert self.ipv4_address
            packet.dst_addr = self.ipv4_address
        elif packet.address_family == socket.AF_INET6:
            if not self.ipv6_address:
                self.ipv6_address = get_local_ip6(packet.src_addr)
            assert self.ipv6_address
            packet.dst_addr = self.ipv6_address
        else:
            raise RuntimeError("Unknown address family")
        packet.dst_port = self.proxy_port
        packet.direction = pydivert.consts.Direction.INBOUND
        # We need a handle on the NETWORK layer. the local handle is not guaranteed to exist,
        # so we use the response handle.
        self.response.windivert.send(packet)

    def redirect_response(self, packet: pydivert.Packet):
        """
        If the proxy responds to the client, let the client believe the target server sent the
        packets.
        """
        # print(" * Adjust proxy -> client")
        client = (packet.dst_addr, packet.dst_port)
        try:
            packet.src_addr, packet.src_port = self.client_server_map[client]
        except KeyError:
            print(f"Warning: Previously unseen connection from proxy to {client}")
        else:
            packet.recalculate_checksums()
            self.response.windivert.send(packet, recalculate_checksum=False)

    @contextlib.contextmanager
    def exempt(self, pid: int):
        """Temporarily exempt a process id from local traffic redirection."""
        if self.local:
            self.local.trusted_pids.add(pid)
        try:
            yield
        finally:
            if self.local:
                self.local.trusted_pids.remove(pid)
if __name__ == "__main__":
    # Stand-alone CLI: drive the redirect machinery without mitmproxy itself.
    import click

    @click.group()
    def cli():
        pass

    @cli.command()
    @click.option(
        "--local/--no-local", default=True, help="Redirect the host's own traffic."
    )
    @click.option(
        "--forward/--no-forward",
        default=True,
        help="Redirect traffic that's forwarded by the host.",
    )
    @click.option(
        "--filter",
        type=str,
        metavar="WINDIVERT_FILTER",
        help="Custom WinDivert interception rule.",
    )
    @click.option(
        "-p",
        "--proxy-port",
        type=int,
        metavar="8080",
        default=8080,
        help="The port mitmproxy is listening on.",
    )
    def redirect(**options):
        """Redirect flows to mitmproxy."""
        proxy = TransparentProxy(**options)
        proxy.start()
        print(f" * Redirection active.")
        print(f" Filter: {proxy.filter}")
        try:
            # All the work happens in daemon threads; just keep the main
            # thread alive until the user interrupts.
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            print(" * Shutting down...")
            proxy.shutdown()
            print(" * Shut down.")

    @cli.command()
    def connections():
        """List all TCP connections and the associated PIDs."""
        connections = TcpConnectionTable()
        connections.refresh()
        for (ip, port), pid in connections.items():
            print(f"{ip}:{port} -> {pid}")

    cli()
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/platform/openbsd.py | mitmproxy/platform/openbsd.py | def original_addr(csock):
return csock.getsockname()
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/platform/__init__.py | mitmproxy/platform/__init__.py | import re
import socket
import sys
from collections.abc import Callable
def init_transparent_mode() -> None:
    """
    Initialize transparent mode.

    No-op by default; on Windows this name is rebound at import time (below)
    to the resolver's setup routine, which starts the redirect machinery.
    """
original_addr: Callable[[socket.socket], tuple[str, int]] | None
"""
Get the original destination for the given socket.
This function will be None if transparent mode is not supported.
"""
# Select the platform-specific implementation. Imports happen lazily inside
# each branch so unsupported platforms don't pay for (or break on)
# platform-specific dependencies.
if re.match(r"linux(?:2)?", sys.platform):
    from . import linux

    original_addr = linux.original_addr
elif sys.platform == "darwin" or sys.platform.startswith("freebsd"):
    from . import osx

    original_addr = osx.original_addr
elif sys.platform.startswith("openbsd"):
    from . import openbsd

    original_addr = openbsd.original_addr
elif sys.platform == "win32":
    from . import windows

    resolver = windows.Resolver()
    # Windows needs explicit initialization of the redirect machinery, so the
    # no-op init_transparent_mode defined above is replaced here.
    init_transparent_mode = resolver.setup  # noqa
    original_addr = resolver.original_addr
else:
    original_addr = None
__all__ = ["original_addr", "init_transparent_mode"]
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_backend_docling_json.py | tests/test_backend_docling_json.py | """Test methods in module docling.backend.json.docling_json_backend.py."""
from io import BytesIO
from pathlib import Path
import pytest
from pydantic import ValidationError
from docling.backend.json.docling_json_backend import DoclingJSONBackend
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import DoclingDocument, InputDocument
GT_PATH: Path = Path("./tests/data/groundtruth/docling_v2/2206.01062.json")
def test_convert_valid_docling_json():
    """Round-trip a known-good Docling JSON file and compare to ground truth."""
    backend_cls = DoclingJSONBackend
    in_doc = InputDocument(
        path_or_stream=GT_PATH,
        format=InputFormat.JSON_DOCLING,
        backend=backend_cls,
    )
    backend = backend_cls(in_doc=in_doc, path_or_stream=GT_PATH)
    assert backend.is_valid()
    actual = backend.convert().export_to_dict()
    expected = DoclingDocument.load_from_json(GT_PATH).export_to_dict()
    assert actual == expected
def test_invalid_docling_json():
    """An empty JSON object must be flagged invalid and fail conversion."""
    backend_cls = DoclingJSONBackend
    stream = BytesIO(b"{}")
    in_doc = InputDocument(
        path_or_stream=stream,
        format=InputFormat.JSON_DOCLING,
        backend=backend_cls,
        filename="foo",
    )
    backend = backend_cls(in_doc=in_doc, path_or_stream=stream)
    assert not backend.is_valid()
    with pytest.raises(ValidationError):
        backend.convert()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_backend_vtt.py | tests/test_backend_vtt.py | # Assisted by watsonx Code Assistant
from pathlib import Path
import pytest
from docling_core.types.doc import DoclingDocument
from pydantic import ValidationError
from docling.backend.webvtt_backend import (
_WebVTTCueItalicSpan,
_WebVTTCueTextSpan,
_WebVTTCueTimings,
_WebVTTCueVoiceSpan,
_WebVTTFile,
_WebVTTTimestamp,
)
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import ConversionResult
from docling.document_converter import DocumentConverter
from .test_data_gen_flag import GEN_TEST_DATA
from .verify_utils import verify_document, verify_export
GENERATE = GEN_TEST_DATA
def test_vtt_cue_commponents():
    """Test WebVTT components."""
    # NOTE: the bare triple-quoted strings below are no-op statements used as
    # section markers for the individual sub-cases.
    valid_timestamps = [
        "00:01:02.345",
        "12:34:56.789",
        "02:34.567",
        "00:00:00.000",
    ]
    valid_total_seconds = [
        1 * 60 + 2.345,
        12 * 3600 + 34 * 60 + 56.789,
        2 * 60 + 34.567,
        0.0,
    ]
    for idx, ts in enumerate(valid_timestamps):
        model = _WebVTTTimestamp(raw=ts)
        assert model.seconds == valid_total_seconds[idx]
    """Test invalid WebVTT timestamps."""
    invalid_timestamps = [
        "00:60:02.345",  # minutes > 59
        "00:01:60.345",  # seconds > 59
        "00:01:02.1000",  # milliseconds > 999
        "01:02:03",  # missing milliseconds
        "01:02",  # missing milliseconds
        ":01:02.345",  # extra : for missing hours
        "abc:01:02.345",  # invalid format
    ]
    for ts in invalid_timestamps:
        with pytest.raises(ValidationError):
            _WebVTTTimestamp(raw=ts)
    """Test the timestamp __str__ method."""
    model = _WebVTTTimestamp(raw="00:01:02.345")
    assert str(model) == "00:01:02.345"
    """Test valid cue timings."""
    start = _WebVTTTimestamp(raw="00:10.005")
    end = _WebVTTTimestamp(raw="00:14.007")
    cue_timings = _WebVTTCueTimings(start=start, end=end)
    assert cue_timings.start == start
    assert cue_timings.end == end
    assert str(cue_timings) == "00:10.005 --> 00:14.007"
    """Test invalid cue timings with end timestamp before start."""
    start = _WebVTTTimestamp(raw="00:10.700")
    end = _WebVTTTimestamp(raw="00:10.500")
    with pytest.raises(ValidationError) as excinfo:
        _WebVTTCueTimings(start=start, end=end)
    assert "End timestamp must be greater than start timestamp" in str(excinfo.value)
    """Test invalid cue timings with missing end."""
    start = _WebVTTTimestamp(raw="00:10.500")
    with pytest.raises(ValidationError) as excinfo:
        _WebVTTCueTimings(start=start)
    assert "Field required" in str(excinfo.value)
    """Test invalid cue timings with missing start."""
    end = _WebVTTTimestamp(raw="00:10.500")
    with pytest.raises(ValidationError) as excinfo:
        _WebVTTCueTimings(end=end)
    assert "Field required" in str(excinfo.value)
    """Test with valid text."""
    valid_text = "This is a valid cue text span."
    span = _WebVTTCueTextSpan(text=valid_text)
    assert span.text == valid_text
    assert str(span) == valid_text
    """Test with text containing newline characters."""
    invalid_text = "This cue text span\ncontains a newline."
    with pytest.raises(ValidationError):
        _WebVTTCueTextSpan(text=invalid_text)
    """Test with text containing ampersand."""
    invalid_text = "This cue text span contains &."
    with pytest.raises(ValidationError):
        _WebVTTCueTextSpan(text=invalid_text)
    """Test with text containing less-than sign."""
    invalid_text = "This cue text span contains <."
    with pytest.raises(ValidationError):
        _WebVTTCueTextSpan(text=invalid_text)
    """Test with empty text."""
    with pytest.raises(ValidationError):
        _WebVTTCueTextSpan(text="")
    """Test that annotation validation works correctly."""
    valid_annotation = "valid-annotation"
    invalid_annotation = "invalid\nannotation"
    with pytest.raises(ValidationError):
        _WebVTTCueVoiceSpan(annotation=invalid_annotation)
    assert _WebVTTCueVoiceSpan(annotation=valid_annotation)
    """Test that classes validation works correctly."""
    annotation = "speaker name"
    valid_classes = ["class1", "class2"]
    invalid_classes = ["class\nwith\nnewlines", ""]
    with pytest.raises(ValidationError):
        _WebVTTCueVoiceSpan(annotation=annotation, classes=invalid_classes)
    assert _WebVTTCueVoiceSpan(annotation=annotation, classes=valid_classes)
    """Test that components validation works correctly."""
    annotation = "speaker name"
    valid_components = [_WebVTTCueTextSpan(text="random text")]
    invalid_components = [123, "not a component"]
    with pytest.raises(ValidationError):
        _WebVTTCueVoiceSpan(annotation=annotation, components=invalid_components)
    assert _WebVTTCueVoiceSpan(annotation=annotation, components=valid_components)
    """Test valid cue voice spans."""
    cue_span = _WebVTTCueVoiceSpan(
        annotation="speaker",
        classes=["loud", "clear"],
        components=[_WebVTTCueTextSpan(text="random text")],
    )
    expected_str = "<v.loud.clear speaker>random text</v>"
    assert str(cue_span) == expected_str
    cue_span = _WebVTTCueVoiceSpan(
        annotation="speaker",
        components=[_WebVTTCueTextSpan(text="random text")],
    )
    expected_str = "<v speaker>random text</v>"
    assert str(cue_span) == expected_str
def test_webvtt_file():
    """Parse complete WebVTT sample files and verify their structure."""
    with open("./tests/data/webvtt/webvtt_example_01.vtt", encoding="utf-8") as f:
        content = f.read()
    vtt = _WebVTTFile.parse(content)
    assert len(vtt) == 13
    block = vtt.cue_blocks[11]
    assert str(block.timings) == "00:32.500 --> 00:33.500"
    assert len(block.payload) == 1
    cue_span = block.payload[0]
    assert isinstance(cue_span, _WebVTTCueVoiceSpan)
    assert cue_span.annotation == "Neil deGrasse Tyson"
    assert not cue_span.classes
    assert len(cue_span.components) == 1
    comp = cue_span.components[0]
    assert isinstance(comp, _WebVTTCueItalicSpan)
    assert len(comp.components) == 1
    comp2 = comp.components[0]
    assert isinstance(comp2, _WebVTTCueTextSpan)
    assert comp2.text == "Laughs"
    # Example 02: parsing followed by serialization must reproduce the input.
    with open("./tests/data/webvtt/webvtt_example_02.vtt", encoding="utf-8") as f:
        content = f.read()
    vtt = _WebVTTFile.parse(content)
    assert len(vtt) == 4
    reverse = (
        "WEBVTT\n\nNOTE Copyright © 2019 World Wide Web Consortium. "
        "https://www.w3.org/TR/webvtt1/\n\n"
    )
    reverse += "\n\n".join([str(block) for block in vtt.cue_blocks])
    assert content == reverse
    # Example 03: every cue block carries an identifier.
    with open("./tests/data/webvtt/webvtt_example_03.vtt", encoding="utf-8") as f:
        content = f.read()
    vtt = _WebVTTFile.parse(content)
    assert len(vtt) == 13
    for block in vtt:
        assert block.identifier
    block = vtt.cue_blocks[0]
    assert block.identifier == "62357a1d-d250-41d5-a1cf-6cc0eeceffcc/15-0"
    assert str(block.timings) == "00:00:04.963 --> 00:00:08.571"
    assert len(block.payload) == 1
    assert isinstance(block.payload[0], _WebVTTCueVoiceSpan)
    block = vtt.cue_blocks[2]
    # A stale `assert isinstance(cue_span, _WebVTTCueVoiceSpan)` used to sit
    # here; it re-checked the variable from the example_01 section rather than
    # anything about this block, so it was removed as a copy-paste leftover.
    assert block.identifier == "62357a1d-d250-41d5-a1cf-6cc0eeceffcc/16-0"
    assert str(block.timings) == "00:00:10.683 --> 00:00:11.563"
    assert len(block.payload) == 1
    assert isinstance(block.payload[0], _WebVTTCueTextSpan)
    assert block.payload[0].text == "Good."
def test_e2e_vtt_conversions():
    """Convert every sample .vtt file and verify all export formats."""
    data_dir = Path("./tests/data/webvtt/")
    converter = DocumentConverter(allowed_formats=[InputFormat.VTT])
    for vtt_file in sorted(data_dir.rglob("*.vtt")):
        gt_path = vtt_file.parent.parent / "groundtruth" / "docling_v2" / vtt_file.name
        conv_result: ConversionResult = converter.convert(vtt_file)
        doc: DoclingDocument = conv_result.document
        pred_md: str = doc.export_to_markdown(escape_html=False)
        assert verify_export(pred_md, str(gt_path) + ".md", generate=GENERATE), (
            "export to md"
        )
        pred_itxt: str = doc._export_to_indented_text(
            max_text_len=70, explicit_tables=False
        )
        assert verify_export(pred_itxt, str(gt_path) + ".itxt", generate=GENERATE), (
            "export to indented-text"
        )
        assert verify_document(doc, str(gt_path) + ".json", GENERATE)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_invalid_input.py | tests/test_invalid_input.py | from io import BytesIO
from pathlib import Path
import pytest
from docling.datamodel.base_models import ConversionStatus, DocumentStream
from docling.document_converter import ConversionError, DocumentConverter
def get_pdf_path():
    """Return the path of the sample PDF used by the size-limit tests below."""
    return Path("./tests/data/pdf/2305.03393v1-pg9.pdf")
@pytest.fixture
def converter():
    """Provide a default DocumentConverter instance."""
    return DocumentConverter()
def test_convert_unsupported_doc_format_wout_exception(converter: DocumentConverter):
    """An unknown file extension must yield SKIPPED when errors are suppressed."""
    stream = DocumentStream(name="input.xyz", stream=BytesIO(b"xyz"))
    outcome = converter.convert(stream, raises_on_error=False)
    assert outcome.status == ConversionStatus.SKIPPED
def test_convert_unsupported_doc_format_with_exception(converter: DocumentConverter):
    """An unknown file extension must raise ConversionError when requested."""
    stream = DocumentStream(name="input.xyz", stream=BytesIO(b"xyz"))
    with pytest.raises(ConversionError):
        converter.convert(stream, raises_on_error=True)
def test_convert_too_small_filesize_limit_wout_exception(converter: DocumentConverter):
    """Exceeding max_file_size yields a FAILURE status when not raising."""
    outcome = converter.convert(get_pdf_path(), max_file_size=1, raises_on_error=False)
    assert outcome.status == ConversionStatus.FAILURE
def test_convert_too_small_filesize_limit_with_exception(converter: DocumentConverter):
    """Exceeding max_file_size raises ConversionError when requested."""
    with pytest.raises(ConversionError):
        converter.convert(get_pdf_path(), max_file_size=1, raises_on_error=True)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_asr_mlx_whisper.py | tests/test_asr_mlx_whisper.py | """
Test MLX Whisper integration for Apple Silicon ASR pipeline.
"""
import sys
from pathlib import Path
from unittest.mock import Mock, patch
import pytest
from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
from docling.datamodel.asr_model_specs import (
WHISPER_BASE,
WHISPER_BASE_MLX,
WHISPER_LARGE,
WHISPER_LARGE_MLX,
WHISPER_MEDIUM,
WHISPER_SMALL,
WHISPER_TINY,
WHISPER_TURBO,
)
from docling.datamodel.pipeline_options import AsrPipelineOptions
from docling.datamodel.pipeline_options_asr_model import (
InferenceAsrFramework,
InlineAsrMlxWhisperOptions,
)
from docling.pipeline.asr_pipeline import AsrPipeline, _MlxWhisperModel
class TestMlxWhisperIntegration:
    """Test MLX Whisper model integration."""

    def test_mlx_whisper_options_creation(self):
        """MLX Whisper options should carry the expected defaults."""
        opts = InlineAsrMlxWhisperOptions(
            repo_id="mlx-community/whisper-tiny-mlx",
            language="en",
            task="transcribe",
        )
        assert opts.inference_framework == InferenceAsrFramework.MLX
        assert opts.repo_id == "mlx-community/whisper-tiny-mlx"
        assert opts.language == "en"
        assert opts.task == "transcribe"
        assert opts.word_timestamps is True
        assert AcceleratorDevice.MPS in opts.supported_devices
def test_whisper_models_auto_select_mlx(self):
    """Whisper model specs expose the fields needed for MLX auto-selection."""
    # On Apple Silicon with mlx-whisper installed these specs would resolve
    # to MLX; here we only verify their structure.
    for spec in (WHISPER_TURBO, WHISPER_BASE, WHISPER_SMALL):
        assert hasattr(spec, "inference_framework")
        assert hasattr(spec, "repo_id")
def test_explicit_mlx_models_shape(self):
    """Explicit MLX options should have MLX framework and valid repos."""
    for mlx_spec in (WHISPER_BASE_MLX, WHISPER_LARGE_MLX):
        assert mlx_spec.inference_framework.name == "MLX"
    assert WHISPER_BASE_MLX.repo_id.startswith("mlx-community/")
def test_model_selectors_mlx_and_native_paths(self, monkeypatch):
    """Cover MLX/native selection branches in asr_model_specs getters."""
    from docling.datamodel import asr_model_specs as specs

    # --- MLX path: pretend MPS is usable and mlx_whisper is importable ---
    class _Mps:
        def is_built(self):
            return True

        def is_available(self):
            return True

    class _Torch:
        class backends:
            mps = _Mps()

    monkeypatch.setitem(sys.modules, "torch", _Torch())
    monkeypatch.setitem(sys.modules, "mlx_whisper", object())

    tiny = specs._get_whisper_tiny_model()
    assert tiny.inference_framework == InferenceAsrFramework.MLX
    assert tiny.repo_id.startswith("mlx-community/whisper-tiny")

    small = specs._get_whisper_small_model()
    assert small.inference_framework == InferenceAsrFramework.MLX
    assert small.repo_id.startswith("mlx-community/whisper-small")

    base = specs._get_whisper_base_model()
    assert base.inference_framework == InferenceAsrFramework.MLX
    assert base.repo_id.startswith("mlx-community/whisper-base")

    medium = specs._get_whisper_medium_model()
    assert medium.inference_framework == InferenceAsrFramework.MLX
    assert "medium" in medium.repo_id

    large = specs._get_whisper_large_model()
    assert large.inference_framework == InferenceAsrFramework.MLX
    assert "large" in large.repo_id

    turbo = specs._get_whisper_turbo_model()
    assert turbo.inference_framework == InferenceAsrFramework.MLX
    assert turbo.repo_id.endswith("whisper-turbo")

    # --- Native path: no mlx_whisper module and MPS unavailable ---
    if "mlx_whisper" in sys.modules:
        del sys.modules["mlx_whisper"]

    class _MpsOff:
        def is_built(self):
            return False

        def is_available(self):
            return False

    class _TorchOff:
        class backends:
            mps = _MpsOff()

    monkeypatch.setitem(sys.modules, "torch", _TorchOff())

    native_expectations = [
        (specs._get_whisper_tiny_model, "tiny"),
        (specs._get_whisper_small_model, "small"),
        (specs._get_whisper_base_model, "base"),
        (specs._get_whisper_medium_model, "medium"),
        (specs._get_whisper_large_model, "large"),
        (specs._get_whisper_turbo_model, "turbo"),
    ]
    for getter, native_repo in native_expectations:
        spec = getter()
        assert spec.inference_framework == InferenceAsrFramework.WHISPER
        assert spec.repo_id == native_repo
def test_selector_import_errors_force_native(self, monkeypatch):
    """If torch import fails, selector must return native."""
    from docling.datamodel import asr_model_specs as specs

    # Simulate an environment where MPS is unavailable and mlx_whisper
    # is not importable.
    class _MpsOff:
        def is_built(self):
            return False

        def is_available(self):
            return False

    class _TorchOff:
        class backends:
            mps = _MpsOff()

    monkeypatch.setitem(sys.modules, "torch", _TorchOff())
    if "mlx_whisper" in sys.modules:
        del sys.modules["mlx_whisper"]

    selected = specs._get_whisper_base_model()
    assert selected.inference_framework == InferenceAsrFramework.WHISPER
@patch("builtins.__import__")
def test_mlx_whisper_model_initialization(self, mock_import):
    """The model wrapper should pick up its settings from the options."""
    # Pretend mlx_whisper can be imported.
    mock_import.return_value = Mock()

    asr_options = InlineAsrMlxWhisperOptions(
        repo_id="mlx-community/whisper-tiny-mlx",
        inference_framework=InferenceAsrFramework.MLX,
        language="en",
        task="transcribe",
        word_timestamps=True,
        no_speech_threshold=0.6,
        logprob_threshold=-1.0,
        compression_ratio_threshold=2.4,
    )
    model = _MlxWhisperModel(
        enabled=True,
        artifacts_path=None,
        accelerator_options=AcceleratorOptions(device=AcceleratorDevice.MPS),
        asr_options=asr_options,
    )

    assert model.enabled is True
    assert model.model_path == "mlx-community/whisper-tiny-mlx"
    assert model.language == "en"
    assert model.task == "transcribe"
    assert model.word_timestamps is True
def test_mlx_whisper_model_import_error(self):
    """A missing mlx-whisper package must surface as an ImportError."""
    asr_options = InlineAsrMlxWhisperOptions(
        repo_id="mlx-community/whisper-tiny-mlx",
        inference_framework=InferenceAsrFramework.MLX,
        language="en",
        task="transcribe",
        word_timestamps=True,
        no_speech_threshold=0.6,
        logprob_threshold=-1.0,
        compression_ratio_threshold=2.4,
    )
    broken_import = patch(
        "builtins.__import__",
        side_effect=ImportError("No module named 'mlx_whisper'"),
    )
    with broken_import:
        with pytest.raises(ImportError, match="mlx-whisper is not installed"):
            _MlxWhisperModel(
                enabled=True,
                artifacts_path=None,
                accelerator_options=AcceleratorOptions(device=AcceleratorDevice.MPS),
                asr_options=asr_options,
            )
@patch("builtins.__import__")
def test_mlx_whisper_transcribe(self, mock_import):
    """transcribe() must map mlx_whisper segments onto result items and
    forward the configured options to mlx_whisper.transcribe."""
    fake_mlx = Mock()
    mock_import.return_value = fake_mlx
    # One segment with two word-level timestamps.
    fake_mlx.transcribe.return_value = {
        "segments": [
            {
                "start": 0.0,
                "end": 2.5,
                "text": "Hello world",
                "words": [
                    {"start": 0.0, "end": 0.5, "word": "Hello"},
                    {"start": 0.5, "end": 1.0, "word": "world"},
                ],
            }
        ]
    }

    asr_options = InlineAsrMlxWhisperOptions(
        repo_id="mlx-community/whisper-tiny-mlx",
        inference_framework=InferenceAsrFramework.MLX,
        language="en",
        task="transcribe",
        word_timestamps=True,
        no_speech_threshold=0.6,
        logprob_threshold=-1.0,
        compression_ratio_threshold=2.4,
    )
    model = _MlxWhisperModel(
        enabled=True,
        artifacts_path=None,
        accelerator_options=AcceleratorOptions(device=AcceleratorDevice.MPS),
        asr_options=asr_options,
    )

    audio_path = Path("test_audio.wav")
    segments = model.transcribe(audio_path)

    # Verify the mapped result.
    assert len(segments) == 1
    first = segments[0]
    assert first.start_time == 0.0
    assert first.end_time == 2.5
    assert first.text == "Hello world"
    assert len(first.words) == 2
    assert first.words[0].text == "Hello"
    assert first.words[1].text == "world"

    # Verify mlx_whisper.transcribe was called with the configured options.
    fake_mlx.transcribe.assert_called_once_with(
        str(audio_path),
        path_or_hf_repo="mlx-community/whisper-tiny-mlx",
        language="en",
        task="transcribe",
        word_timestamps=True,
        no_speech_threshold=0.6,
        logprob_threshold=-1.0,
        compression_ratio_threshold=2.4,
    )
@patch("builtins.__import__")
def test_asr_pipeline_with_mlx_whisper(self, mock_import):
    """AsrPipeline should instantiate the MLX model for MLX options."""
    mock_import.return_value = Mock()

    asr_options = InlineAsrMlxWhisperOptions(
        repo_id="mlx-community/whisper-tiny-mlx",
        inference_framework=InferenceAsrFramework.MLX,
        language="en",
        task="transcribe",
        word_timestamps=True,
        no_speech_threshold=0.6,
        logprob_threshold=-1.0,
        compression_ratio_threshold=2.4,
    )
    pipeline = AsrPipeline(
        AsrPipelineOptions(
            asr_options=asr_options,
            accelerator_options=AcceleratorOptions(device=AcceleratorDevice.MPS),
        )
    )

    assert isinstance(pipeline._model, _MlxWhisperModel)
    assert pipeline._model.model_path == "mlx-community/whisper-tiny-mlx"
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_backend_docling_parse_v4.py | tests/test_backend_docling_parse_v4.py | from pathlib import Path
import pytest
from docling.backend.docling_parse_v4_backend import (
DoclingParseV4DocumentBackend,
DoclingParseV4PageBackend,
)
from docling.datamodel.base_models import BoundingBox, InputFormat
from docling.datamodel.document import InputDocument
@pytest.fixture
def test_doc_path():
    """Path of the default sample PDF (the DocLayNet paper)."""
    return Path("./tests/data/pdf/2206.01062.pdf")
def _get_backend(pdf_doc):
    """Build a DoclingParseV4 document backend for the given PDF path."""
    input_doc = InputDocument(
        path_or_stream=pdf_doc,
        format=InputFormat.PDF,
        backend=DoclingParseV4DocumentBackend,
    )
    return input_doc._backend
def test_text_cell_counts():
    """Loading the same page repeatedly must yield a stable text-cell count.

    Exercises every page of the sample PDF; the previous version always
    reloaded page 0, so only the first page was actually covered.
    """
    pdf_doc = Path("./tests/data/pdf/redp5110_sampled.pdf")
    doc_backend = _get_backend(pdf_doc)

    for page_index in range(doc_backend.page_count()):
        last_cell_count = None
        for _ in range(10):
            # BUG FIX: load the page under test, not always page 0.
            page_backend: DoclingParseV4PageBackend = doc_backend.load_page(page_index)
            cell_count = len(list(page_backend.get_text_cells()))
            if last_cell_count is not None and cell_count != last_cell_count:
                # raise instead of `assert False` so the check survives -O.
                raise AssertionError(
                    "Loading page multiple times yielded non-identical text cell counts"
                )
            last_cell_count = cell_count
            # Clean up page backend after each iteration
            page_backend.unload()

    # Explicitly clean up document backend to prevent race conditions in CI
    doc_backend.unload()
def test_get_text_from_rect(test_doc_path):
    """The title of the DocLayNet paper can be read from its bounding box."""
    doc_backend = _get_backend(test_doc_path)
    page_backend: DoclingParseV4PageBackend = doc_backend.load_page(0)

    # Get the title text of the DocLayNet paper
    extracted = page_backend.get_text_in_rect(
        bbox=BoundingBox(l=102, t=77, r=511, b=124)
    )
    expected = "DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis"
    assert extracted.strip() == expected

    # Explicitly clean up resources
    page_backend.unload()
    doc_backend.unload()
def test_crop_page_image(test_doc_path):
    """Cropping a scaled page image must succeed without raising."""
    doc_backend = _get_backend(test_doc_path)
    page_backend: DoclingParseV4PageBackend = doc_backend.load_page(0)

    # Crop out "Figure 1" from the DocLayNet paper
    page_backend.get_page_image(
        scale=2, cropbox=BoundingBox(l=317, t=246, r=574, b=527)
    )

    # Explicitly clean up resources
    page_backend.unload()
    doc_backend.unload()
def test_num_pages(test_doc_path):
    """The sample paper has exactly 9 pages."""
    doc_backend = _get_backend(test_doc_path)
    # BUG FIX: the comparison result was previously discarded (no assert),
    # so this test could never fail.
    assert doc_backend.page_count() == 9
    # Explicitly clean up resources to prevent race conditions in CI
    doc_backend.unload()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_backend_msword.py | tests/test_backend_msword.py | import logging
import os
from pathlib import Path
import pytest
from docling_core.types.doc import GroupItem
from docling.backend.docx.drawingml.utils import get_libreoffice_cmd
from docling.backend.msword_backend import MsWordDocumentBackend
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import (
ConversionResult,
DoclingDocument,
InputDocument,
SectionHeaderItem,
TextItem,
)
from docling.document_converter import DocumentConverter
from .test_data_gen_flag import GEN_TEST_DATA
from .verify_utils import verify_document, verify_export
# Module-level logger for conversion progress messages.
_log = logging.getLogger(__name__)
# Mirrors the shared GEN_TEST_DATA flag; passed as `generate=` to the verify helpers.
GENERATE = GEN_TEST_DATA
# True when running under CI (any non-empty "CI" environment variable).
IS_CI = bool(os.getenv("CI"))
@pytest.fixture(scope="module")
def docx_paths() -> list[Path]:
    """All DOCX sample files under tests/data/docx, sorted for determinism."""
    directory = Path("./tests/data/docx/")
    return sorted(directory.rglob("*.docx"))
def get_converter():
    """A converter restricted to the DOCX input format."""
    return DocumentConverter(allowed_formats=[InputFormat.DOCX])
@pytest.fixture(scope="module")
def documents(docx_paths) -> list[tuple[Path, DoclingDocument]]:
    """Convert every sample DOCX once and pair it with its groundtruth path.

    Returns:
        A list of (groundtruth_path, converted_document) tuples.
    """
    # FIX: the local annotation previously read list[dict[...]] although
    # tuples are appended; it now matches the declared return type.
    converted: list[tuple[Path, DoclingDocument]] = []
    converter = get_converter()
    for docx_path in docx_paths:
        _log.debug(f"converting {docx_path}")
        gt_path = (
            docx_path.parent.parent / "groundtruth" / "docling_v2" / docx_path.name
        )
        conv_result: ConversionResult = converter.convert(docx_path)
        doc: DoclingDocument = conv_result.document
        assert doc, f"Failed to convert document from file {gt_path}"
        converted.append((gt_path, doc))
    return converted
def _test_e2e_docx_conversions_impl(docx_paths: list[tuple[Path, DoclingDocument]]):
    """Verify md/itxt/json (and selected html) exports against groundtruth."""
    # Best-effort probe: drawingml.docx can only be handled with LibreOffice.
    try:
        has_libreoffice = get_libreoffice_cmd(raise_if_unavailable=True) is not None
    except Exception:
        has_libreoffice = False

    for docx_path, doc in docx_paths:
        if not IS_CI and not has_libreoffice and docx_path.name == "drawingml.docx":
            print(f"Skipping {docx_path} because no Libreoffice is installed.")
            continue

        pred_md: str = doc.export_to_markdown()
        assert verify_export(pred_md, str(docx_path) + ".md", generate=GENERATE), (
            f"export to markdown failed on {docx_path}"
        )

        pred_itxt: str = doc._export_to_indented_text(
            max_text_len=70, explicit_tables=False
        )
        assert verify_export(pred_itxt, str(docx_path) + ".itxt", generate=GENERATE), (
            f"export to indented-text failed on {docx_path}"
        )

        assert verify_document(doc, str(docx_path) + ".json", generate=GENERATE), (
            f"DoclingDocument verification failed on {docx_path}"
        )

        # HTML groundtruth exists only for the tables sample.
        if docx_path.name == "word_tables.docx":
            pred_html: str = doc.export_to_html()
            assert verify_export(
                pred_text=pred_html,
                gtfile=str(docx_path) + ".html",
                generate=GENERATE,
            ), f"export to html failed on {docx_path}"
# Known-flaky sample; the tests exercising it below are marked xfail(strict=False).
flaky_file = "textbox.docx"
def test_e2e_docx_conversions(documents):
    """Run the e2e export checks on every sample except the known-flaky file."""
    stable = [pair for pair in documents if pair[0].name != flaky_file]
    _test_e2e_docx_conversions_impl(stable)
@pytest.mark.xfail(strict=False)
def test_textbox_conversion(documents):
    """Run the e2e export checks on the known-flaky textbox sample only."""
    flaky = [pair for pair in documents if pair[0].name == flaky_file]
    _test_e2e_docx_conversions_impl(flaky)
@pytest.mark.xfail(strict=False)
def test_textbox_extraction(documents):
    """A known textbox's content must appear among the extracted items."""
    doc = next(d for path, d in documents if path.name == "textbox.docx")
    # Verify if a particular textbox content is extracted
    found = False
    for item, _ in doc.iterate_items():
        if item.text[:30] == "Suggested Reportable Symptoms:":
            found = True
    assert found
def test_heading_levels(documents):
    """Section headers in word_sample.docx carry the right heading levels."""
    doc = next(d for path, d in documents if path.name == "word_sample.docx")
    expected_levels = {"Let\u2019s swim!": 1, "Let\u2019s eat": 2}
    seen: dict[str, int] = {}
    for item, _ in doc.iterate_items():
        if isinstance(item, SectionHeaderItem) and item.text in expected_levels:
            seen[item.text] = item.level
    # Both headers must be present with the expected levels.
    assert seen == expected_levels
def test_text_after_image_anchors(documents):
    """Test to analyse whether text gets parsed after image anchors."""
    doc = next(d for path, d in documents if path.name == "word_image_anchors.docx")
    expected_texts = {
        "This is test 1",
        "0:08\nCorrect, he is not.",
        "This is test 2",
        "0:16\nYeah, exactly.",
    }
    found = {
        item.text
        for item, _ in doc.iterate_items()
        if isinstance(item, TextItem) and item.text in expected_texts
    }
    # Every expected snippet must be extracted after its image anchor.
    assert found == expected_texts
def test_is_rich_table_cell(docx_paths):
    """Test the function is_rich_table_cell."""
    name = "docx_rich_cells.docx"
    path = next(p for p in docx_paths if p.name == name)
    in_doc = InputDocument(
        path_or_stream=path,
        format=InputFormat.DOCX,
        backend=MsWordDocumentBackend,
        filename=name,
    )
    backend = MsWordDocumentBackend(in_doc=in_doc, path_or_stream=path)

    # Expected richness per cell, tables in document order.
    expected = (
        # table: Table with rich cells
        [False, False, True, True, True, True, True, False]
        # table: Table with nested table
        + [False, False, False, True, True, True]
        # table: Table with pictures
        + [False, False, False, True, True, False]
    )

    gt_it = iter(expected)
    for idx_t, table in enumerate(backend.docx_obj.tables):
        for idx_r, row in enumerate(table.rows):
            for idx_c, cell in enumerate(row.cells):
                assert next(gt_it) == backend._is_rich_table_cell(cell), (
                    f"Wrong cell type in table {idx_t}, row {idx_r}, col {idx_c} "
                    f"with text: {cell.text}"
                )
def test_add_header_footer(documents):
    """Test the function _add_header_footer."""
    doc = next(d for path, d in documents if path.name == "unit_test_formatting.docx")

    headers: list[GroupItem] = [
        g for g in doc.groups if isinstance(g, GroupItem) and g.name == "page header"
    ]
    footers: list[GroupItem] = [
        g for g in doc.groups if isinstance(g, GroupItem) and g.name == "page footer"
    ]

    assert len(headers) == 2, "Expected 2 different headers"
    assert len(footers) == 2, "Expected 2 different footers"
    assert len(headers[0].children) == 1, "First page header should have 1 paragraph"
    assert len(headers[1].children) == 2, "Second page header should have 2 paragraphs"
    assert len(footers[0].children) == 1, "First page footer should have 1 paragraph"
    assert len(footers[1].children) == 4, (
        "Second page footer should have 3 paragraphs and 1 picture"
    )
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_backend_markdown.py | tests/test_backend_markdown.py | from pathlib import Path
from docling.backend.md_backend import MarkdownDocumentBackend
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import (
ConversionResult,
DoclingDocument,
InputDocument,
)
from docling.document_converter import DocumentConverter
from tests.verify_utils import CONFID_PREC, COORD_PREC
from .test_data_gen_flag import GEN_TEST_DATA
from .verify_utils import verify_document
# Mirrors the shared GEN_TEST_DATA flag; passed as `generate=` to the verify helpers.
GENERATE = GEN_TEST_DATA
def test_convert_valid():
    """Convert every sample Markdown file and check md/yaml/json groundtruth."""
    fmt = InputFormat.MD
    backend_cls = MarkdownDocumentBackend
    root_path = Path("tests") / "data"
    sample_paths = sorted((root_path / "md").rglob("*.md"))
    assert len(sample_paths) > 0

    # Samples that additionally have yaml / json groundtruth files.
    yaml_filter = ["inline_and_formatting", "mixed_without_h1"]
    json_filter = ["escaped_characters"]

    for in_path in sample_paths:
        gt_base = root_path / "groundtruth" / "docling_v2"
        md_gt_path = gt_base / f"{in_path.name}.md"
        yaml_gt_path = gt_base / f"{in_path.name}.yaml"
        json_gt_path = gt_base / f"{in_path.name}.json"

        in_doc = InputDocument(
            path_or_stream=in_path,
            format=fmt,
            backend=backend_cls,
        )
        backend = backend_cls(in_doc=in_doc, path_or_stream=in_path)
        assert backend.is_valid()

        act_doc = backend.convert()
        act_data = act_doc.export_to_markdown()

        if in_path.stem in json_filter:
            assert verify_document(act_doc, json_gt_path, GENERATE), "export to json"

        if GEN_TEST_DATA:
            # Regenerate groundtruth instead of comparing.
            with open(md_gt_path, mode="w", encoding="utf-8") as f:
                f.write(f"{act_data}\n")
            if in_path.stem in yaml_filter:
                act_doc.save_as_yaml(
                    yaml_gt_path,
                    coord_precision=COORD_PREC,
                    confid_precision=CONFID_PREC,
                )
        else:
            with open(md_gt_path, encoding="utf-8") as f:
                exp_data = f.read().rstrip()
            assert act_data == exp_data
            if in_path.stem in yaml_filter:
                exp_doc = DoclingDocument.load_from_yaml(yaml_gt_path)
                assert act_doc == exp_doc, f"export to yaml failed on {in_path}"
def get_md_paths():
    """All groundtruth markdown files, sorted for determinism."""
    directory = Path("./tests/groundtruth/docling_v2")
    return sorted(directory.rglob("*.md"))
def get_converter():
    """A converter restricted to Markdown input."""
    return DocumentConverter(allowed_formats=[InputFormat.MD])
def test_e2e_md_conversions():
    """Round-trip check: converting groundtruth markdown must reproduce it
    exactly, both from a file path and from an in-memory string."""
    md_paths = get_md_paths()
    converter = get_converter()

    for md_path in md_paths:
        # FIX: read with an explicit encoding, consistent with the rest of
        # this module, so the comparison is platform-independent.
        with open(md_path, encoding="utf-8") as fr:
            true_md = fr.read()

        conv_result: ConversionResult = converter.convert(md_path)
        doc: DoclingDocument = conv_result.document
        pred_md: str = doc.export_to_markdown()
        assert true_md == pred_md

        conv_result_: ConversionResult = converter.convert_string(
            true_md, format=InputFormat.MD
        )
        doc_: DoclingDocument = conv_result_.document
        pred_md_: str = doc_.export_to_markdown()
        assert true_md == pred_md_
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_extraction.py | tests/test_extraction.py | """
Test unit for document extraction functionality.
"""
import os
from pathlib import Path
import pytest
from pydantic import BaseModel, Field
from docling.datamodel.base_models import InputFormat
from docling.document_converter import DocumentConverter
from docling.document_extractor import DocumentExtractor
# True when running under CI (any non-empty "CI" environment variable).
IS_CI = bool(os.getenv("CI"))
class ExampleTemplate(BaseModel):
    """Extraction template used by the tests below.

    Field examples guide the extractor without revealing the expected
    values of the test sample; ``total`` additionally carries a default.
    """

    # provide some examples, but not the actual value of the test sample
    bill_no: str = Field(examples=["A123", "5414"])
    # provide a default value and some examples
    total: float = Field(default=10.0, examples=[20.0])
@pytest.fixture
def extractor() -> DocumentExtractor:
    """Create a document extractor for image and PDF inputs."""
    return DocumentExtractor(allowed_formats=[InputFormat.IMAGE, InputFormat.PDF])
@pytest.fixture
def test_file_path() -> Path:
    """Get the path to the test QR bill image."""
    return Path(__file__).parent / "data_scanned" / "qr_bill_example.jpg"
@pytest.mark.skipif(
    IS_CI, reason="Skipping test in CI because the dataset is too heavy."
)
def test_extraction_with_string_template(
    extractor: DocumentExtractor, test_file_path: Path
) -> None:
    """Test extraction using string template."""
    str_templ = '{"bill_no": "string", "total": "number"}'
    result = extractor.extract(test_file_path, template=str_templ)
    # FIX: removed a leftover debug print of result.pages.
    assert result.status is not None
    assert len(result.pages) == 1
    assert result.pages[0].extracted_data["bill_no"] == "3139"
    assert result.pages[0].extracted_data["total"] == 3949.75
@pytest.mark.skipif(
    IS_CI, reason="Skipping test in CI because the dataset is too heavy."
)
def test_extraction_with_dict_template(
    extractor: DocumentExtractor, test_file_path: Path
) -> None:
    """Test extraction using dictionary template."""
    result = extractor.extract(
        test_file_path, template={"bill_no": "string", "total": "number"}
    )
    assert len(result.pages) == 1
    page = result.pages[0]
    assert page.extracted_data["bill_no"] == "3139"
    assert page.extracted_data["total"] == 3949.75
@pytest.mark.skipif(
    IS_CI, reason="Skipping test in CI because the dataset is too heavy."
)
def test_extraction_with_pydantic_instance_template(
    extractor: DocumentExtractor, test_file_path: Path
) -> None:
    """Test extraction using pydantic instance template."""
    template = ExampleTemplate(bill_no="4321")
    result = extractor.extract(test_file_path, template=template)
    assert len(result.pages) == 1
    page = result.pages[0]
    assert page.extracted_data["bill_no"] == "3139"
    assert page.extracted_data["total"] == 3949.75
@pytest.mark.skipif(
    IS_CI, reason="Skipping test in CI because the dataset is too heavy."
)
def test_extraction_with_pydantic_class_template(
    extractor: DocumentExtractor, test_file_path: Path
) -> None:
    """Test extraction using pydantic class template."""
    result = extractor.extract(test_file_path, template=ExampleTemplate)
    assert len(result.pages) == 1
    page = result.pages[0]
    assert page.extracted_data["bill_no"] == "3139"
    assert page.extracted_data["total"] == 3949.75
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_backend_pptx.py | tests/test_backend_pptx.py | from pathlib import Path
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import ConversionResult, DoclingDocument
from docling.document_converter import DocumentConverter
from .test_data_gen_flag import GEN_TEST_DATA
from .verify_utils import verify_document, verify_export
# Mirrors the shared GEN_TEST_DATA flag; passed to the verify helpers.
GENERATE = GEN_TEST_DATA
def get_pptx_paths():
    """All PPTX sample files under tests/data/pptx, sorted for determinism."""
    directory = Path("./tests/data/pptx/")
    return sorted(directory.rglob("*.pptx"))
def get_converter():
    """A converter restricted to the PPTX input format."""
    return DocumentConverter(allowed_formats=[InputFormat.PPTX])
def test_e2e_pptx_conversions():
    """Convert every sample PPTX and verify md/itxt/json exports against
    the stored groundtruth."""
    pptx_paths = get_pptx_paths()
    converter = get_converter()

    for pptx_path in pptx_paths:
        gt_path = (
            pptx_path.parent.parent / "groundtruth" / "docling_v2" / pptx_path.name
        )
        conv_result: ConversionResult = converter.convert(pptx_path)
        doc: DoclingDocument = conv_result.document

        pred_md: str = doc.export_to_markdown()
        assert verify_export(pred_md, str(gt_path) + ".md", GENERATE), "export to md"

        pred_itxt: str = doc._export_to_indented_text(
            max_text_len=70, explicit_tables=False
        )
        assert verify_export(pred_itxt, str(gt_path) + ".itxt", GENERATE), (
            "export to indented-text"
        )

        # FIX: assertion message previously read the garbled "document document".
        assert verify_document(doc, str(gt_path) + ".json", GENERATE), (
            "document verification"
        )
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_threaded_pipeline.py | tests/test_threaded_pipeline.py | import logging
import time
from pathlib import Path
from typing import List
import pytest
from docling.backend.pypdfium2_backend import PyPdfiumDocumentBackend
from docling.datamodel.base_models import ConversionStatus, InputFormat
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import (
PdfPipelineOptions,
ThreadedPdfPipelineOptions,
)
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.standard_pdf_pipeline import StandardPdfPipeline
from docling.pipeline.threaded_standard_pdf_pipeline import ThreadedStandardPdfPipeline
def test_threaded_pipeline_multiple_documents():
    """Test threaded pipeline with multiple documents and compare with standard pipeline"""
    test_files = [
        "tests/data/pdf/2203.01017v2.pdf",
        "tests/data/pdf/2206.01062.pdf",
        "tests/data/pdf/2305.03393v1.pdf",
    ]
    # Feature toggles, kept as flags so runs are easy to disable manually.
    do_ts = False
    do_ocr = False
    run_threaded = True
    run_serial = True

    if run_threaded:
        # Threaded pipeline with minimal batch sizes to exercise queueing.
        threaded_converter = DocumentConverter(
            format_options={
                InputFormat.PDF: PdfFormatOption(
                    pipeline_cls=ThreadedStandardPdfPipeline,
                    pipeline_options=ThreadedPdfPipelineOptions(
                        layout_batch_size=1,
                        table_batch_size=1,
                        ocr_batch_size=1,
                        batch_polling_interval_seconds=1.0,
                        do_table_structure=do_ts,
                        do_ocr=do_ocr,
                    ),
                )
            }
        )
        threaded_converter.initialize_pipeline(InputFormat.PDF)

        threaded_success_count = 0
        threaded_failure_count = 0
        started = time.perf_counter()
        for result in threaded_converter.convert_all(test_files, raises_on_error=True):
            print(
                "Finished converting document with threaded pipeline:",
                result.input.file.name,
            )
            if result.status == ConversionStatus.SUCCESS:
                threaded_success_count += 1
            else:
                threaded_failure_count += 1
        threaded_time = time.perf_counter() - started
        del threaded_converter
        print(f"Threaded pipeline: {threaded_time:.2f} seconds")

    if run_serial:
        # Standard (serial) pipeline for comparison.
        standard_converter = DocumentConverter(
            format_options={
                InputFormat.PDF: PdfFormatOption(
                    pipeline_cls=StandardPdfPipeline,
                    pipeline_options=PdfPipelineOptions(
                        do_table_structure=do_ts,
                        do_ocr=do_ocr,
                    ),
                )
            }
        )
        standard_converter.initialize_pipeline(InputFormat.PDF)

        standard_success_count = 0
        standard_failure_count = 0
        started = time.perf_counter()
        for result in standard_converter.convert_all(test_files, raises_on_error=True):
            print(
                "Finished converting document with standard pipeline:",
                result.input.file.name,
            )
            if result.status == ConversionStatus.SUCCESS:
                standard_success_count += 1
            else:
                standard_failure_count += 1
        standard_time = time.perf_counter() - started
        del standard_converter
        print(f"Standard pipeline: {standard_time:.2f} seconds")

    # Verify results
    if run_threaded and run_serial:
        assert standard_success_count == threaded_success_count
        assert standard_failure_count == threaded_failure_count
    if run_serial:
        assert standard_success_count == len(test_files)
        assert standard_failure_count == 0
    if run_threaded:
        assert threaded_success_count == len(test_files)
        assert threaded_failure_count == 0
def test_pipeline_comparison():
"""Compare all three pipeline implementations"""
test_file = "tests/data/pdf/2206.01062.pdf"
# Sync pipeline
sync_converter = DocumentConverter(
format_options={
InputFormat.PDF: PdfFormatOption(
pipeline_cls=StandardPdfPipeline,
)
}
)
start_time = time.perf_counter()
sync_results = list(sync_converter.convert_all([test_file]))
sync_time = time.perf_counter() - start_time
# Threaded pipeline
threaded_converter = DocumentConverter(
format_options={
InputFormat.PDF: PdfFormatOption(
pipeline_cls=ThreadedStandardPdfPipeline,
pipeline_options=ThreadedPdfPipelineOptions(
layout_batch_size=1,
ocr_batch_size=1,
table_batch_size=1,
),
)
}
)
start_time = time.perf_counter()
threaded_results = list(threaded_converter.convert_all([test_file]))
threaded_time = time.perf_counter() - start_time
print("\nPipeline Comparison:")
print(f"Sync pipeline: {sync_time:.2f} seconds")
print(f"Threaded pipeline: {threaded_time:.2f} seconds")
print(f"Speedup: {sync_time / threaded_time:.2f}x")
# Verify results are equivalent
assert len(sync_results) == len(threaded_results) == 1
assert (
sync_results[0].status == threaded_results[0].status == ConversionStatus.SUCCESS
)
# Basic content comparison
sync_doc = sync_results[0].document
threaded_doc = threaded_results[0].document
assert len(sync_doc.pages) == len(threaded_doc.pages)
assert len(sync_doc.texts) == len(threaded_doc.texts)
def test_pypdfium_threaded_pipeline():
doc_converter = (
DocumentConverter( # all of the below is optional, has internal defaults.
format_options={
InputFormat.PDF: PdfFormatOption(
pipeline_cls=ThreadedStandardPdfPipeline,
backend=PyPdfiumDocumentBackend,
),
},
)
)
test_file = "tests/data/pdf/2206.01062.pdf"
for i in range(6):
print(f"iteration {i=}")
conv_result = doc_converter.convert(test_file)
assert conv_result.status == ConversionStatus.SUCCESS
print(f"[{i=}] Success")
print("All done!")
if __name__ == "__main__":
# Run basic performance test
test_pipeline_comparison()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_document_picture_classifier.py | tests/test_document_picture_classifier.py | from pathlib import Path
from docling_core.types.doc import PictureClassificationData
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.standard_pdf_pipeline import StandardPdfPipeline
def get_converter():
pipeline_options = PdfPipelineOptions()
pipeline_options.generate_page_images = True
pipeline_options.do_ocr = False
pipeline_options.do_table_structure = False
pipeline_options.do_code_enrichment = False
pipeline_options.do_formula_enrichment = False
pipeline_options.generate_picture_images = False
pipeline_options.generate_page_images = False
pipeline_options.do_picture_classification = True
pipeline_options.images_scale = 2
converter = DocumentConverter(
format_options={
InputFormat.PDF: PdfFormatOption(
pipeline_cls=StandardPdfPipeline,
pipeline_options=pipeline_options,
)
}
)
return converter
def test_picture_classifier():
pdf_path = Path("tests/data/pdf/picture_classification.pdf")
converter = get_converter()
print(f"converting {pdf_path}")
doc_result: ConversionResult = converter.convert(pdf_path)
results = doc_result.document.pictures
assert len(results) == 2
res = results[0]
assert len(res.annotations) == 1
assert isinstance(res.annotations[0], PictureClassificationData)
classification_data = res.annotations[0]
assert classification_data.provenance == "DocumentPictureClassifier"
assert len(classification_data.predicted_classes) == 16, (
"Number of predicted classes is not equal to 16"
)
confidences = [pred.confidence for pred in classification_data.predicted_classes]
assert confidences == sorted(confidences, reverse=True), (
"Predictions are not sorted in descending order of confidence"
)
assert classification_data.predicted_classes[0].class_name == "bar_chart", (
"The prediction is wrong for the bar chart image."
)
res = results[1]
assert len(res.annotations) == 1
assert isinstance(res.annotations[0], PictureClassificationData)
classification_data = res.annotations[0]
assert classification_data.provenance == "DocumentPictureClassifier"
assert len(classification_data.predicted_classes) == 16, (
"Number of predicted classes is not equal to 16"
)
confidences = [pred.confidence for pred in classification_data.predicted_classes]
assert confidences == sorted(confidences, reverse=True), (
"Predictions are not sorted in descending order of confidence"
)
assert classification_data.predicted_classes[0].class_name == "map", (
"The prediction is wrong for the bar chart image."
)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_settings_load.py | tests/test_settings_load.py | import os
def _setup_env():
os.environ["DOCLING_PERF_PAGE_BATCH_SIZE"] = "12"
os.environ["DOCLING_DEBUG_VISUALIZE_RAW_LAYOUT"] = "True"
os.environ["DOCLING_ARTIFACTS_PATH"] = "/path/to/artifacts"
def test_settings():
_setup_env()
import importlib
import docling.datamodel.settings as m
# Reinitialize settings module
importlib.reload(m)
# Check top level setting
assert str(m.settings.artifacts_path) == "/path/to/artifacts"
# Check nested set via environment variables
assert m.settings.perf.page_batch_size == 12
assert m.settings.debug.visualize_raw_layout is True
# Check nested defaults
assert m.settings.perf.doc_batch_size == 1
assert m.settings.debug.visualize_ocr is False
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_options.py | tests/test_options.py | import os
from pathlib import Path
import pytest
from docling.backend.docling_parse_backend import DoclingParseDocumentBackend
from docling.backend.docling_parse_v2_backend import DoclingParseV2DocumentBackend
from docling.backend.docling_parse_v4_backend import DoclingParseV4DocumentBackend
from docling.backend.pypdfium2_backend import PyPdfiumDocumentBackend
from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
from docling.datamodel.base_models import ConversionStatus, InputFormat, QualityGrade
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import (
PdfPipelineOptions,
TableFormerMode,
)
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.legacy_standard_pdf_pipeline import LegacyStandardPdfPipeline
@pytest.fixture
def test_doc_path():
return Path("./tests/data/pdf/2206.01062.pdf")
def get_converters_with_table_options():
for cell_matching in [True, False]:
for mode in [TableFormerMode.FAST, TableFormerMode.ACCURATE]:
pipeline_options = PdfPipelineOptions()
pipeline_options.do_ocr = False
pipeline_options.do_table_structure = True
pipeline_options.table_structure_options.do_cell_matching = cell_matching
pipeline_options.table_structure_options.mode = mode
converter = DocumentConverter(
format_options={
InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options)
}
)
yield converter
def test_accelerator_options():
# Check the default options
ao = AcceleratorOptions()
assert ao.num_threads == 4, "Wrong default num_threads"
assert ao.device == AcceleratorDevice.AUTO, "Wrong default device"
# Use API
ao2 = AcceleratorOptions(num_threads=2, device=AcceleratorDevice.MPS)
ao3 = AcceleratorOptions(num_threads=3, device=AcceleratorDevice.CUDA)
assert ao2.num_threads == 2
assert ao2.device == AcceleratorDevice.MPS
assert ao3.num_threads == 3
assert ao3.device == AcceleratorDevice.CUDA
# Use envvars (regular + alternative) and default values
os.environ["OMP_NUM_THREADS"] = "1"
ao.__init__()
assert ao.num_threads == 1
assert ao.device == AcceleratorDevice.AUTO
os.environ["DOCLING_DEVICE"] = "cpu"
ao.__init__()
assert ao.device == AcceleratorDevice.CPU
assert ao.num_threads == 1
# Use envvars and override in init
os.environ["DOCLING_DEVICE"] = "cpu"
ao4 = AcceleratorOptions(num_threads=5, device=AcceleratorDevice.MPS)
assert ao4.num_threads == 5
assert ao4.device == AcceleratorDevice.MPS
# Use regular and alternative envvar
os.environ["DOCLING_NUM_THREADS"] = "2"
ao5 = AcceleratorOptions()
assert ao5.num_threads == 2
assert ao5.device == AcceleratorDevice.CPU
# Use wrong values
is_exception = False
try:
os.environ["DOCLING_DEVICE"] = "wrong"
ao5.__init__()
except Exception as ex:
print(ex)
is_exception = True
assert is_exception
# Use misformatted alternative envvar
del os.environ["DOCLING_NUM_THREADS"]
del os.environ["DOCLING_DEVICE"]
os.environ["OMP_NUM_THREADS"] = "wrong"
ao6 = AcceleratorOptions()
assert ao6.num_threads == 4
assert ao6.device == AcceleratorDevice.AUTO
def test_e2e_conversions(test_doc_path):
for converter in get_converters_with_table_options():
print(f"converting {test_doc_path}")
doc_result: ConversionResult = converter.convert(test_doc_path)
assert doc_result.status == ConversionStatus.SUCCESS
def test_page_range(test_doc_path):
converter = DocumentConverter()
doc_result: ConversionResult = converter.convert(test_doc_path, page_range=(9, 9))
assert doc_result.status == ConversionStatus.SUCCESS
assert doc_result.input.page_count == 9
assert doc_result.document.num_pages() == 1
doc_result: ConversionResult = converter.convert(
test_doc_path, page_range=(10, 10), raises_on_error=False
)
assert doc_result.status == ConversionStatus.FAILURE
def test_document_timeout(test_doc_path):
converter = DocumentConverter(
format_options={
InputFormat.PDF: PdfFormatOption(
pipeline_options=PdfPipelineOptions(document_timeout=1)
)
}
)
result = converter.convert(test_doc_path)
assert result.status == ConversionStatus.PARTIAL_SUCCESS, (
"Expected document timeout to be used"
)
converter = DocumentConverter(
format_options={
InputFormat.PDF: PdfFormatOption(
pipeline_options=PdfPipelineOptions(document_timeout=1),
pipeline_cls=LegacyStandardPdfPipeline,
)
}
)
result = converter.convert(test_doc_path)
assert result.status == ConversionStatus.PARTIAL_SUCCESS, (
"Expected document timeout to be used"
)
def test_ocr_coverage_threshold(test_doc_path):
pipeline_options = PdfPipelineOptions()
pipeline_options.do_ocr = True
pipeline_options.ocr_options.bitmap_area_threshold = 1.1
converter = DocumentConverter(
format_options={
InputFormat.PDF: PdfFormatOption(
pipeline_options=pipeline_options,
)
}
)
test_doc_path = Path("./tests/data_scanned/ocr_test.pdf")
doc_result: ConversionResult = converter.convert(test_doc_path)
# this should have generated no results, since we set a very high threshold
assert len(doc_result.document.texts) == 0
def test_parser_backends(test_doc_path):
pipeline_options = PdfPipelineOptions()
pipeline_options.do_ocr = False
pipeline_options.do_table_structure = False
for backend_t in [
DoclingParseV4DocumentBackend,
DoclingParseV2DocumentBackend,
DoclingParseDocumentBackend,
PyPdfiumDocumentBackend,
]:
converter = DocumentConverter(
format_options={
InputFormat.PDF: PdfFormatOption(
pipeline_options=pipeline_options,
backend=backend_t,
)
}
)
test_doc_path = Path("./tests/data/pdf/code_and_formula.pdf")
doc_result: ConversionResult = converter.convert(test_doc_path)
assert doc_result.status == ConversionStatus.SUCCESS
def test_confidence(test_doc_path):
converter = DocumentConverter()
doc_result: ConversionResult = converter.convert(test_doc_path, page_range=(6, 9))
assert doc_result.confidence.mean_grade == QualityGrade.EXCELLENT
assert doc_result.confidence.low_grade == QualityGrade.EXCELLENT
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_backend_image_native.py | tests/test_backend_image_native.py | from io import BytesIO
from pathlib import Path
import pytest
from docling_core.types.doc import BoundingBox, CoordOrigin
from PIL import Image
from docling.backend.image_backend import ImageDocumentBackend, _ImagePageBackend
from docling.datamodel.base_models import DocumentStream, InputFormat
from docling.datamodel.document import InputDocument, _DocumentConversionInput
from docling.document_converter import DocumentConverter, ImageFormatOption
from docling.document_extractor import DocumentExtractor
def _make_png_stream(
width: int = 64, height: int = 48, color=(123, 45, 67)
) -> DocumentStream:
img = Image.new("RGB", (width, height), color)
buf = BytesIO()
img.save(buf, format="PNG")
buf.seek(0)
return DocumentStream(name="test.png", stream=buf)
def _make_multipage_tiff_stream(num_pages: int = 3, size=(32, 32)) -> DocumentStream:
frames = [
Image.new("RGB", size, (i * 10 % 255, i * 20 % 255, i * 30 % 255))
for i in range(num_pages)
]
buf = BytesIO()
frames[0].save(buf, format="TIFF", save_all=True, append_images=frames[1:])
buf.seek(0)
return DocumentStream(name="test.tiff", stream=buf)
def test_docs_builder_uses_image_backend_for_image_stream():
stream = _make_png_stream()
conv_input = _DocumentConversionInput(path_or_stream_iterator=[stream])
# Provide format options mapping that includes IMAGE -> ImageFormatOption (which carries ImageDocumentBackend)
format_options = {InputFormat.IMAGE: ImageFormatOption()}
docs = list(conv_input.docs(format_options))
assert len(docs) == 1
in_doc = docs[0]
assert in_doc.format == InputFormat.IMAGE
assert isinstance(in_doc._backend, ImageDocumentBackend)
assert in_doc.page_count == 1
def test_docs_builder_multipage_tiff_counts_frames():
stream = _make_multipage_tiff_stream(num_pages=4)
conv_input = _DocumentConversionInput(path_or_stream_iterator=[stream])
format_options = {InputFormat.IMAGE: ImageFormatOption()}
in_doc = next(conv_input.docs(format_options))
assert isinstance(in_doc._backend, ImageDocumentBackend)
assert in_doc.page_count == 4
def test_converter_default_maps_image_to_image_backend():
converter = DocumentConverter(allowed_formats=[InputFormat.IMAGE])
backend_cls = converter.format_to_options[InputFormat.IMAGE].backend
assert backend_cls is ImageDocumentBackend
def test_extractor_default_maps_image_to_image_backend():
extractor = DocumentExtractor(allowed_formats=[InputFormat.IMAGE])
backend_cls = extractor.extraction_format_to_options[InputFormat.IMAGE].backend
assert backend_cls is ImageDocumentBackend
def _get_backend_from_stream(stream: DocumentStream):
"""Helper to create InputDocument with ImageDocumentBackend from a stream."""
in_doc = InputDocument(
path_or_stream=stream.stream,
format=InputFormat.IMAGE,
backend=ImageDocumentBackend,
filename=stream.name,
)
return in_doc._backend
def test_num_pages_single():
"""Test page count for single-page image."""
stream = _make_png_stream(width=100, height=80)
doc_backend = _get_backend_from_stream(stream)
assert doc_backend.page_count() == 1
def test_num_pages_multipage():
"""Test page count for multi-page TIFF."""
stream = _make_multipage_tiff_stream(num_pages=5, size=(64, 64))
doc_backend = _get_backend_from_stream(stream)
assert doc_backend.page_count() == 5
def test_get_size():
"""Test getting page size."""
width, height = 120, 90
stream = _make_png_stream(width=width, height=height)
doc_backend = _get_backend_from_stream(stream)
page_backend: _ImagePageBackend = doc_backend.load_page(0)
size = page_backend.get_size()
assert size.width == width
assert size.height == height
def test_get_page_image_full():
"""Test getting full page image."""
width, height = 100, 80
stream = _make_png_stream(width=width, height=height)
doc_backend = _get_backend_from_stream(stream)
page_backend: _ImagePageBackend = doc_backend.load_page(0)
img = page_backend.get_page_image()
assert img.width == width
assert img.height == height
def test_get_page_image_scaled():
"""Test getting scaled page image."""
width, height = 100, 80
scale = 2.0
stream = _make_png_stream(width=width, height=height)
doc_backend = _get_backend_from_stream(stream)
page_backend: _ImagePageBackend = doc_backend.load_page(0)
img = page_backend.get_page_image(scale=scale)
assert img.width == round(width * scale)
assert img.height == round(height * scale)
def test_crop_page_image():
"""Test cropping page image."""
width, height = 200, 150
stream = _make_png_stream(width=width, height=height)
doc_backend = _get_backend_from_stream(stream)
page_backend: _ImagePageBackend = doc_backend.load_page(0)
# Crop a region from the center
cropbox = BoundingBox(l=50, t=30, r=150, b=120, coord_origin=CoordOrigin.TOPLEFT)
img = page_backend.get_page_image(cropbox=cropbox)
assert img.width == 100 # 150 - 50
assert img.height == 90 # 120 - 30
def test_crop_page_image_scaled():
"""Test cropping and scaling page image."""
width, height = 200, 150
scale = 0.5
stream = _make_png_stream(width=width, height=height)
doc_backend = _get_backend_from_stream(stream)
page_backend: _ImagePageBackend = doc_backend.load_page(0)
cropbox = BoundingBox(l=50, t=30, r=150, b=120, coord_origin=CoordOrigin.TOPLEFT)
img = page_backend.get_page_image(scale=scale, cropbox=cropbox)
assert img.width == round(100 * scale) # cropped width * scale
assert img.height == round(90 * scale) # cropped height * scale
def test_get_bitmap_rects():
"""Test getting bitmap rects - should return full page rectangle."""
width, height = 100, 80
stream = _make_png_stream(width=width, height=height)
doc_backend = _get_backend_from_stream(stream)
page_backend: _ImagePageBackend = doc_backend.load_page(0)
rects = list(page_backend.get_bitmap_rects())
assert len(rects) == 1
bbox = rects[0]
assert bbox.l == 0.0
assert bbox.t == 0.0
assert bbox.r == float(width)
assert bbox.b == float(height)
assert bbox.coord_origin == CoordOrigin.TOPLEFT
def test_get_bitmap_rects_scaled():
"""Test getting bitmap rects with scaling."""
width, height = 100, 80
scale = 2.0
stream = _make_png_stream(width=width, height=height)
doc_backend = _get_backend_from_stream(stream)
page_backend: _ImagePageBackend = doc_backend.load_page(0)
rects = list(page_backend.get_bitmap_rects(scale=scale))
assert len(rects) == 1
bbox = rects[0]
assert bbox.l == 0.0
assert bbox.t == 0.0
assert bbox.r == float(width * scale)
assert bbox.b == float(height * scale)
assert bbox.coord_origin == CoordOrigin.TOPLEFT
def test_get_text_in_rect():
"""Test that get_text_in_rect returns empty string for images (no OCR)."""
stream = _make_png_stream()
doc_backend = _get_backend_from_stream(stream)
page_backend: _ImagePageBackend = doc_backend.load_page(0)
bbox = BoundingBox(l=10, t=10, r=50, b=50, coord_origin=CoordOrigin.TOPLEFT)
text = page_backend.get_text_in_rect(bbox)
assert text == ""
def test_multipage_access():
"""Test accessing different pages in multi-page image."""
num_pages = 4
stream = _make_multipage_tiff_stream(num_pages=num_pages, size=(64, 64))
doc_backend = _get_backend_from_stream(stream)
assert doc_backend.page_count() == num_pages
# Access each page
for i in range(num_pages):
page_backend = doc_backend.load_page(i)
assert page_backend.is_valid()
size = page_backend.get_size()
assert size.width == 64
assert size.height == 64
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_interfaces.py | tests/test_interfaces.py | from io import BytesIO
from pathlib import Path
from unittest.mock import Mock
import pytest
from docling.datamodel.accelerator_options import AcceleratorDevice
from docling.datamodel.base_models import DocumentStream, InputFormat
from docling.datamodel.pipeline_options_vlm_model import (
InferenceFramework,
InlineVlmOptions,
ResponseFormat,
TransformersPromptStyle,
)
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.models.base_model import BaseVlmPageModel
from .test_data_gen_flag import GEN_TEST_DATA
from .verify_utils import verify_conversion_result_v2
GENERATE = GEN_TEST_DATA
def get_pdf_path():
pdf_path = Path("./tests/data/pdf/2305.03393v1-pg9.pdf")
return pdf_path
@pytest.fixture
def converter():
from docling.datamodel.pipeline_options import PdfPipelineOptions
pipeline_options = PdfPipelineOptions()
pipeline_options.do_ocr = False
pipeline_options.do_table_structure = True
pipeline_options.table_structure_options.do_cell_matching = True
pipeline_options.accelerator_options.device = AcceleratorDevice.CPU
pipeline_options.generate_parsed_pages = True
converter = DocumentConverter(
format_options={
InputFormat.PDF: PdfFormatOption(
pipeline_options=pipeline_options,
backend=PdfFormatOption().backend,
)
}
)
return converter
def test_convert_path(converter: DocumentConverter):
pdf_path = get_pdf_path()
print(f"converting {pdf_path}")
# Avoid heavy torch-dependent models by not instantiating layout models here in coverage run
doc_result = converter.convert(pdf_path)
verify_conversion_result_v2(
input_path=pdf_path, doc_result=doc_result, generate=GENERATE
)
def test_convert_stream(converter: DocumentConverter):
pdf_path = get_pdf_path()
print(f"converting {pdf_path}")
buf = BytesIO(pdf_path.open("rb").read())
stream = DocumentStream(name=pdf_path.name, stream=buf)
doc_result = converter.convert(stream)
verify_conversion_result_v2(
input_path=pdf_path, doc_result=doc_result, generate=GENERATE
)
class _DummyVlm(BaseVlmPageModel):
def __init__(self, prompt_style: TransformersPromptStyle, repo_id: str = ""): # type: ignore[no-untyped-def]
self.vlm_options = InlineVlmOptions(
repo_id=repo_id or "dummy/repo",
prompt="test prompt",
inference_framework=InferenceFramework.TRANSFORMERS,
response_format=ResponseFormat.PLAINTEXT,
transformers_prompt_style=prompt_style,
)
self.processor = Mock()
def __call__(self, conv_res, page_batch): # type: ignore[no-untyped-def]
return []
def process_images(self, image_batch, prompt): # type: ignore[no-untyped-def]
return []
def test_formulate_prompt_raw():
model = _DummyVlm(TransformersPromptStyle.RAW)
assert model.formulate_prompt("hello") == "hello"
def test_formulate_prompt_none():
model = _DummyVlm(TransformersPromptStyle.NONE)
assert model.formulate_prompt("ignored") == ""
def test_formulate_prompt_phi4_special_case():
model = _DummyVlm(
TransformersPromptStyle.RAW, repo_id="ibm-granite/granite-docling-258M"
)
# RAW style with granite-docling should still invoke the special path only when style not RAW;
# ensure RAW returns the user text
assert model.formulate_prompt("describe image") == "describe image"
def test_formulate_prompt_chat_uses_processor_template():
model = _DummyVlm(TransformersPromptStyle.CHAT)
model.processor.apply_chat_template.return_value = "templated"
out = model.formulate_prompt("summarize")
assert out == "templated"
model.processor.apply_chat_template.assert_called()
def test_formulate_prompt_unknown_style_raises():
# Create an InlineVlmOptions with an invalid enum by patching attribute directly
model = _DummyVlm(TransformersPromptStyle.RAW)
model.vlm_options.transformers_prompt_style = "__invalid__" # type: ignore[assignment]
with pytest.raises(RuntimeError):
model.formulate_prompt("x")
def test_vlm_prompt_style_none_and_chat_variants():
# NONE always empty
m_none = _DummyVlm(TransformersPromptStyle.NONE)
assert m_none.formulate_prompt("anything") == ""
# CHAT path ensures processor used even with complex prompt
m_chat = _DummyVlm(TransformersPromptStyle.CHAT)
m_chat.processor.apply_chat_template.return_value = "ok"
out = m_chat.formulate_prompt("details please")
assert out == "ok"
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_input_doc.py | tests/test_input_doc.py | from io import BytesIO
from pathlib import Path
import pytest
from pydantic import ValidationError
from docling.backend.html_backend import HTMLDocumentBackend
from docling.backend.pypdfium2_backend import PyPdfiumDocumentBackend
from docling.datamodel.backend_options import (
BaseBackendOptions,
DeclarativeBackendOptions,
HTMLBackendOptions,
)
from docling.datamodel.base_models import DocumentStream, InputFormat
from docling.datamodel.document import InputDocument, _DocumentConversionInput
from docling.datamodel.settings import DocumentLimits
from docling.document_converter import ImageFormatOption, PdfFormatOption
def test_in_doc_from_valid_path():
test_doc_path = Path("./tests/data/pdf/2206.01062.pdf")
doc = _make_input_doc(test_doc_path)
assert doc.valid is True
assert doc.backend_options is None
def test_in_doc_from_invalid_path():
test_doc_path = Path("./tests/does/not/exist.pdf")
doc = _make_input_doc(test_doc_path)
assert doc.valid is False
def test_in_doc_from_valid_buf():
buf = BytesIO(Path("./tests/data/pdf/2206.01062.pdf").open("rb").read())
stream = DocumentStream(name="my_doc.pdf", stream=buf)
doc = _make_input_doc_from_stream(stream)
assert doc.valid is True
def test_in_doc_from_invalid_buf():
buf = BytesIO(b"")
stream = DocumentStream(name="my_doc.pdf", stream=buf)
doc = _make_input_doc_from_stream(stream)
assert doc.valid is False
def test_in_doc_with_page_range():
test_doc_path = Path("./tests/data/pdf/2206.01062.pdf")
limits = DocumentLimits()
limits.page_range = (1, 10)
doc = InputDocument(
path_or_stream=test_doc_path,
format=InputFormat.PDF,
backend=PyPdfiumDocumentBackend,
limits=limits,
)
assert doc.valid is True
limits.page_range = (9, 9)
doc = InputDocument(
path_or_stream=test_doc_path,
format=InputFormat.PDF,
backend=PyPdfiumDocumentBackend,
limits=limits,
)
assert doc.valid is True
limits.page_range = (11, 12)
doc = InputDocument(
path_or_stream=test_doc_path,
format=InputFormat.PDF,
backend=PyPdfiumDocumentBackend,
limits=limits,
)
assert doc.valid is False
def test_in_doc_with_backend_options():
test_doc_path = Path("./tests/data/html/example_01.html")
doc = InputDocument(
path_or_stream=test_doc_path,
format=InputFormat.HTML,
backend=HTMLDocumentBackend,
backend_options=HTMLBackendOptions(),
)
assert doc.valid
assert doc.backend_options
assert isinstance(doc.backend_options, HTMLBackendOptions)
assert not doc.backend_options.fetch_images
assert not doc.backend_options.enable_local_fetch
assert not doc.backend_options.enable_remote_fetch
with pytest.raises(AttributeError, match="no attribute 'source_uri'"):
doc = InputDocument(
path_or_stream=test_doc_path,
format=InputFormat.HTML,
backend=HTMLDocumentBackend,
backend_options=DeclarativeBackendOptions(),
)
with pytest.raises(ValidationError):
doc = InputDocument(
path_or_stream=test_doc_path,
format=InputFormat.HTML,
backend=HTMLDocumentBackend,
backend_options=BaseBackendOptions(),
)
def test_guess_format(tmp_path):
"""Test docling.datamodel.document._DocumentConversionInput.__guess_format"""
dci = _DocumentConversionInput(path_or_stream_iterator=[])
temp_dir = tmp_path / "test_guess_format"
temp_dir.mkdir()
# Valid PDF
buf = BytesIO(Path("./tests/data/pdf/2206.01062.pdf").open("rb").read())
stream = DocumentStream(name="my_doc.pdf", stream=buf)
assert dci._guess_format(stream) == InputFormat.PDF
doc_path = Path("./tests/data/pdf/2206.01062.pdf")
assert dci._guess_format(doc_path) == InputFormat.PDF
# Valid MS Office
buf = BytesIO(Path("./tests/data/docx/lorem_ipsum.docx").open("rb").read())
stream = DocumentStream(name="lorem_ipsum.docx", stream=buf)
assert dci._guess_format(stream) == InputFormat.DOCX
doc_path = Path("./tests/data/docx/lorem_ipsum.docx")
assert dci._guess_format(doc_path) == InputFormat.DOCX
# Valid HTML
buf = BytesIO(Path("./tests/data/html/wiki_duck.html").open("rb").read())
stream = DocumentStream(name="wiki_duck.html", stream=buf)
assert dci._guess_format(stream) == InputFormat.HTML
doc_path = Path("./tests/data/html/wiki_duck.html")
assert dci._guess_format(doc_path) == InputFormat.HTML
html_str = ( # HTML starting with a script
"<script>\nconsole.log('foo');\n</script>"
'<!doctype html>\n<html lang="en-us class="no-js"></html>'
)
stream = DocumentStream(name="lorem_ipsum", stream=BytesIO(f"{html_str}".encode()))
assert dci._guess_format(stream) == InputFormat.HTML
# Valid MD
buf = BytesIO(Path("./tests/data/md/wiki.md").open("rb").read())
stream = DocumentStream(name="wiki.md", stream=buf)
assert dci._guess_format(stream) == InputFormat.MD
doc_path = Path("./tests/data/md/wiki.md")
assert dci._guess_format(doc_path) == InputFormat.MD
# Valid CSV
buf = BytesIO(Path("./tests/data/csv/csv-comma.csv").open("rb").read())
stream = DocumentStream(name="csv-comma.csv", stream=buf)
assert dci._guess_format(stream) == InputFormat.CSV
stream = DocumentStream(name="test-comma", stream=buf)
assert dci._guess_format(stream) == InputFormat.CSV
doc_path = Path("./tests/data/csv/csv-comma.csv")
assert dci._guess_format(doc_path) == InputFormat.CSV
# Valid XML USPTO patent
buf = BytesIO(Path("./tests/data/uspto/ipa20110039701.xml").open("rb").read())
stream = DocumentStream(name="ipa20110039701.xml", stream=buf)
assert dci._guess_format(stream) == InputFormat.XML_USPTO
doc_path = Path("./tests/data/uspto/ipa20110039701.xml")
assert dci._guess_format(doc_path) == InputFormat.XML_USPTO
buf = BytesIO(Path("./tests/data/uspto/pftaps057006474.txt").open("rb").read())
stream = DocumentStream(name="pftaps057006474.txt", stream=buf)
assert dci._guess_format(stream) == InputFormat.XML_USPTO
doc_path = Path("./tests/data/uspto/pftaps057006474.txt")
assert dci._guess_format(doc_path) == InputFormat.XML_USPTO
# Valid XML JATS
buf = BytesIO(Path("./tests/data/jats/elife-56337.xml").open("rb").read())
stream = DocumentStream(name="elife-56337.xml", stream=buf)
assert dci._guess_format(stream) == InputFormat.XML_JATS
doc_path = Path("./tests/data/jats/elife-56337.xml")
assert dci._guess_format(doc_path) == InputFormat.XML_JATS
buf = BytesIO(Path("./tests/data/jats/elife-56337.nxml").open("rb").read())
stream = DocumentStream(name="elife-56337.nxml", stream=buf)
assert dci._guess_format(stream) == InputFormat.XML_JATS
doc_path = Path("./tests/data/jats/elife-56337.nxml")
assert dci._guess_format(doc_path) == InputFormat.XML_JATS
buf = BytesIO(Path("./tests/data/jats/elife-56337.txt").open("rb").read())
stream = DocumentStream(name="elife-56337.txt", stream=buf)
assert dci._guess_format(stream) == InputFormat.XML_JATS
doc_path = Path("./tests/data/jats/elife-56337.txt")
assert dci._guess_format(doc_path) == InputFormat.XML_JATS
# Valid XML, non-supported flavor
xml_content = (
'<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE docling_test SYSTEM '
'"test.dtd"><docling>Docling parses documents</docling>'
)
doc_path = temp_dir / "docling_test.xml"
doc_path.write_text(xml_content, encoding="utf-8")
assert dci._guess_format(doc_path) is None
buf = BytesIO(Path(doc_path).open("rb").read())
stream = DocumentStream(name="docling_test.xml", stream=buf)
assert dci._guess_format(stream) is None
# Invalid USPTO patent (as plain text)
stream = DocumentStream(name="pftaps057006474.txt", stream=BytesIO(b"xyz"))
assert dci._guess_format(stream) is None
doc_path = temp_dir / "pftaps_wrong.txt"
doc_path.write_text("xyz", encoding="utf-8")
assert dci._guess_format(doc_path) is None
# Valid WebVTT
buf = BytesIO(Path("./tests/data/webvtt/webvtt_example_01.vtt").open("rb").read())
stream = DocumentStream(name="webvtt_example_01.vtt", stream=buf)
assert dci._guess_format(stream) == InputFormat.VTT
# Valid Docling JSON
test_str = '{"name": ""}'
stream = DocumentStream(name="test.json", stream=BytesIO(f"{test_str}".encode()))
assert dci._guess_format(stream) == InputFormat.JSON_DOCLING
doc_path = temp_dir / "test.json"
doc_path.write_text(test_str, encoding="utf-8")
assert dci._guess_format(doc_path) == InputFormat.JSON_DOCLING
# Non-Docling JSON
# TODO: Docling JSON is currently the single supported JSON flavor and the pipeline
# will try to validate *any* JSON (based on suffix/MIME) as Docling JSON; proper
# disambiguation seen as part of https://github.com/docling-project/docling/issues/802
test_str = "{}"
stream = DocumentStream(name="test.json", stream=BytesIO(f"{test_str}".encode()))
assert dci._guess_format(stream) == InputFormat.JSON_DOCLING
doc_path = temp_dir / "test.json"
doc_path.write_text(test_str, encoding="utf-8")
assert dci._guess_format(doc_path) == InputFormat.JSON_DOCLING
def _make_input_doc(path):
    """Build a PDF InputDocument for *path* with the default backend."""
    return InputDocument(
        path_or_stream=path,
        format=InputFormat.PDF,
        backend=PdfFormatOption().backend,  # use default
    )
def _make_input_doc_from_stream(doc_stream):
    """Build a PDF InputDocument from a DocumentStream with the default backend."""
    return InputDocument(
        path_or_stream=doc_stream.stream,
        format=InputFormat.PDF,
        filename=doc_stream.name,
        backend=PdfFormatOption().backend,  # use default
    )
def test_tiff_two_pages():
    """A two-page TIFF is recognized as two pages, each with one full-page bitmap."""
    tiff_path = Path("./tests/data/tiff/2206.01062.tif")
    doc = InputDocument(
        path_or_stream=tiff_path,
        format=InputFormat.IMAGE,
        backend=ImageFormatOption().backend,  # use default backend
    )
    assert doc.valid is True
    assert doc.page_count == 2
    # Each page should expose exactly one rectangle covering the whole
    # US-letter page (612 x 792 points).
    first_rect = next(doc._backend.load_page(0).get_bitmap_rects())
    second_rect = next(doc._backend.load_page(1).get_bitmap_rects())
    for rect in (first_rect, second_rect):
        assert rect.t == 0
        assert rect.l == 0
        assert rect.r == 612.0
        assert rect.b == 792.0
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_cli.py | tests/test_cli.py | from pathlib import Path
from typer.testing import CliRunner
from docling.cli.main import app
# Shared Typer CLI runner used by every test in this module.
runner = CliRunner()
def test_cli_help():
    """The CLI `--help` entry point exits successfully."""
    assert runner.invoke(app, ["--help"]).exit_code == 0
def test_cli_version():
    """The CLI `--version` entry point exits successfully."""
    assert runner.invoke(app, ["--version"]).exit_code == 0
def test_cli_convert(tmp_path):
    """Converting a sample PDF writes a markdown file into the --output directory."""
    source = "./tests/data/pdf/2305.03393v1-pg9.pdf"
    out_dir = tmp_path / "out"
    out_dir.mkdir()
    result = runner.invoke(app, [source, "--output", str(out_dir)])
    assert result.exit_code == 0
    # The converter names the output after the input file's stem.
    assert (out_dir / f"{Path(source).stem}.md").exists()
def test_cli_audio_auto_detection(tmp_path):
    """Test that CLI automatically detects audio files and sets ASR pipeline."""
    # Create a dummy audio file for testing
    audio_file = tmp_path / "test_audio.mp3"
    audio_file.write_bytes(b"dummy audio content")
    output = tmp_path / "out"
    output.mkdir()
    # Test that audio file triggers ASR pipeline auto-detection
    result = runner.invoke(app, [str(audio_file), "--output", str(output)])
    # The command should succeed (even if ASR fails due to dummy content).
    # The key is that it should attempt ASR processing, not standard processing.
    assert result.exit_code in (0, 1)  # allow for ASR processing failure
def test_cli_explicit_pipeline_not_overridden(tmp_path):
    """Test that explicit pipeline choice is not overridden by audio auto-detection."""
    # Create a dummy audio file for testing
    audio_file = tmp_path / "test_audio.mp3"
    audio_file.write_bytes(b"dummy audio content")
    output = tmp_path / "out"
    output.mkdir()
    # Test that explicit --pipeline STANDARD is not overridden
    result = runner.invoke(
        app, [str(audio_file), "--output", str(output), "--pipeline", "standard"]
    )
    # Should still use standard pipeline despite the audio input; either a clean
    # run or a processing failure is acceptable for the dummy content.
    assert result.exit_code in (0, 1)  # allow for processing failure
def test_cli_audio_extensions_coverage():
    """Test that all audio extensions from FormatToExtensions are covered."""
    from docling.datamodel.base_models import FormatToExtensions, InputFormat

    # The centralized audio-extension list must include every expected format.
    audio_extensions = FormatToExtensions[InputFormat.AUDIO]
    expected = ("wav", "mp3", "m4a", "aac", "ogg", "flac", "mp4", "avi", "mov")
    for ext in expected:
        assert ext in audio_extensions, (
            f"Audio extension {ext} not found in FormatToExtensions[InputFormat.AUDIO]"
        )
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_backend_pdfium.py | tests/test_backend_pdfium.py | from pathlib import Path
import pytest
from docling_core.types.doc import BoundingBox
from docling.backend.pypdfium2_backend import (
PyPdfiumDocumentBackend,
PyPdfiumPageBackend,
)
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
@pytest.fixture
def test_doc_path():
    """Path to the default sample PDF (the DocLayNet paper) used by these tests."""
    return Path("./tests/data/pdf/2206.01062.pdf")
def _get_backend(pdf_doc):
    """Open *pdf_doc* with the pypdfium2 backend and return the document backend."""
    input_doc = InputDocument(
        path_or_stream=pdf_doc,
        format=InputFormat.PDF,
        backend=PyPdfiumDocumentBackend,
    )
    return input_doc._backend
def test_get_text_from_rect_rotated():
    """OCR on a scanned page with a rotation mismatch still extracts readable text."""
    pdf_doc = Path("./tests/data_scanned/sample_with_rotation_mismatch.pdf")
    pipeline_options = PdfPipelineOptions()
    pipeline_options.do_ocr = True
    doc_converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_options=pipeline_options, backend=PyPdfiumDocumentBackend
            )
        }
    )
    conv_res = doc_converter.convert(pdf_doc)
    # "1972" only appears in the output if OCR coped with the page rotation.
    assert "1972" in conv_res.document.export_to_markdown()
def test_text_cell_counts():
    """Re-loading the same page repeatedly must yield a stable text-cell count.

    Guards against backend state leaking between page loads.
    """
    pdf_doc = Path("./tests/data/pdf/redp5110_sampled.pdf")
    doc_backend = _get_backend(pdf_doc)
    for page_index in range(doc_backend.page_count()):
        last_cell_count = None
        for _ in range(10):
            # Fix: the original always loaded page 0 despite iterating
            # page_index; load the page actually under test.
            page_backend: PyPdfiumPageBackend = doc_backend.load_page(page_index)
            cells = list(page_backend.get_text_cells())
            if last_cell_count is None:
                last_cell_count = len(cells)
            assert len(cells) == last_cell_count, (
                "Loading page multiple times yielded non-identical text cell counts"
            )
def test_get_text_from_rect(test_doc_path):
    """Text extraction within a bounding box returns the expected paper title."""
    doc_backend = _get_backend(test_doc_path)
    page_backend: PyPdfiumPageBackend = doc_backend.load_page(0)
    # Get the title text of the DocLayNet paper
    textpiece = page_backend.get_text_in_rect(
        bbox=BoundingBox(l=102, t=77, r=511, b=124)
    )
    # pypdfium2 preserves the line break as \r\n inside the extracted text.
    ref = "DocLayNet: A Large Human-Annotated Dataset for\r\nDocument-Layout Analysis"
    assert textpiece.strip() == ref
def test_crop_page_image(test_doc_path):
    """Rendering a cropped page region at 2x scale must not raise."""
    doc_backend = _get_backend(test_doc_path)
    page_backend: PyPdfiumPageBackend = doc_backend.load_page(0)
    # Crop out "Figure 1" from the DocLayNet paper
    page_backend.get_page_image(
        scale=2, cropbox=BoundingBox(l=317, t=246, r=574, b=527)
    )
    # im.show()
def test_num_pages(test_doc_path):
    """The sample document reports the expected number of pages."""
    doc_backend = _get_backend(test_doc_path)
    # Fix: the original comparison discarded its result (no assert), so the
    # test could never fail.
    assert doc_backend.page_count() == 9
def test_merge_row():
    """The first text cell on page index 4 contains the full merged row text."""
    pdf_doc = Path("./tests/data/pdf/multi_page.pdf")
    doc_backend = _get_backend(pdf_doc)
    page_backend: PyPdfiumPageBackend = doc_backend.load_page(4)
    cell = page_backend.get_text_cells()[0]
    assert (
        cell.text
        == "The journey of the word processor—from clunky typewriters to AI-powered platforms—"
    )
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_backend_patent_uspto.py | tests/test_backend_patent_uspto.py | """Test methods in module docling.backend.patent_uspto_backend.py."""
import logging
import os
from pathlib import Path
from tempfile import NamedTemporaryFile
import pytest
from docling_core.types import DoclingDocument
from docling_core.types.doc import DocItemLabel, TableData, TextItem
from docling.backend.xml.uspto_backend import PatentUsptoDocumentBackend, XmlTable
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
from .test_data_gen_flag import GEN_TEST_DATA
from .verify_utils import CONFID_PREC, COORD_PREC, verify_document
# When true, tests regenerate groundtruth files instead of comparing against them.
GENERATE: bool = GEN_TEST_DATA
# Directory holding the USPTO input fixtures.
DATA_PATH: Path = Path("./tests/data/uspto/")
# Directory holding the expected-output (groundtruth) files.
GT_PATH: Path = Path("./tests/data/groundtruth/docling_v2/")
def _generate_groundtruth(doc: DoclingDocument, file_stem: str) -> None:
    """Write the indented-text, JSON and markdown groundtruth files for *doc*."""
    (GT_PATH / f"{file_stem}.itxt").write_text(
        doc._export_to_indented_text(), encoding="utf-8"
    )
    doc.save_as_json(
        GT_PATH / f"{file_stem}.json",
        coord_precision=COORD_PREC,
        confid_precision=CONFID_PREC,
    )
    doc.save_as_markdown(GT_PATH / f"{file_stem}.md")
@pytest.fixture(scope="module")
def patents() -> list[tuple[Path, DoclingDocument]]:
patent_paths = (
sorted(DATA_PATH.glob("ip*.xml"))
+ sorted(DATA_PATH.glob("pg*.xml"))
+ sorted(DATA_PATH.glob("pa*.xml"))
+ sorted(DATA_PATH.glob("pftaps*.txt"))
)
patents: list[dict[Path, DoclingDocument]] = []
for in_path in patent_paths:
in_doc = InputDocument(
path_or_stream=in_path,
format=InputFormat.XML_USPTO,
backend=PatentUsptoDocumentBackend,
)
backend = PatentUsptoDocumentBackend(in_doc=in_doc, path_or_stream=in_path)
logging.info(f"Converting patent from file {in_path}")
doc = backend.convert()
assert doc, f"Failed to parse document {in_path}"
patents.append((in_path, doc))
return patents
@pytest.fixture(scope="module")
def groundtruth() -> list[tuple[Path, str]]:
patent_paths = (
sorted(GT_PATH.glob("ip*"))
+ sorted(GT_PATH.glob("pg*"))
+ sorted(GT_PATH.glob("pa*"))
+ sorted(GT_PATH.glob("pftaps*"))
)
groundtruth: list[tuple[Path, str]] = []
for in_path in patent_paths:
with open(in_path, encoding="utf-8") as file_obj:
content = file_obj.read()
groundtruth.append((in_path, content))
return groundtruth
@pytest.fixture(scope="module")
def tables() -> list[tuple[Path, TableData]]:
table_paths = sorted(DATA_PATH.glob("tables*.xml"))
tables: list[tuple[Path, TableData]] = []
for in_path in table_paths:
with open(in_path, encoding="utf-8") as file_obj:
content = file_obj.read()
parser = XmlTable(content)
parsed_table = parser.parse()
assert parsed_table
tables.append((in_path, parsed_table))
return tables
@pytest.mark.skip("Slow test")
def test_patent_export(patents):
for _, doc in patents:
with NamedTemporaryFile(suffix=".yaml", delete=False) as tmp_file:
doc.save_as_yaml(
Path(tmp_file.name),
coord_precision=COORD_PREC,
confid_precision=CONFID_PREC,
)
assert os.path.getsize(tmp_file.name) > 0
with NamedTemporaryFile(suffix=".html", delete=False) as tmp_file:
doc.save_as_html(Path(tmp_file.name))
assert os.path.getsize(tmp_file.name) > 0
with NamedTemporaryFile(suffix=".md", delete=False) as tmp_file:
doc.save_as_markdown(Path(tmp_file.name))
assert os.path.getsize(tmp_file.name) > 0
def test_patent_groundtruth(patents, groundtruth):
    """Compare each parsed patent against its groundtruth exports, where present."""
    # Stems gate which patents have any groundtruth; names map filename -> content.
    gt_stems: list[str] = [item[0].stem for item in groundtruth]
    gt_names: dict[str, str] = {item[0].name: item[1] for item in groundtruth}
    for path, doc in patents:
        if path.stem not in gt_stems:
            continue
        md_name = path.stem + ".md"
        if md_name in gt_names:
            pred_md = doc.export_to_markdown()
            assert pred_md == gt_names[md_name], (
                f"Markdown file mismatch against groundtruth {md_name}"
            )
        json_path = path.with_suffix(".json")
        # NOTE(review): gt_names is keyed by file *name* (with suffix), but this
        # checks json_path.stem (suffix-less), so the condition looks like it can
        # never be true; also json_path points into the input dir, not GT_PATH.
        # Confirm whether the JSON comparison is intended to run at all.
        if json_path.stem in gt_names:
            assert verify_document(doc, str(json_path), GENERATE), (
                f"JSON file mismatch against groundtruth {json_path}"
            )
        itxt_name = path.stem + ".itxt"
        if itxt_name in gt_names:
            pred_itxt = doc._export_to_indented_text()
            assert pred_itxt == gt_names[itxt_name], (
                f"Indented text file mismatch against groundtruth {itxt_name}"
            )
def test_tables(tables):
    """Test the table parser."""
    # CHECK table in file tables_20180000016.xml
    file_name = "tables_ipa20180000016.xml"
    file_table = next(item[1] for item in tables if item[0].name == file_name)
    # 13 x 10 grid, fully populated (13 * 10 = 130 cells)
    assert file_table.num_rows == 13
    assert file_table.num_cols == 10
    assert len(file_table.table_cells) == 130
def test_patent_uspto_ice(patents):
    """Test applications and grants Full Text Data/XML Version 4.x ICE."""
    # CHECK application doc number 20200022300
    file_name = "ipa20200022300.xml"
    doc = next(item[1] for item in patents if item[0].name == file_name)
    if GENERATE:
        _generate_groundtruth(doc, Path(file_name).stem)
    assert doc.name == file_name
    texts = doc.texts
    assert len(texts) == 78
    # Title is the root item; sections attach to it via parent.cref.
    assert isinstance(texts[0], TextItem)
    assert (
        texts[0].text
        == "SYSTEM FOR CONTROLLING THE OPERATION OF AN ACTUATOR MOUNTED ON A SEED PLANTING IMPLEMENT"
    )
    assert texts[0].label == DocItemLabel.TITLE
    assert texts[0].parent.cref == "#/body"
    assert isinstance(texts[1], TextItem)
    assert texts[1].text == "ABSTRACT"
    assert texts[1].label == DocItemLabel.SECTION_HEADER
    assert texts[1].parent.cref == "#/texts/0"
    assert isinstance(texts[2], TextItem)
    assert texts[2].text == (
        "In one aspect, a system for controlling an operation of an actuator mounted "
        "on a seed planting implement may include an actuator configured to adjust a "
        "position of a row unit of the seed planting implement relative to a toolbar "
        "of the seed planting implement. The system may also include a flow restrictor"
        " fluidly coupled to a fluid chamber of the actuator, with the flow restrictor"
        " being configured to reduce a rate at which fluid is permitted to exit the "
        "fluid chamber in a manner that provides damping to the row unit. Furthermore,"
        " the system may include a valve fluidly coupled to the flow restrictor in a "
        "parallel relationship such that the valve is configured to permit the fluid "
        "exiting the fluid chamber to flow through the flow restrictor and the fluid "
        "entering the fluid chamber to bypass the flow restrictor."
    )
    assert texts[2].label == DocItemLabel.PARAGRAPH
    assert texts[2].parent.cref == "#/texts/1"
    assert isinstance(texts[3], TextItem)
    assert texts[3].text == "FIELD"
    assert texts[3].label == DocItemLabel.SECTION_HEADER
    assert texts[3].parent.cref == "#/texts/0"
    assert isinstance(texts[4], TextItem)
    assert texts[4].text == (
        "The present disclosure generally relates to seed planting implements and, "
        "more particularly, to systems for controlling the operation of an actuator "
        "mounted on a seed planting implement in a manner that provides damping to "
        "one or more components of the seed planting implement."
    )
    assert texts[4].label == DocItemLabel.PARAGRAPH
    assert texts[4].parent.cref == "#/texts/3"
    assert isinstance(texts[5], TextItem)
    assert texts[5].text == "BACKGROUND"
    assert texts[5].label == DocItemLabel.SECTION_HEADER
    assert texts[5].parent.cref == "#/texts/0"
    assert isinstance(texts[6], TextItem)
    assert texts[6].text == (
        "Modern farming practices strive to increase yields of agricultural fields. In"
        " this respect, seed planting implements are towed behind a tractor or other "
        "work vehicle to deposit seeds in a field. For example, seed planting "
        "implements typically include one or more ground engaging tools or openers "
        "that form a furrow or trench in the soil. One or more dispensing devices of "
        "the seed planting implement may, in turn, deposit seeds into the furrow(s). "
        "After deposition of the seeds, a packer wheel may pack the soil on top of the"
        " deposited seeds."
    )
    assert texts[6].label == DocItemLabel.PARAGRAPH
    assert texts[6].parent.cref == "#/texts/5"
    assert isinstance(texts[7], TextItem)
    assert texts[7].text == (
        "In certain instances, the packer wheel may also control the penetration depth"
        " of the furrow. In this regard, the position of the packer wheel may be moved"
        " vertically relative to the associated opener(s) to adjust the depth of the "
        "furrow. Additionally, the seed planting implement includes an actuator "
        "configured to exert a downward force on the opener(s) to ensure that the "
        "opener(s) is able to penetrate the soil to the depth set by the packer wheel."
        " However, the seed planting implement may bounce or chatter when traveling at"
        " high speeds and/or when the opener(s) encounters hard or compacted soil. As "
        "such, operators generally operate the seed planting implement with the "
        "actuator exerting more downward force on the opener(s) than is necessary in "
        "order to prevent such bouncing or chatter. Operation of the seed planting "
        "implement with excessive down pressure applied to the opener(s), however, "
        "reduces the overall stability of the seed planting implement."
    )
    assert texts[7].label == DocItemLabel.PARAGRAPH
    assert texts[7].parent.cref == "#/texts/5"
    assert isinstance(texts[8], TextItem)
    assert texts[8].text == (
        "Accordingly, an improved system for controlling the operation of an actuator "
        "mounted on s seed planting implement to enhance the overall operation of the "
        "implement would be welcomed in the technology."
    )
    assert texts[8].label == DocItemLabel.PARAGRAPH
    assert texts[8].parent.cref == "#/texts/5"
    assert isinstance(texts[9], TextItem)
    assert texts[9].text == "BRIEF DESCRIPTION"
    assert texts[9].label == DocItemLabel.SECTION_HEADER
    assert texts[9].parent.cref == "#/texts/0"
    assert isinstance(texts[15], TextItem)
    assert texts[15].text == "BRIEF DESCRIPTION OF THE DRAWINGS"
    assert texts[15].label == DocItemLabel.SECTION_HEADER
    assert texts[15].parent.cref == "#/texts/0"
    assert isinstance(texts[17], TextItem)
    assert texts[17].text == (
        "FIG. 1 illustrates a perspective view of one embodiment of a seed planting "
        "implement in accordance with aspects of the present subject matter;"
    )
    assert texts[17].label == DocItemLabel.PARAGRAPH
    assert texts[17].parent.cref == "#/texts/15"
    assert isinstance(texts[27], TextItem)
    assert texts[27].text == "DETAILED DESCRIPTION"
    assert texts[27].label == DocItemLabel.SECTION_HEADER
    assert texts[27].parent.cref == "#/texts/0"
    assert isinstance(texts[57], TextItem)
    assert texts[57].text == (
        "This written description uses examples to disclose the technology, including "
        "the best mode, and also to enable any person skilled in the art to practice "
        "the technology, including making and using any devices or systems and "
        "performing any incorporated methods. The patentable scope of the technology "
        "is defined by the claims, and may include other examples that occur to those "
        "skilled in the art. Such other examples are intended to be within the scope "
        "of the claims if they include structural elements that do not differ from the"
        " literal language of the claims, or if they include equivalent structural "
        "elements with insubstantial differences from the literal language of the "
        "claims."
    )
    assert texts[57].label == DocItemLabel.PARAGRAPH
    assert texts[57].parent.cref == "#/texts/27"
    assert isinstance(texts[58], TextItem)
    assert texts[58].text == "CLAIMS"
    assert texts[58].label == DocItemLabel.SECTION_HEADER
    assert texts[58].parent.cref == "#/texts/0"
    assert isinstance(texts[77], TextItem)
    assert texts[77].text == (
        "19. The system of claim 18, wherein the flow restrictor and the valve are "
        "fluidly coupled in a parallel relationship."
    )
    assert texts[77].label == DocItemLabel.PARAGRAPH
    assert texts[77].parent.cref == "#/texts/58"
    # CHECK application doc number 20180000016 for HTML entities, level 2 headings, tables
    file_name = "ipa20180000016.xml"
    doc = next(item[1] for item in patents if item[0].name == file_name)
    if GENERATE:
        _generate_groundtruth(doc, Path(file_name).stem)
    assert doc.name == file_name
    texts = doc.texts
    assert len(texts) == 183
    assert isinstance(texts[0], TextItem)
    assert texts[0].text == "LIGHT EMITTING DEVICE AND PLANT CULTIVATION METHOD"
    assert texts[0].label == DocItemLabel.TITLE
    assert texts[0].parent.cref == "#/body"
    assert isinstance(texts[1], TextItem)
    assert texts[1].text == "ABSTRACT"
    assert texts[1].label == DocItemLabel.SECTION_HEADER
    assert texts[1].parent.cref == "#/texts/0"
    assert isinstance(texts[2], TextItem)
    assert texts[2].text == (
        "Provided is a light emitting device that includes a light emitting element "
        "having a light emission peak wavelength ranging from 380 nm to 490 nm, and a "
        "fluorescent material excited by light from the light emitting element and "
        "emitting light having at a light emission peak wavelength ranging from 580 nm"
        " or more to less than 680 nm. The light emitting device emits light having a "
        "ratio R/B of a photon flux density R to a photon flux density B ranging from "
        "2.0 to 4.0 and a ratio R/FR of the photon flux density R to a photon flux "
        "density FR ranging from 0.7 to 13.0, the photon flux density R being in a "
        "wavelength range of 620 nm or more and less than 700 nm, the photon flux "
        "density B being in a wavelength range of 380 nm or more and 490 nm or less, "
        "and the photon flux density FR being in a wavelength range of 700 nm or more "
        "and 780 nm or less."
    )
    assert isinstance(texts[3], TextItem)
    assert texts[3].text == "CROSS-REFERENCE TO RELATED APPLICATION"
    assert texts[3].label == DocItemLabel.SECTION_HEADER
    assert texts[3].parent.cref == "#/texts/0"
    assert isinstance(texts[4], TextItem)
    assert texts[5].text == "BACKGROUND"
    assert texts[5].label == DocItemLabel.SECTION_HEADER
    assert texts[5].parent.cref == "#/texts/0"
    # Level-2 headings ("Technical Field", "Description of Related Art").
    assert isinstance(texts[6], TextItem)
    assert texts[6].text == "Technical Field"
    assert texts[6].label == DocItemLabel.SECTION_HEADER
    assert texts[6].parent.cref == "#/texts/0"
    assert isinstance(texts[7], TextItem)
    assert texts[7].text == (
        "The present disclosure relates to a light emitting device and a plant "
        "cultivation method."
    )
    assert texts[7].label == DocItemLabel.PARAGRAPH
    assert texts[7].parent.cref == "#/texts/6"
    assert isinstance(texts[8], TextItem)
    assert texts[8].text == "Description of Related Art"
    assert texts[8].label == DocItemLabel.SECTION_HEADER
    assert texts[8].parent.cref == "#/texts/0"
    # HTML entities must be decoded to the corresponding Unicode characters.
    assert isinstance(texts[63], TextItem)
    assert texts[63].text == (
        "wherein r, s, and t are numbers satisfying 0≦r≦1.0, 0≦s≦1.0, 0<t<1.0, and "
        "r+s+t≦1.0."
    )
    assert texts[63].label == DocItemLabel.PARAGRAPH
    assert texts[63].parent.cref == "#/texts/51"
    assert isinstance(texts[89], TextItem)
    assert texts[89].text == (
        "Examples of the compound containing Al, Ga, or In specifically include Al₂O₃, "
        "Ga₂O₃, and In₂O₃."
    )
    assert texts[89].label == DocItemLabel.PARAGRAPH
    assert texts[89].parent.cref == "#/texts/87"
    # CHECK application doc number 20110039701 for complex long tables
    file_name = "ipa20110039701.xml"
    doc = next(item[1] for item in patents if item[0].name == file_name)
    assert doc.name == file_name
    assert len(doc.tables) == 17
def test_patent_uspto_grant_v2(patents):
    """Test applications and grants Full Text Data/APS."""
    # CHECK application doc number 06442728
    file_name = "pg06442728.xml"
    doc = next(item[1] for item in patents if item[0].name == file_name)
    if GENERATE:
        _generate_groundtruth(doc, Path(file_name).stem)
    assert doc.name == file_name
    texts = doc.texts
    assert len(texts) == 108
    assert isinstance(texts[0], TextItem)
    assert texts[0].text == "Methods and apparatus for turbo code"
    assert texts[0].label == DocItemLabel.TITLE
    assert texts[0].parent.cref == "#/body"
    assert isinstance(texts[1], TextItem)
    assert texts[1].text == "ABSTRACT"
    assert texts[1].label == DocItemLabel.SECTION_HEADER
    assert texts[1].parent.cref == "#/texts/0"
    assert isinstance(texts[2], TextItem)
    assert texts[2].text == (
        "An interleaver receives incoming data frames of size N. The interleaver "
        "indexes the elements of the frame with an N₁×N₂ index array. The interleaver "  # noqa: RUF001
        "then effectively rearranges (permutes) the data by permuting the rows of the "
        "index array. The interleaver employs the equation I(j,k)=I(j,αjk+βj)modP) to "  # noqa: RUF001
        "permute the columns (indexed by k) of each row (indexed by j). P is at least "
        "equal to N₂, βj is a constant which may be different for each row, and each "
        "αj is a relative prime number relative to P. After permuting, the "  # noqa: RUF001
        "interleaver outputs the data in a different order than received (e.g., "
        "receives sequentially row by row, outputs sequentially each column by column)."
    )
    # check that the formula has been skipped
    assert texts[43].text == (
        "Calculating the specified equation with the specified values for permuting "
        "row 0 of array D 350 into row 0 of array D₁ 360 proceeds as:"
    )
    assert texts[44].text == (
        "and the permuted data frame is contained in array D₁ 360 shown in FIG. 3. "
        "Outputting the array column by column outputs the frame elements in the "
        "order:"
    )
def test_patent_uspto_app_v1(patents):
    """Test applications Full Text Data/XML Version 1.x."""
    # CHECK application doc number 20010031492
    file_name = "pa20010031492.xml"
    doc = next(item[1] for item in patents if item[0].name == file_name)
    if GENERATE:
        _generate_groundtruth(doc, Path(file_name).stem)
    assert doc.name == file_name
    texts = doc.texts
    assert len(texts) == 103
    assert isinstance(texts[0], TextItem)
    assert texts[0].text == "Assay reagent"
    assert texts[0].label == DocItemLabel.TITLE
    assert texts[0].parent.cref == "#/body"
    assert isinstance(texts[1], TextItem)
    assert texts[1].text == "ABSTRACT"
    assert texts[1].label == DocItemLabel.SECTION_HEADER
    assert texts[1].parent.cref == "#/texts/0"
    # check that the formula has been skipped
    assert texts[62].text == (
        "5. The % toxic effect for each sample was calculated as follows:"
    )
    assert texts[63].text == "where: Cₒ=light in control at time zero"
    # Exactly one table (6 rows x 3 columns) is extracted from this application.
    assert len(doc.tables) == 1
    assert doc.tables[0].data.num_rows == 6
    assert doc.tables[0].data.num_cols == 3
def test_patent_uspto_grant_aps(patents):
    """Test applications Full Text Data/APS."""
    # CHECK application doc number 057006474
    file_name = "pftaps057006474.txt"
    doc = next(item[1] for item in patents if item[0].name == file_name)
    if GENERATE:
        _generate_groundtruth(doc, Path(file_name).stem)
    assert doc.name == file_name
    texts = doc.texts
    assert len(texts) == 75
    assert isinstance(texts[0], TextItem)
    assert texts[0].text == "Carbocation containing cyanine-type dye"
    assert texts[0].label == DocItemLabel.TITLE
    assert texts[0].parent.cref == "#/body"
    assert isinstance(texts[1], TextItem)
    assert texts[1].text == "ABSTRACT"
    assert texts[1].label == DocItemLabel.SECTION_HEADER
    assert texts[1].parent.cref == "#/texts/0"
    assert isinstance(texts[2], TextItem)
    assert texts[2].text == (
        "To provide a reagent with excellent stability under storage, which can detect"
        " a subject compound to be measured with higher specificity and sensitibity. "
        "Complexes of a compound represented by the general formula (IV):"
    )
    assert len(doc.tables) == 0
    # APS image placeholders such as ##STR1## must have been stripped.
    for item in texts:
        assert "##STR1##" not in item.text
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_ocr_utils.py | tests/test_ocr_utils.py | from typing import Tuple
import pytest
from docling_core.types.doc import BoundingBox, CoordOrigin
from docling_core.types.doc.page import BoundingRectangle
from docling.utils.orientation import rotate_bounding_box
# Image size (width, height) shared by all rotation cases below.
IM_SIZE = (4, 5)
# Input axis-aligned box in top-left coordinates.
BBOX = BoundingBox(l=1, t=3, r=3, b=4, coord_origin=CoordOrigin.TOPLEFT)
# Expected rectangles for BBOX rotated by 0/90/180/270 degrees within IM_SIZE.
RECT = BoundingRectangle(
    r_x0=1,
    r_y0=4,
    r_x1=3,
    r_y1=4,
    r_x2=3,
    r_y2=3,
    r_x3=1,
    r_y3=3,
    coord_origin=CoordOrigin.TOPLEFT,
)
RECT_90 = BoundingRectangle(
    r_x0=4,
    r_y0=3,
    r_x1=4,
    r_y1=1,
    r_x2=3,
    r_y2=1,
    r_x3=3,
    r_y3=3,
    coord_origin=CoordOrigin.TOPLEFT,
)
RECT_180 = BoundingRectangle(
    r_x0=3,
    r_y0=1,
    r_x1=1,
    r_y1=1,
    r_x2=1,
    r_y2=2,
    r_x3=3,
    r_y3=2,
    coord_origin=CoordOrigin.TOPLEFT,
)
RECT_270 = BoundingRectangle(
    r_x0=1,
    r_y0=1,
    r_x1=1,
    r_y1=3,
    r_x2=2,
    r_y2=3,
    r_x3=2,
    r_y3=1,
    coord_origin=CoordOrigin.TOPLEFT,
)
@pytest.mark.parametrize(
    ["bbox", "im_size", "angle", "expected_rectangle"],
    [
        # (BBOX, IM_SIZE, 0, RECT),
        # (BBOX, IM_SIZE, 90, RECT_90),
        (BBOX, IM_SIZE, 180, RECT_180),
        # (BBOX, IM_SIZE, 270, RECT_270),
        # (BBOX, IM_SIZE, 360, RECT),
        # (BBOX, IM_SIZE, -90, RECT_270),
        (BBOX, IM_SIZE, -180, RECT_180),
        # (BBOX, IM_SIZE, -270, RECT_90),
    ],
)
def test_rotate_bounding_box(
    bbox: BoundingBox,
    im_size: Tuple[int, int],
    angle: int,
    expected_rectangle: BoundingRectangle,
):
    """Rotating *bbox* by *angle* within *im_size* yields the expected rectangle."""
    rotated = rotate_bounding_box(bbox, angle, im_size)
    assert rotated == expected_rectangle
    # angle_360 is the rotation normalized into [0, 360).
    expected_angle_360 = angle % 360
    assert rotated.angle_360 == expected_angle_360
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_legacy_format_transform.py | tests/test_legacy_format_transform.py | import json
from pathlib import Path
import pytest
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
@pytest.fixture
def test_doc_paths():
    """Sample documents covering the formats exercised by this module."""
    return [
        Path("tests/data/html/wiki_duck.html"),
        Path("tests/data/docx/word_sample.docx"),
        Path("tests/data/docx/lorem_ipsum.docx"),
        Path("tests/data/pptx/powerpoint_sample.pptx"),
        Path("tests/data/2305.03393v1-pg9-img.png"),
        Path("tests/data/pdf/2206.01062.pdf"),
    ]
def get_converter():
    """Return a DocumentConverter with OCR disabled for PDFs and images."""
    opts = PdfPipelineOptions()
    opts.do_ocr = False
    return DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(pipeline_options=opts),
            InputFormat.IMAGE: PdfFormatOption(
                pipeline_options=opts,
            ),
        }
    )
@pytest.mark.skip(reason="Legacy format transform is deprecated")
def test_compare_legacy_output(test_doc_paths):
converter = get_converter()
res = converter.convert_all(test_doc_paths, raises_on_error=True)
for conv_res in res:
print(f"Results for {conv_res.input.file}")
with pytest.warns(DeprecationWarning, match="Use document instead"):
print(
json.dumps(
conv_res.legacy_document.model_dump(
mode="json", by_alias=True, exclude_none=True
)
)
)
# assert res.legacy_output == res.legacy_output_transformed
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_backend_docling_parse.py | tests/test_backend_docling_parse.py | from pathlib import Path
import pytest
from docling_core.types.doc import BoundingBox
from docling.backend.docling_parse_backend import (
DoclingParseDocumentBackend,
DoclingParsePageBackend,
)
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
@pytest.fixture
def test_doc_path():
    """Path to the default sample PDF (the DocLayNet paper) used by these tests."""
    return Path("./tests/data/pdf/2206.01062.pdf")
def _get_backend(pdf_doc):
    """Open *pdf_doc* with the docling-parse backend and return the document backend."""
    input_doc = InputDocument(
        path_or_stream=pdf_doc,
        format=InputFormat.PDF,
        backend=DoclingParseDocumentBackend,
    )
    return input_doc._backend
def test_text_cell_counts():
    """Re-loading the same page repeatedly must yield a stable text-cell count.

    Guards against backend state leaking between page loads.
    """
    pdf_doc = Path("./tests/data/pdf/redp5110_sampled.pdf")
    doc_backend = _get_backend(pdf_doc)
    for page_index in range(doc_backend.page_count()):
        last_cell_count = None
        for _ in range(10):
            # Fix: the original always loaded page 0 despite iterating
            # page_index; load the page actually under test.
            page_backend: DoclingParsePageBackend = doc_backend.load_page(page_index)
            cells = list(page_backend.get_text_cells())
            if last_cell_count is None:
                last_cell_count = len(cells)
            assert len(cells) == last_cell_count, (
                "Loading page multiple times yielded non-identical text cell counts"
            )
def test_get_text_from_rect(test_doc_path):
    """Text extraction within a bounding box returns the expected paper title."""
    doc_backend = _get_backend(test_doc_path)
    page_backend: DoclingParsePageBackend = doc_backend.load_page(0)
    # Get the title text of the DocLayNet paper
    textpiece = page_backend.get_text_in_rect(
        bbox=BoundingBox(l=102, t=77, r=511, b=124)
    )
    # Unlike pypdfium2, docling-parse joins the title lines with a plain space.
    ref = "DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis"
    assert textpiece.strip() == ref
def test_crop_page_image(test_doc_path):
    """Rendering a cropped page region at 2x scale must not raise."""
    doc_backend = _get_backend(test_doc_path)
    page_backend: DoclingParsePageBackend = doc_backend.load_page(0)
    # Crop out "Figure 1" from the DocLayNet paper
    page_backend.get_page_image(
        scale=2, cropbox=BoundingBox(l=317, t=246, r=574, b=527)
    )
    # im.show()
def test_num_pages(test_doc_path):
    """The sample document reports the expected number of pages."""
    doc_backend = _get_backend(test_doc_path)
    # Fix: the original comparison discarded its result (no assert), so the
    # test could never fail.
    assert doc_backend.page_count() == 9
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_conversion_result_json.py | tests/test_conversion_result_json.py | from io import BytesIO
from pathlib import Path
import pytest
from docling.backend.pypdfium2_backend import (
PyPdfiumDocumentBackend,
PyPdfiumPageBackend,
)
from docling.datamodel.base_models import ConversionStatus, InputFormat
from docling.datamodel.document import ConversionAssets
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
def test_conversion_result_json_roundtrip_string():
pdf_doc = Path("./tests/data/pdf/redp5110_sampled.pdf")
pipeline_options = PdfPipelineOptions()
pipeline_options.do_ocr = False
pipeline_options.images_scale = 1.0
pipeline_options.generate_page_images = False
pipeline_options.do_table_structure = False
pipeline_options.table_structure_options.do_cell_matching = True
pipeline_options.generate_parsed_pages = True
doc_converter = DocumentConverter(
format_options={
InputFormat.PDF: PdfFormatOption(
pipeline_options=pipeline_options, backend=PyPdfiumDocumentBackend
)
}
)
conv_res = doc_converter.convert(pdf_doc)
fpath: Path = Path("./test-conversion.zip")
conv_res.save(filename=fpath) # returns string when no filename is given
# assert isinstance(json_str, str) and len(json_str) > 0
loaded = ConversionAssets.load(filename=fpath)
assert loaded.status == conv_res.status
assert loaded.document.name == conv_res.document.name
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/__init__.py | tests/__init__.py | python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false | |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_backend_jats.py | tests/test_backend_jats.py | import os
from io import BytesIO
from pathlib import Path
from docling_core.types.doc import DoclingDocument
from docling.datamodel.base_models import DocumentStream, InputFormat
from docling.datamodel.document import ConversionResult
from docling.document_converter import DocumentConverter
from .test_data_gen_flag import GEN_TEST_DATA
from .verify_utils import verify_document, verify_export
GENERATE = GEN_TEST_DATA
def get_jats_paths():
directory = Path(os.path.dirname(__file__) + "/data/jats/")
xml_files = sorted(directory.rglob("*.nxml"))
return xml_files
def get_converter():
converter = DocumentConverter(allowed_formats=[InputFormat.XML_JATS])
return converter
def test_e2e_jats_conversions(use_stream=False):
jats_paths = get_jats_paths()
converter = get_converter()
for jats_path in jats_paths:
gt_path = (
jats_path.parent.parent / "groundtruth" / "docling_v2" / jats_path.name
)
if use_stream:
buf = BytesIO(jats_path.open("rb").read())
stream = DocumentStream(name=jats_path.name, stream=buf)
conv_result: ConversionResult = converter.convert(stream)
else:
conv_result: ConversionResult = converter.convert(jats_path)
doc: DoclingDocument = conv_result.document
pred_md: str = doc.export_to_markdown()
assert verify_export(pred_md, str(gt_path) + ".md"), "export to md"
pred_itxt: str = doc._export_to_indented_text(
max_text_len=70, explicit_tables=False
)
assert verify_export(pred_itxt, str(gt_path) + ".itxt"), (
"export to indented-text"
)
assert verify_document(doc, str(gt_path) + ".json", GENERATE), "export to json"
def test_e2e_jats_conversions_stream():
test_e2e_jats_conversions(use_stream=True)
def test_e2e_jats_conversions_no_stream():
test_e2e_jats_conversions(use_stream=False)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_e2e_conversion.py | tests/test_e2e_conversion.py | from pathlib import Path
from docling.datamodel.accelerator_options import AcceleratorDevice
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
from .test_data_gen_flag import GEN_TEST_DATA
from .verify_utils import verify_conversion_result_v2
GENERATE_V2 = GEN_TEST_DATA
SKIP_DOCTAGS_COMPARISON = ["2203.01017v2.pdf"]
def get_pdf_paths():
# Define the directory you want to search
directory = Path("./tests/data/pdf/")
# List all PDF files in the directory and its subdirectories
pdf_files = sorted(directory.rglob("*.pdf"))
return pdf_files
def get_converter():
pipeline_options = PdfPipelineOptions()
pipeline_options.do_ocr = False
pipeline_options.do_table_structure = True
pipeline_options.table_structure_options.do_cell_matching = True
pipeline_options.accelerator_options.device = AcceleratorDevice.CPU
pipeline_options.generate_parsed_pages = True
converter = DocumentConverter(
format_options={
InputFormat.PDF: PdfFormatOption(
pipeline_options=pipeline_options,
backend=PdfFormatOption().backend,
)
}
)
return converter
def test_e2e_pdfs_conversions():
pdf_paths = get_pdf_paths()
converter = get_converter()
for pdf_path in pdf_paths:
print(f"converting {pdf_path}")
doc_result: ConversionResult = converter.convert(pdf_path)
# Decide if to skip doctags comparison
verify_doctags = pdf_path.name not in SKIP_DOCTAGS_COMPARISON
verify_conversion_result_v2(
input_path=pdf_path,
doc_result=doc_result,
generate=GENERATE_V2,
verify_doctags=verify_doctags,
)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_code_formula.py | tests/test_code_formula.py | from pathlib import Path
from docling_core.types.doc import CodeItem, TextItem
from docling_core.types.doc.labels import CodeLanguageLabel, DocItemLabel
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.standard_pdf_pipeline import StandardPdfPipeline
def get_converter():
pipeline_options = PdfPipelineOptions()
pipeline_options.generate_page_images = True
pipeline_options.do_ocr = False
pipeline_options.do_table_structure = False
pipeline_options.do_code_enrichment = True
pipeline_options.do_formula_enrichment = True
converter = DocumentConverter(
format_options={
InputFormat.PDF: PdfFormatOption(
pipeline_cls=StandardPdfPipeline,
pipeline_options=pipeline_options,
)
}
)
return converter
def test_code_and_formula_conversion():
pdf_path = Path("tests/data/pdf/code_and_formula.pdf")
converter = get_converter()
print(f"converting {pdf_path}")
doc_result: ConversionResult = converter.convert(pdf_path)
results = doc_result.document.texts
code_blocks = [el for el in results if isinstance(el, CodeItem)]
assert len(code_blocks) == 1
gt = "function add(a, b) {\n return a + b;\n}\nconsole.log(add(3, 5));"
predicted = code_blocks[0].text.strip()
assert predicted == gt, f"mismatch in text {predicted=}, {gt=}"
assert code_blocks[0].code_language == CodeLanguageLabel.JAVASCRIPT
formula_blocks = [
el
for el in results
if isinstance(el, TextItem) and el.label == DocItemLabel.FORMULA
]
assert len(formula_blocks) == 1
gt = "a ^ { 2 } + 8 = 1 2"
predicted = formula_blocks[0].text
assert predicted == gt, f"mismatch in text {predicted=}, {gt=}"
def test_formula_conversion_with_page_range():
pdf_path = Path("tests/data/pdf/code_and_formula.pdf")
converter = get_converter()
print(f"converting {pdf_path} with page range")
doc_result: ConversionResult = converter.convert(pdf_path, page_range=(2, 2))
results = doc_result.document.texts
formula_blocks = [
el
for el in results
if isinstance(el, TextItem) and el.label == DocItemLabel.FORMULA
]
assert len(formula_blocks) == 1
gt = "a ^ { 2 } + 8 = 1 2"
predicted = formula_blocks[0].text
assert predicted == gt, f"mismatch in text {predicted=}, {gt=}"
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_asr_pipeline.py | tests/test_asr_pipeline.py | import sys
from pathlib import Path
from unittest.mock import Mock, patch
import pytest
from docling.datamodel import asr_model_specs
from docling.datamodel.base_models import ConversionStatus, InputFormat
from docling.datamodel.document import ConversionResult, InputDocument
from docling.datamodel.pipeline_options import AsrPipelineOptions
from docling.document_converter import AudioFormatOption, DocumentConverter
from docling.pipeline.asr_pipeline import AsrPipeline
pytestmark = pytest.mark.skipif(
sys.version_info >= (3, 14),
reason="Python 3.14 is not yet supported by whisper dependencies.",
)
@pytest.fixture
def test_audio_path():
return Path("./tests/data/audio/sample_10s.mp3")
def get_asr_converter():
"""Create a DocumentConverter configured for ASR with whisper_turbo model."""
pipeline_options = AsrPipelineOptions()
pipeline_options.asr_options = asr_model_specs.WHISPER_TINY
converter = DocumentConverter(
format_options={
InputFormat.AUDIO: AudioFormatOption(
pipeline_cls=AsrPipeline,
pipeline_options=pipeline_options,
)
}
)
return converter
def test_asr_pipeline_conversion(test_audio_path):
"""Test ASR pipeline conversion using whisper_turbo model on sample_10s.mp3."""
# Check if the test audio file exists
assert test_audio_path.exists(), f"Test audio file not found: {test_audio_path}"
converter = get_asr_converter()
# Convert the audio file
doc_result: ConversionResult = converter.convert(test_audio_path)
# Verify conversion was successful
assert doc_result.status == ConversionStatus.SUCCESS, (
f"Conversion failed with status: {doc_result.status}"
)
# Verify we have a document
assert doc_result.document is not None, "No document was created"
# Verify we have text content (transcribed audio)
texts = doc_result.document.texts
assert len(texts) > 0, "No text content found in transcribed audio"
# Print transcribed text for verification (optional, for debugging)
print(f"Transcribed text from {test_audio_path.name}:")
for i, text_item in enumerate(texts):
print(f" {i + 1}: {text_item.text}")
@pytest.fixture
def silent_audio_path():
"""Fixture to provide the path to a silent audio file."""
path = Path("./tests/data/audio/silent_1s.wav")
if not path.exists():
pytest.skip("Silent audio file for testing not found at " + str(path))
return path
def test_asr_pipeline_with_silent_audio(silent_audio_path):
"""
Test that the ASR pipeline correctly handles silent audio files
by returning a PARTIAL_SUCCESS status.
"""
converter = get_asr_converter()
doc_result: ConversionResult = converter.convert(silent_audio_path)
# Accept PARTIAL_SUCCESS or SUCCESS depending on runtime behavior
assert doc_result.status in (
ConversionStatus.PARTIAL_SUCCESS,
ConversionStatus.SUCCESS,
)
def test_has_text_and_determine_status_helpers():
"""Unit-test _has_text and _determine_status on a minimal ConversionResult."""
pipeline_options = AsrPipelineOptions()
pipeline_options.asr_options = asr_model_specs.WHISPER_TINY
# Avoid importing torch in decide_device by forcing CPU-only native path
pipeline_options.asr_options = asr_model_specs.WHISPER_TINY_NATIVE
pipeline = AsrPipeline(pipeline_options)
# Create an empty ConversionResult with proper InputDocument
doc_path = Path("./tests/data/audio/sample_10s.mp3")
from docling.backend.noop_backend import NoOpBackend
from docling.datamodel.base_models import InputFormat
input_doc = InputDocument(
path_or_stream=doc_path,
format=InputFormat.AUDIO,
backend=NoOpBackend,
)
conv_res = ConversionResult(input=input_doc)
# Simulate run result with empty document/texts
conv_res.status = ConversionStatus.SUCCESS
assert pipeline._has_text(conv_res.document) is False
assert pipeline._determine_status(conv_res) in (
ConversionStatus.PARTIAL_SUCCESS,
ConversionStatus.SUCCESS,
ConversionStatus.FAILURE,
)
# Now make a document with whitespace-only text to exercise empty detection
conv_res.document.texts = []
conv_res.errors = []
assert pipeline._has_text(conv_res.document) is False
# Emulate non-empty
class _T:
def __init__(self, t):
self.text = t
conv_res.document.texts = [_T(" "), _T("ok")]
assert pipeline._has_text(conv_res.document) is True
def test_is_backend_supported_noop_backend():
from pathlib import Path
from docling.backend.noop_backend import NoOpBackend
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
class _Dummy:
pass
# Create a proper NoOpBackend instance
doc_path = Path("./tests/data/audio/sample_10s.mp3")
input_doc = InputDocument(
path_or_stream=doc_path,
format=InputFormat.AUDIO,
backend=NoOpBackend,
)
noop_backend = NoOpBackend(input_doc, doc_path)
assert AsrPipeline.is_backend_supported(noop_backend) is True
assert AsrPipeline.is_backend_supported(_Dummy()) is False
def test_native_and_mlx_transcribe_language_handling(monkeypatch, tmp_path):
"""Cover language None/empty handling in model.transcribe wrappers."""
from docling.datamodel.accelerator_options import (
AcceleratorDevice,
AcceleratorOptions,
)
from docling.datamodel.pipeline_options_asr_model import (
InferenceAsrFramework,
InlineAsrMlxWhisperOptions,
InlineAsrNativeWhisperOptions,
)
from docling.pipeline.asr_pipeline import _MlxWhisperModel, _NativeWhisperModel
# Native
opts_n = InlineAsrNativeWhisperOptions(
repo_id="tiny",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=False,
timestamps=False,
word_timestamps=False,
temperature=0.0,
max_new_tokens=1,
max_time_chunk=1.0,
language="",
)
m = _NativeWhisperModel(
True, None, AcceleratorOptions(device=AcceleratorDevice.CPU), opts_n
)
m.model = Mock()
m.verbose = False
m.word_timestamps = False
# ensure language mapping occurs and transcribe is called
m.model.transcribe.return_value = {"segments": []}
m.transcribe(tmp_path / "a.wav")
m.model.transcribe.assert_called()
# MLX
opts_m = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-tiny-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="",
)
with patch.dict("sys.modules", {"mlx_whisper": Mock()}):
mm = _MlxWhisperModel(
True, None, AcceleratorOptions(device=AcceleratorDevice.MPS), opts_m
)
mm.mlx_whisper = Mock()
mm.mlx_whisper.transcribe.return_value = {"segments": []}
mm.transcribe(tmp_path / "b.wav")
mm.mlx_whisper.transcribe.assert_called()
def test_native_init_with_artifacts_path_and_device_logging(tmp_path):
"""Cover _NativeWhisperModel init path with artifacts_path passed."""
from docling.datamodel.accelerator_options import (
AcceleratorDevice,
AcceleratorOptions,
)
from docling.datamodel.pipeline_options_asr_model import (
InferenceAsrFramework,
InlineAsrNativeWhisperOptions,
)
from docling.pipeline.asr_pipeline import _NativeWhisperModel
opts = InlineAsrNativeWhisperOptions(
repo_id="tiny",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=False,
timestamps=False,
word_timestamps=False,
temperature=0.0,
max_new_tokens=1,
max_time_chunk=1.0,
language="en",
)
# Patch out whisper import side-effects during init by stubbing decide_device path only
model = _NativeWhisperModel(
True, tmp_path, AcceleratorOptions(device=AcceleratorDevice.CPU), opts
)
# swap real model for mock to avoid actual load
model.model = Mock()
assert model.enabled is True
def test_native_run_success_with_bytesio_builds_document(tmp_path):
"""Cover _NativeWhisperModel.run with BytesIO input and success path."""
from io import BytesIO
from docling.backend.noop_backend import NoOpBackend
from docling.datamodel.accelerator_options import (
AcceleratorDevice,
AcceleratorOptions,
)
from docling.datamodel.document import ConversionResult, InputDocument
from docling.datamodel.pipeline_options_asr_model import (
InferenceAsrFramework,
InlineAsrNativeWhisperOptions,
)
from docling.pipeline.asr_pipeline import _NativeWhisperModel
# Prepare InputDocument with BytesIO
audio_bytes = BytesIO(b"RIFF....WAVE")
input_doc = InputDocument(
path_or_stream=audio_bytes,
format=InputFormat.AUDIO,
backend=NoOpBackend,
filename="a.wav",
)
conv_res = ConversionResult(input=input_doc)
# Model with mocked underlying whisper
opts = InlineAsrNativeWhisperOptions(
repo_id="tiny",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=False,
timestamps=False,
word_timestamps=True,
temperature=0.0,
max_new_tokens=1,
max_time_chunk=1.0,
language="en",
)
model = _NativeWhisperModel(
True, None, AcceleratorOptions(device=AcceleratorDevice.CPU), opts
)
model.model = Mock()
model.verbose = False
model.word_timestamps = True
model.model.transcribe.return_value = {
"segments": [
{
"start": 0.0,
"end": 1.0,
"text": "hi",
"words": [{"start": 0.0, "end": 0.5, "word": "hi"}],
}
]
}
out = model.run(conv_res)
# Status is determined later by pipeline; here we validate document content
assert out.document is not None
assert len(out.document.texts) >= 1
def test_native_run_failure_sets_status(tmp_path):
"""Cover _NativeWhisperModel.run failure path when transcribe raises."""
from docling.backend.noop_backend import NoOpBackend
from docling.datamodel.accelerator_options import (
AcceleratorDevice,
AcceleratorOptions,
)
from docling.datamodel.document import ConversionResult, InputDocument
from docling.datamodel.pipeline_options_asr_model import (
InferenceAsrFramework,
InlineAsrNativeWhisperOptions,
)
from docling.pipeline.asr_pipeline import _NativeWhisperModel
# Create a real file so backend initializes
audio_path = tmp_path / "a.wav"
audio_path.write_bytes(b"RIFF....WAVE")
input_doc = InputDocument(
path_or_stream=audio_path, format=InputFormat.AUDIO, backend=NoOpBackend
)
conv_res = ConversionResult(input=input_doc)
opts = InlineAsrNativeWhisperOptions(
repo_id="tiny",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=False,
timestamps=False,
word_timestamps=False,
temperature=0.0,
max_new_tokens=1,
max_time_chunk=1.0,
language="en",
)
model = _NativeWhisperModel(
True, None, AcceleratorOptions(device=AcceleratorDevice.CPU), opts
)
model.model = Mock()
model.model.transcribe.side_effect = RuntimeError("boom")
out = model.run(conv_res)
assert out.status.name == "FAILURE"
def test_mlx_run_success_and_failure(tmp_path):
"""Cover _MlxWhisperModel.run success and failure paths."""
from docling.backend.noop_backend import NoOpBackend
from docling.datamodel.accelerator_options import (
AcceleratorDevice,
AcceleratorOptions,
)
from docling.datamodel.document import ConversionResult, InputDocument
from docling.datamodel.pipeline_options_asr_model import (
InferenceAsrFramework,
InlineAsrMlxWhisperOptions,
)
from docling.pipeline.asr_pipeline import _MlxWhisperModel
# Success path
# Create real files so backend initializes and hashes compute
path_ok = tmp_path / "b.wav"
path_ok.write_bytes(b"RIFF....WAVE")
input_doc = InputDocument(
path_or_stream=path_ok, format=InputFormat.AUDIO, backend=NoOpBackend
)
conv_res = ConversionResult(input=input_doc)
with patch.dict("sys.modules", {"mlx_whisper": Mock()}):
opts = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-tiny-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
)
model = _MlxWhisperModel(
True, None, AcceleratorOptions(device=AcceleratorDevice.MPS), opts
)
model.mlx_whisper = Mock()
model.mlx_whisper.transcribe.return_value = {
"segments": [{"start": 0.0, "end": 1.0, "text": "ok"}]
}
out = model.run(conv_res)
assert out.status.name == "SUCCESS"
# Failure path
path_fail = tmp_path / "c.wav"
path_fail.write_bytes(b"RIFF....WAVE")
input_doc2 = InputDocument(
path_or_stream=path_fail, format=InputFormat.AUDIO, backend=NoOpBackend
)
conv_res2 = ConversionResult(input=input_doc2)
with patch.dict("sys.modules", {"mlx_whisper": Mock()}):
opts2 = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-tiny-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
)
model2 = _MlxWhisperModel(
True, None, AcceleratorOptions(device=AcceleratorDevice.MPS), opts2
)
model2.mlx_whisper = Mock()
model2.mlx_whisper.transcribe.side_effect = RuntimeError("fail")
out2 = model2.run(conv_res2)
assert out2.status.name == "FAILURE"
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_e2e_ocr_conversion.py | tests/test_e2e_ocr_conversion.py | import sys
from pathlib import Path
from typing import List, Tuple
from docling.backend.docling_parse_backend import DoclingParseDocumentBackend
from docling.backend.docling_parse_v2_backend import DoclingParseV2DocumentBackend
from docling.backend.docling_parse_v4_backend import DoclingParseV4DocumentBackend
from docling.backend.pypdfium2_backend import PyPdfiumDocumentBackend
from docling.datamodel.accelerator_options import AcceleratorDevice
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import (
EasyOcrOptions,
OcrMacOptions,
OcrOptions,
PdfPipelineOptions,
RapidOcrOptions,
TesseractCliOcrOptions,
TesseractOcrOptions,
)
from docling.document_converter import DocumentConverter, PdfFormatOption
from .test_data_gen_flag import GEN_TEST_DATA
from .verify_utils import verify_conversion_result_v2
GENERATE_V2 = GEN_TEST_DATA
def get_pdf_paths():
# Define the directory you want to search
directory = Path("./tests/data_scanned")
# List all PDF files in the directory and its subdirectories
pdf_files = sorted(directory.rglob("ocr_test*.pdf"))
return pdf_files
def get_converter(ocr_options: OcrOptions):
pipeline_options = PdfPipelineOptions()
pipeline_options.do_ocr = True
pipeline_options.do_table_structure = True
pipeline_options.table_structure_options.do_cell_matching = True
pipeline_options.ocr_options = ocr_options
pipeline_options.accelerator_options.device = AcceleratorDevice.CPU
converter = DocumentConverter(
format_options={
InputFormat.PDF: PdfFormatOption(
pipeline_options=pipeline_options,
backend=DoclingParseDocumentBackend, # PdfFormatOption().backend,
)
}
)
return converter
def test_e2e_conversions():
pdf_paths = get_pdf_paths()
engines: List[Tuple[OcrOptions, bool]] = [
(TesseractOcrOptions(), True),
(TesseractCliOcrOptions(), True),
(EasyOcrOptions(), False),
(TesseractOcrOptions(psm=3), True),
(TesseractOcrOptions(force_full_page_ocr=True), True),
(TesseractOcrOptions(force_full_page_ocr=True, lang=["auto"]), True),
(TesseractCliOcrOptions(force_full_page_ocr=True), True),
(TesseractCliOcrOptions(force_full_page_ocr=True, lang=["auto"]), True),
(EasyOcrOptions(force_full_page_ocr=True), False),
]
for rapidocr_backend in ["onnxruntime", "torch"]:
if sys.version_info >= (3, 14) and rapidocr_backend == "onnxruntime":
# skip onnxruntime backend on Python 3.14
continue
engines.append((RapidOcrOptions(backend=rapidocr_backend), False))
engines.append(
(RapidOcrOptions(backend=rapidocr_backend, force_full_page_ocr=True), False)
)
engines.append(
(
RapidOcrOptions(
backend=rapidocr_backend,
force_full_page_ocr=True,
rec_font_path="test",
rapidocr_params={"Rec.font_path": None}, # overwrites rec_font_path
),
False,
)
)
# only works on mac
if "darwin" == sys.platform:
engines.append((OcrMacOptions(), True))
engines.append((OcrMacOptions(force_full_page_ocr=True), True))
for ocr_options, supports_rotation in engines:
print(
f"Converting with ocr_engine: {ocr_options.kind}, language: {ocr_options.lang}"
)
converter = get_converter(ocr_options=ocr_options)
for pdf_path in pdf_paths:
if not supports_rotation and "rotated" in pdf_path.name:
continue
print(f"converting {pdf_path}")
doc_result: ConversionResult = converter.convert(pdf_path)
verify_conversion_result_v2(
input_path=pdf_path,
doc_result=doc_result,
generate=GENERATE_V2,
fuzzy=True,
)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_data_gen_flag.py | tests/test_data_gen_flag.py | import os
from pydantic import TypeAdapter
GEN_TEST_DATA = TypeAdapter(bool).validate_python(os.getenv("DOCLING_GEN_TEST_DATA", 0))
def test_gen_test_data_flag():
assert not GEN_TEST_DATA
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_backend_mets_gbs.py | tests/test_backend_mets_gbs.py | from pathlib import Path
import pytest
from docling.backend.mets_gbs_backend import MetsGbsDocumentBackend, MetsGbsPageBackend
from docling.datamodel.base_models import BoundingBox, InputFormat
from docling.datamodel.document import InputDocument
@pytest.fixture
def test_doc_path():
return Path("tests/data/mets_gbs/32044009881525_select.tar.gz")
def _get_backend(pdf_doc):
in_doc = InputDocument(
path_or_stream=pdf_doc,
format=InputFormat.METS_GBS,
backend=MetsGbsDocumentBackend,
)
doc_backend = in_doc._backend
return doc_backend
def test_process_pages(test_doc_path):
doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)
for page_index in range(doc_backend.page_count()):
page_backend: MetsGbsPageBackend = doc_backend.load_page(page_index)
list(page_backend.get_text_cells())
# Clean up page backend after each iteration
page_backend.unload()
# Explicitly clean up document backend to prevent race conditions in CI
doc_backend.unload()
def test_get_text_from_rect(test_doc_path):
doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)
page_backend: MetsGbsPageBackend = doc_backend.load_page(0)
# Get the title text of the DocLayNet paper
textpiece = page_backend.get_text_in_rect(
bbox=BoundingBox(l=275, t=263, r=1388, b=311)
)
ref = "recently become prevalent that he who speaks"
assert textpiece.strip() == ref
# Explicitly clean up resources
page_backend.unload()
doc_backend.unload()
def test_crop_page_image(test_doc_path):
doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)
page_backend: MetsGbsPageBackend = doc_backend.load_page(0)
page_backend.get_page_image(
scale=2, cropbox=BoundingBox(l=270, t=587, r=1385, b=1995)
)
# im.show()
# Explicitly clean up resources
page_backend.unload()
doc_backend.unload()
def test_num_pages(test_doc_path):
doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)
assert doc_backend.is_valid()
assert doc_backend.page_count() == 3
# Explicitly clean up resources to prevent race conditions in CI
doc_backend.unload()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_backend_docling_parse_v2.py | tests/test_backend_docling_parse_v2.py | from pathlib import Path
import pytest
from docling.backend.docling_parse_v2_backend import (
DoclingParseV2DocumentBackend,
DoclingParseV2PageBackend,
)
from docling.datamodel.base_models import BoundingBox, InputFormat
from docling.datamodel.document import InputDocument
@pytest.fixture
def test_doc_path():
return Path("./tests/data/pdf/2206.01062.pdf")
def _get_backend(pdf_doc):
in_doc = InputDocument(
path_or_stream=pdf_doc,
format=InputFormat.PDF,
backend=DoclingParseV2DocumentBackend,
)
doc_backend = in_doc._backend
return doc_backend
def test_text_cell_counts():
pdf_doc = Path("./tests/data/pdf/redp5110_sampled.pdf")
doc_backend = _get_backend(pdf_doc)
for page_index in range(doc_backend.page_count()):
last_cell_count = None
for i in range(10):
page_backend: DoclingParseV2PageBackend = doc_backend.load_page(0)
cells = list(page_backend.get_text_cells())
if last_cell_count is None:
last_cell_count = len(cells)
if len(cells) != last_cell_count:
assert False, (
"Loading page multiple times yielded non-identical text cell counts"
)
last_cell_count = len(cells)
def test_get_text_from_rect(test_doc_path):
doc_backend = _get_backend(test_doc_path)
page_backend: DoclingParseV2PageBackend = doc_backend.load_page(0)
# Get the title text of the DocLayNet paper
textpiece = page_backend.get_text_in_rect(
bbox=BoundingBox(l=102, t=77, r=511, b=124)
)
ref = "DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis"
assert textpiece.strip() == ref
def test_crop_page_image(test_doc_path):
doc_backend = _get_backend(test_doc_path)
page_backend: DoclingParseV2PageBackend = doc_backend.load_page(0)
# Crop out "Figure 1" from the DocLayNet paper
page_backend.get_page_image(
scale=2, cropbox=BoundingBox(l=317, t=246, r=574, b=527)
)
# im.show()
def test_num_pages(test_doc_path):
doc_backend = _get_backend(test_doc_path)
doc_backend.page_count() == 9
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_backend_csv.py | tests/test_backend_csv.py | from pathlib import Path
from pytest import warns
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import ConversionResult, DoclingDocument
from docling.document_converter import DocumentConverter
from .test_data_gen_flag import GEN_TEST_DATA
from .verify_utils import verify_document, verify_export
GENERATE = GEN_TEST_DATA
def get_csv_paths():
# Define the directory you want to search
directory = Path("./tests/data/csv/")
# List all CSV files in the directory and its subdirectories
return sorted(directory.rglob("*.csv"))
def get_csv_path(name: str):
# Return the matching CSV file path
return Path(f"./tests/data/csv/{name}.csv")
def get_converter():
converter = DocumentConverter(allowed_formats=[InputFormat.CSV])
return converter
def test_e2e_valid_csv_conversions():
valid_csv_paths = get_csv_paths()
converter = get_converter()
for csv_path in valid_csv_paths:
print(f"converting {csv_path}")
gt_path = csv_path.parent.parent / "groundtruth" / "docling_v2" / csv_path.name
if csv_path.stem in (
"csv-too-few-columns",
"csv-too-many-columns",
"csv-inconsistent-header",
):
with warns(UserWarning, match="Inconsistent column lengths"):
conv_result: ConversionResult = converter.convert(csv_path)
else:
conv_result: ConversionResult = converter.convert(csv_path)
doc: DoclingDocument = conv_result.document
pred_md: str = doc.export_to_markdown()
assert verify_export(pred_md, str(gt_path) + ".md"), "export to md"
pred_itxt: str = doc._export_to_indented_text(
max_text_len=70, explicit_tables=False
)
assert verify_export(pred_itxt, str(gt_path) + ".itxt"), (
"export to indented-text"
)
assert verify_document(
pred_doc=doc,
gtfile=str(gt_path) + ".json",
generate=GENERATE,
), "export to json"
def test_e2e_invalid_csv_conversions():
csv_too_few_columns = get_csv_path("csv-too-few-columns")
csv_too_many_columns = get_csv_path("csv-too-many-columns")
csv_inconsistent_header = get_csv_path("csv-inconsistent-header")
converter = get_converter()
print(f"converting {csv_too_few_columns}")
with warns(UserWarning, match="Inconsistent column lengths"):
converter.convert(csv_too_few_columns)
print(f"converting {csv_too_many_columns}")
with warns(UserWarning, match="Inconsistent column lengths"):
converter.convert(csv_too_many_columns)
print(f"converting {csv_inconsistent_header}")
with warns(UserWarning, match="Inconsistent column lengths"):
converter.convert(csv_inconsistent_header)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_backend_asciidoc.py | tests/test_backend_asciidoc.py | import glob
import os
from pathlib import Path
from docling.backend.asciidoc_backend import (
DEFAULT_IMAGE_HEIGHT,
DEFAULT_IMAGE_WIDTH,
AsciiDocBackend,
)
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
def _get_backend(fname):
    """Return the AsciiDoc backend instance wired to *fname* via InputDocument."""
    return InputDocument(
        path_or_stream=fname,
        format=InputFormat.ASCIIDOC,
        backend=AsciiDocBackend,
    )._backend
def test_parse_picture():
    """_parse_picture extracts the image uri plus optional width/height attributes."""
    with_attrs = AsciiDocBackend._parse_picture(
        "image::images/example1.png[Example Image, width=200, height=150, align=center]"
    )
    assert with_attrs
    assert with_attrs.get("width", 0) == "200"
    assert with_attrs.get("height", 0) == "150"
    assert with_attrs.get("uri", "") == "images/example1.png"

    # No size attributes: only the uri should be present.
    bare = AsciiDocBackend._parse_picture(
        "image::renamed-bookmark.png[Renamed bookmark]"
    )
    assert bare
    assert "width" not in bare
    assert "height" not in bare
    assert bare.get("uri", "") == "renamed-bookmark.png"
def test_asciidocs_examples():
    """Convert every AsciiDoc sample; bootstrap Markdown groundtruth if missing.

    NOTE(review): when a groundtruth file exists it is read but never compared
    (the comparison assert below is commented out), so this test currently only
    checks that conversion does not raise.
    """
    fnames = sorted(glob.glob("./tests/data/asciidoc/*.asciidoc"))
    for fname in fnames:
        print(f"reading {fname}")
        bname = os.path.basename(fname)
        # Groundtruth Markdown lives next to the other docling_v2 fixtures.
        gname = os.path.join("./tests/data/groundtruth/docling_v2/", bname + ".md")
        doc_backend = _get_backend(Path(fname))
        doc = doc_backend.convert()
        pred_itdoc = doc._export_to_indented_text(max_text_len=16)
        print("\n\n", pred_itdoc)
        pred_mddoc = doc.export_to_markdown()
        print("\n\n", pred_mddoc)
        if os.path.exists(gname):
            with open(gname) as fr:
                fr.read()
            # assert pred_mddoc == true_mddoc, "pred_mddoc!=true_mddoc for asciidoc"
        else:
            # First run: write the prediction as new groundtruth.
            with open(gname, "w") as fw:
                fw.write(pred_mddoc)
    # print("\n\n", doc.export_to_markdown())
    assert True
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_backend_html.py | tests/test_backend_html.py | from io import BytesIO
from pathlib import Path, PurePath
from unittest.mock import Mock, mock_open, patch
import pytest
from bs4 import BeautifulSoup
from docling_core.types.doc import PictureItem
from docling_core.types.doc.document import ContentLayer
from pydantic import AnyUrl, ValidationError
from docling.backend.html_backend import HTMLDocumentBackend
from docling.datamodel.backend_options import HTMLBackendOptions
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import (
ConversionResult,
DoclingDocument,
InputDocument,
SectionHeaderItem,
)
from docling.document_converter import DocumentConverter, HTMLFormatOption
from .test_data_gen_flag import GEN_TEST_DATA
from .verify_utils import verify_document, verify_export
GENERATE = GEN_TEST_DATA
def test_html_backend_options():
    """Defaults and source_uri validation for HTMLBackendOptions."""
    defaults = HTMLBackendOptions()
    assert defaults.kind == "html"
    assert not defaults.fetch_images
    assert defaults.source_uri is None

    # A remote URL and a local path are both accepted as source_uri.
    remote = AnyUrl(url="http://example.com")
    assert HTMLBackendOptions(source_uri=remote).source_uri == remote
    local = PurePath("/local/path/to/file.html")
    assert HTMLBackendOptions(source_uri=local).source_uri == local

    # Anything else is rejected by pydantic validation.
    with pytest.raises(ValidationError, match="Input is not a valid path"):
        HTMLBackendOptions(source_uri=12345)
def test_resolve_relative_path():
    """_resolve_relative_path against local, remote and missing base paths."""
    html_path = Path("./tests/data/html/example_01.html")
    in_doc = InputDocument(
        path_or_stream=html_path,
        format=InputFormat.HTML,
        backend=HTMLDocumentBackend,
        filename="test",
    )
    html_doc = HTMLDocumentBackend(path_or_stream=html_path, in_doc=in_doc)

    # (base_path, input path, expected resolution)
    cases = [
        # local base: relative paths resolve, absolute ones pass through
        ("/local/path/to/file.html", "subdir/another.html", "/local/path/to/subdir/another.html"),
        ("/local/path/to/file.html", "/absolute/path/to/file.html", "/absolute/path/to/file.html"),
        # protocol-relative URL defaults to https
        ("http://my_host.com", "//example.com/file.html", "https://example.com/file.html"),
        # remote base: relative and root-relative paths join the host
        ("http://example.com", "subdir/file.html", "http://example.com/subdir/file.html"),
        ("http://example.com", "https://my_host.com/my_page.html", "https://my_host.com/my_page.html"),
        ("http://example.com", "/static/images/my_image.png", "http://example.com/static/images/my_image.png"),
        # no base path: input is returned unchanged
        (None, "subdir/file.html", "subdir/file.html"),
    ]
    for base, candidate, expected in cases:
        html_doc.base_path = base
        assert html_doc._resolve_relative_path(candidate) == expected
def test_heading_levels():
    """h2/h3 shift to levels 1/2 when the single h1 is taken as the title."""
    in_path = Path("tests/data/html/wiki_duck.html")
    in_doc = InputDocument(
        path_or_stream=in_path,
        format=InputFormat.HTML,
        backend=HTMLDocumentBackend,
    )
    doc = HTMLDocumentBackend(in_doc=in_doc, path_or_stream=in_path).convert()

    # Known headings and the level they must land on after the h1 shift.
    expected_levels = {"Etymology": 1, "Feeding": 2}
    found = set()
    for item, _ in doc.iterate_items():
        if not isinstance(item, SectionHeaderItem):
            continue
        if item.text in expected_levels:
            assert item.level == expected_levels[item.text]
            found.add(item.text)
    assert found == {"Etymology", "Feeding"}
def test_ordered_lists():
    """Ordered-list numbering honours a valid ``start`` attribute; invalid or
    negative values fall back to 1."""
    cases: list[tuple[bytes, str]] = [
        # no start attribute -> numbering begins at 1
        (
            b"<html><body><ol><li>1st item</li><li>2nd item</li></ol></body></html>",
            "1. 1st item\n2. 2nd item",
        ),
        (
            b'<html><body><ol start="1"><li>1st item</li><li>2nd item</li></ol></body></html>',
            "1. 1st item\n2. 2nd item",
        ),
        (
            b'<html><body><ol start="2"><li>1st item</li><li>2nd item</li></ol></body></html>',
            "2. 1st item\n3. 2nd item",
        ),
        # zero is a legal start value
        (
            b'<html><body><ol start="0"><li>1st item</li><li>2nd item</li></ol></body></html>',
            "0. 1st item\n1. 2nd item",
        ),
        # negative and non-numeric starts fall back to 1
        (
            b'<html><body><ol start="-5"><li>1st item</li><li>2nd item</li></ol></body></html>',
            "1. 1st item\n2. 2nd item",
        ),
        (
            b'<html><body><ol start="foo"><li>1st item</li><li>2nd item</li></ol></body></html>',
            "1. 1st item\n2. 2nd item",
        ),
    ]
    for idx, (raw, expected_md) in enumerate(cases):
        in_doc = InputDocument(
            path_or_stream=BytesIO(raw),
            format=InputFormat.HTML,
            backend=HTMLDocumentBackend,
            filename="test",
        )
        backend = HTMLDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(raw))
        doc: DoclingDocument = backend.convert()
        assert doc
        assert doc.export_to_markdown() == expected_md, f"Error in case {idx}"
def test_unicode_characters():
    """Heading text containing look-alike Unicode characters round-trips unchanged."""
    raw_html = "<html><body><h1>Hello World!</h1></body></html>".encode()  # noqa: RUF001
    in_doc = InputDocument(
        path_or_stream=BytesIO(raw_html),
        format=InputFormat.HTML,
        backend=HTMLDocumentBackend,
        filename="test",
    )
    converted = HTMLDocumentBackend(
        in_doc=in_doc,
        path_or_stream=BytesIO(raw_html),
    ).convert()
    assert converted.texts[0].text == "Hello World!"
def test_extract_parent_hyperlinks():
    """A hyperlink on an ancestor element is propagated to the extracted text."""
    html_path = Path("./tests/data/html/hyperlink_04.html")
    in_doc = InputDocument(
        path_or_stream=html_path,
        format=InputFormat.HTML,
        backend=HTMLDocumentBackend,
        filename="test",
    )
    backend = HTMLDocumentBackend(in_doc=in_doc, path_or_stream=html_path)
    div_tag = backend.soup.find("div")
    anchor = backend.soup.find("a")
    annotated = backend._extract_text_and_hyperlink_recursively(
        div_tag, find_parent_annotation=True
    )
    # The first extracted span must carry the href of the enclosing <a>.
    assert str(annotated[0].hyperlink) == anchor.get("href")
@pytest.fixture(scope="module")
def html_paths() -> list[Path]:
    """All HTML sample files under tests/data/html (recursive), sorted for
    stable test ordering."""
    return sorted(Path("./tests/data/html/").rglob("*.html"))
def get_converter():
    """Return a converter restricted to the HTML input format."""
    return DocumentConverter(allowed_formats=[InputFormat.HTML])
def test_e2e_html_conversions(html_paths):
    """Convert every HTML sample and compare md/itxt/json exports to groundtruth.

    With GENERATE set, the groundtruth files are (re)written instead.
    """
    converter = get_converter()
    for html_path in html_paths:
        # Groundtruth fixtures live in tests/data/groundtruth/docling_v2/.
        gt_path = (
            html_path.parent.parent / "groundtruth" / "docling_v2" / html_path.name
        )
        conv_result: ConversionResult = converter.convert(html_path)
        doc: DoclingDocument = conv_result.document
        pred_md: str = doc.export_to_markdown()
        assert verify_export(pred_md, str(gt_path) + ".md", generate=GENERATE), (
            "export to md"
        )
        pred_itxt: str = doc._export_to_indented_text(
            max_text_len=70, explicit_tables=False
        )
        assert verify_export(pred_itxt, str(gt_path) + ".itxt", generate=GENERATE), (
            "export to indented-text"
        )
        assert verify_document(doc, str(gt_path) + ".json", GENERATE)
@patch("docling.backend.html_backend.requests.get")
@patch("docling.backend.html_backend.open", new_callable=mock_open)
def test_e2e_html_conversion_with_images(mock_local, mock_remote):
    """Image fetching embeds pictures for both local and remote sources.

    Both fetch paths are mocked to serve the same PNG bytes, so the two
    resulting DoclingDocuments must be identical and each contain exactly one
    embedded picture. Decorators apply bottom-up: ``mock_local`` patches the
    backend's ``open`` and ``mock_remote`` patches ``requests.get``.
    """
    source = "tests/data/html/example_01.html"
    image_path = "tests/data/html/example_image_01.png"
    with open(image_path, "rb") as f:
        img_bytes = f.read()
    # fetching image locally
    mock_local.return_value.__enter__.return_value = BytesIO(img_bytes)
    backend_options = HTMLBackendOptions(
        enable_local_fetch=True, fetch_images=True, source_uri=source
    )
    converter = DocumentConverter(
        allowed_formats=[InputFormat.HTML],
        format_options={
            InputFormat.HTML: HTMLFormatOption(backend_options=backend_options)
        },
    )
    res_local = converter.convert(source)
    mock_local.assert_called_once()
    assert res_local.document
    num_pic: int = 0
    for element, _ in res_local.document.iterate_items():
        if isinstance(element, PictureItem):
            assert element.image
            num_pic += 1
    assert num_pic == 1, "No embedded picture was found in the converted file"
    # fetching image remotely
    mock_resp = Mock()
    mock_resp.status_code = 200
    mock_resp.content = img_bytes
    mock_remote.return_value = mock_resp
    source_location = "https://example.com/example_01.html"
    backend_options = HTMLBackendOptions(
        enable_remote_fetch=True, fetch_images=True, source_uri=source_location
    )
    converter = DocumentConverter(
        allowed_formats=[InputFormat.HTML],
        format_options={
            InputFormat.HTML: HTMLFormatOption(backend_options=backend_options)
        },
    )
    res_remote = converter.convert(source)
    # The relative img src must be resolved against the remote source_uri.
    mock_remote.assert_called_once_with(
        "https://example.com/example_image_01.png", stream=True
    )
    assert res_remote.document
    num_pic = 0
    for element, _ in res_remote.document.iterate_items():
        if isinstance(element, PictureItem):
            assert element.image
            assert element.image.mimetype == "image/png"
            num_pic += 1
    assert num_pic == 1, "No embedded picture was found in the converted file"
    # both methods should generate the same DoclingDocument
    assert res_remote.document == res_local.document
    # checking exported formats
    gt_path = (
        "tests/data/groundtruth/docling_v2/" + str(Path(source).stem) + "_images.html"
    )
    pred_md: str = res_local.document.export_to_markdown()
    assert verify_export(pred_md, gt_path + ".md", generate=GENERATE)
    assert verify_document(res_local.document, gt_path + ".json", GENERATE)
def test_html_furniture():
    """Content outside the main body (header text, footer) lands in the
    FURNITURE layer and is excluded from the default BODY export."""
    # NOTE(review): the trailing "</html" is missing its ">" — the lenient HTML
    # parser tolerates this; confirm it is intentional.
    raw_html = (
        b"<html><body><p>Initial content with some <strong>bold text</strong></p>"
        b"<h1>Main Heading</h1>"
        b"<p>Some Content</p>"
        b"<footer><p>Some Footer Content</p></footer></body></html"
    )
    in_doc = InputDocument(
        path_or_stream=BytesIO(raw_html),
        format=InputFormat.HTML,
        backend=HTMLDocumentBackend,
        filename="test",
    )
    doc: DoclingDocument = HTMLDocumentBackend(
        in_doc=in_doc,
        path_or_stream=BytesIO(raw_html),
    ).convert()

    # Default export: body only.
    assert doc.export_to_markdown() == "# Main Heading\n\nSome Content"

    # Including FURNITURE restores the leading text and the footer.
    both_layers = doc.export_to_markdown(
        included_content_layers={ContentLayer.BODY, ContentLayer.FURNITURE}
    )
    assert both_layers == (
        "Initial content with some **bold text**\n\n# Main Heading\n\nSome Content\n\n"
        "Some Footer Content"
    )
def test_fetch_remote_images(monkeypatch):
    """Image fetching only happens when fetch_images AND the matching
    enable_{local,remote}_fetch flag are set; otherwise a warning is emitted.

    NOTE(review): the ``monkeypatch`` fixture parameter appears unused —
    consider removing it.
    """
    source = "./tests/data/html/example_01.html"
    # no image fetching: the image_fetch flag is False
    backend_options = HTMLBackendOptions(
        fetch_images=False, source_uri="http://example.com"
    )
    converter = DocumentConverter(
        allowed_formats=[InputFormat.HTML],
        format_options={
            InputFormat.HTML: HTMLFormatOption(backend_options=backend_options)
        },
    )
    with patch("docling.backend.html_backend.requests.get") as mocked_get:
        res = converter.convert(source)
        mocked_get.assert_not_called()
        assert res.document
    # no image fetching: the source location is False and enable_local_fetch is False
    backend_options = HTMLBackendOptions(fetch_images=True)
    converter = DocumentConverter(
        allowed_formats=[InputFormat.HTML],
        format_options={
            InputFormat.HTML: HTMLFormatOption(backend_options=backend_options)
        },
    )
    with (
        patch("docling.backend.html_backend.requests.get") as mocked_get,
        pytest.warns(
            match="Fetching local resources is only allowed when set explicitly"
        ),
    ):
        res = converter.convert(source)
        mocked_get.assert_not_called()
        assert res.document
    # no image fetching: the enable_remote_fetch is False
    backend_options = HTMLBackendOptions(
        fetch_images=True, source_uri="http://example.com"
    )
    converter = DocumentConverter(
        allowed_formats=[InputFormat.HTML],
        format_options={
            InputFormat.HTML: HTMLFormatOption(backend_options=backend_options)
        },
    )
    with (
        patch("docling.backend.html_backend.requests.get") as mocked_get,
        pytest.warns(
            match="Fetching remote resources is only allowed when set explicitly"
        ),
    ):
        res = converter.convert(source)
        mocked_get.assert_not_called()
        assert res.document
    # image fetching: all conditions apply, source location is remote
    # (the bare Mock returned by requests.get is not valid image data, hence
    # the "bytes-like object is required" warning from the backend)
    backend_options = HTMLBackendOptions(
        enable_remote_fetch=True, fetch_images=True, source_uri="http://example.com"
    )
    converter = DocumentConverter(
        allowed_formats=[InputFormat.HTML],
        format_options={
            InputFormat.HTML: HTMLFormatOption(backend_options=backend_options)
        },
    )
    with (
        patch("docling.backend.html_backend.requests.get") as mocked_get,
        pytest.warns(match="a bytes-like object is required"),
    ):
        res = converter.convert(source)
        mocked_get.assert_called_once()
        assert res.document
    # image fetching: all conditions apply, local fetching allowed
    backend_options = HTMLBackendOptions(
        enable_local_fetch=True, fetch_images=True, source_uri=source
    )
    converter = DocumentConverter(
        allowed_formats=[InputFormat.HTML],
        format_options={
            InputFormat.HTML: HTMLFormatOption(backend_options=backend_options)
        },
    )
    with (
        patch("docling.backend.html_backend.open") as mocked_open,
        pytest.warns(match="a bytes-like object is required"),
    ):
        res = converter.convert(source)
        mocked_open.assert_called_once_with(
            "tests/data/html/example_image_01.png", "rb"
        )
        assert res.document
def test_is_rich_table_cell(html_paths):
    """Test the function is_rich_table_cell.

    Walks every cell of every table in html_rich_table_cells.html and compares
    the backend's rich/plain classification against the expected flags in
    ``gt_cells`` (one bool list per table, in document cell order).
    """
    name = "html_rich_table_cells.html"
    path = next(item for item in html_paths if item.name == name)
    in_doc = InputDocument(
        path_or_stream=path,
        format=InputFormat.HTML,
        backend=HTMLDocumentBackend,
        filename=name,
    )
    backend = HTMLDocumentBackend(
        in_doc=in_doc,
        path_or_stream=path,
    )
    # Expected is-rich flag per cell, keyed by table index.
    gt_cells: dict[int, list[bool]] = {}
    # table: Basic duck facts
    gt_cells[0] = [
        False,
        False,
        False,
        False,
        False,
        False,
        False,
        False,
        False,
        False,
        False,
        True,
        False,
        True,
        True,
    ]
    # table: Duck family tree
    gt_cells[1] = [False, False, True, False, True, False, True, False]
    # table: Duck-related actions
    gt_cells[2] = [False, True, True, True, False, True, True]
    # table: nested table
    gt_cells[3] = [False, False, False, False, False, False]
    # table: Famous Ducks with Images
    gt_cells[4] = [
        False,
        False,
        False,
        False,
        False,
        True,
        False,
        False,
        True,
        False,
        False,
        True,
        False,
        False,
        False,
    ]
    for idx_t, table in enumerate(backend.soup.find_all("table")):
        gt_it = iter(gt_cells[idx_t])
        num_cells = 0
        # Only direct thead/tbody children: nested tables are handled by their
        # own iteration of the outer find_all.
        containers = table.find_all(["thead", "tbody"], recursive=False)
        for part in containers:
            for idx_r, row in enumerate(part.find_all("tr", recursive=False)):
                cells = row.find_all(["td", "th"], recursive=False)
                if not cells:
                    continue
                for idx_c, cell in enumerate(cells):
                    assert next(gt_it) == backend._is_rich_table_cell(cell), (
                        f"Wrong cell type in table {idx_t}, row {idx_r}, col {idx_c} "
                        f"with text: {cell.text}"
                    )
                    num_cells += 1
        # Guard against silently skipping cells (e.g. rows without td/th).
        assert num_cells == len(gt_cells[idx_t]), (
            f"Cell number does not match in table {idx_t}"
        )
# (input HTML, expected HTML) cases for test_fix_invalid_paragraph_structure:
# block-level elements (headings, paragraphs, tables) illegally nested inside
# a <p> must be hoisted out into siblings.
data_fix_par = [
    (
        "<p>Text<h2>Heading</h2>More text</p>",
        "<p>Text</p><h2>Heading</h2><p>More text</p>",
    ),
    (
        "<html><body><p>Some text<h2>A heading</h2>More text</p></body></html>",
        "<html><body><p>Some text</p><h2>A heading</h2><p>More text</p></body></html>",
    ),
    (
        "<p>Some text<h2>A heading</h2><i>Italics</i></p>",
        "<p>Some text</p><h2>A heading</h2><p><i>Italics</i></p>",
    ),
    (
        "<p>Some text<p>Another paragraph</p>More text</p>",
        "<p>Some text</p><p>Another paragraph</p><p>More text</p>",
    ),
    (
        "<p><table><tr><th>Name</th><th>Age</th></tr><tr><td>Alice</td><td>29</td></tr>"
        "<tr><td>Bob</td><td>34</td></tr></table></p>",
        "<table><tr><th>Name</th><th>Age</th></tr><tr><td>Alice</td><td>29</td></tr>"
        "<tr><td>Bob</td><td>34</td></tr></table>",
    ),
]
@pytest.mark.parametrize("html,expected", data_fix_par)
def test_fix_invalid_paragraph_structure(html, expected):
    """_fix_invalid_paragraph_structure hoists block elements out of <p> tags."""
    parsed = BeautifulSoup(html, "html.parser")
    HTMLDocumentBackend._fix_invalid_paragraph_structure(parsed)
    assert str(parsed) == expected
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_pdf_password.py | tests/test_pdf_password.py | from dataclasses import dataclass
from pathlib import Path
from typing import Iterable
import pytest
from docling.backend.docling_parse_v4_backend import DoclingParseV4DocumentBackend
from docling.backend.pypdfium2_backend import (
PyPdfiumDocumentBackend,
)
from docling.datamodel.backend_options import PdfBackendOptions
from docling.datamodel.base_models import ConversionStatus, InputFormat
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
@pytest.fixture
def test_doc_path():
    # Password-protected sample PDF; the password ("1234") is supplied via the
    # backend options built in converter_opts_gen.
    return Path("./tests/data/pdf_password/2206.01062_pg3.pdf")
@dataclass
class TestOption:
    """A named PDF format-option bundle for parametrized conversion tests.

    NOTE(review): the ``Test*`` name makes pytest attempt to collect this
    dataclass as a test class (emitting a collection warning) — consider
    renaming it, e.g. to ``BackendCase``.
    """

    # PDF format options (pipeline, backend, backend options) under test
    options: PdfFormatOption
    # human-readable id used for test parametrization
    name: str
def converter_opts_gen() -> Iterable[TestOption]:
    """Yield one TestOption per PDF backend, all configured with password "1234".

    OCR and table structure are disabled to keep the conversion fast; only the
    password-protected open path is under test.
    """
    pipeline_options = PdfPipelineOptions(
        do_ocr=False,
        do_table_structure=False,
    )
    # Password matching the encrypted fixture in tests/data/pdf_password/.
    backend_options = PdfBackendOptions(password="1234")
    yield TestOption(
        options=PdfFormatOption(
            pipeline_options=pipeline_options,
            backend=PyPdfiumDocumentBackend,
            backend_options=backend_options,
        ),
        name="PyPdfium",
    )
    yield TestOption(
        options=PdfFormatOption(
            pipeline_options=pipeline_options,
            backend=DoclingParseV4DocumentBackend,
            backend_options=backend_options,
        ),
        name="DoclingParseV4",
    )
@pytest.mark.parametrize("test_options", converter_opts_gen(), ids=lambda o: o.name)
def test_get_text_from_rect(test_doc_path: Path, test_options: TestOption):
    """A password-protected PDF converts successfully with each PDF backend.

    Fix: the previous ``@pytest.mark.asyncio`` marker was removed — this is a
    plain synchronous test function, and pytest-asyncio (strict mode) rejects
    non-coroutine functions carrying the asyncio marker.
    """
    converter = DocumentConverter(
        format_options={InputFormat.PDF: test_options.options}
    )
    res = converter.convert(test_doc_path)
    assert res.status == ConversionStatus.SUCCESS
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_backend_msexcel.py | tests/test_backend_msexcel.py | import logging
from io import BytesIO
from pathlib import Path
import pytest
from openpyxl import load_workbook
from docling.backend.msexcel_backend import MsExcelDocumentBackend
from docling.datamodel.backend_options import MsExcelBackendOptions
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import ConversionResult, DoclingDocument, InputDocument
from docling.document_converter import DocumentConverter, ExcelFormatOption
from .test_data_gen_flag import GEN_TEST_DATA
from .verify_utils import verify_document, verify_export
_log = logging.getLogger(__name__)
GENERATE = GEN_TEST_DATA
def get_excel_paths():
    """Return all .xlsx and .xlsm sample files under tests/data/xlsx,
    each extension group sorted by path (xlsx first, then xlsm)."""
    directory = Path("./tests/data/xlsx/")
    return sorted(directory.rglob("*.xlsx")) + sorted(directory.rglob("*.xlsm"))
def get_converter():
    """Return a converter restricted to the XLSX input format."""
    return DocumentConverter(allowed_formats=[InputFormat.XLSX])
@pytest.fixture(scope="module")
def documents() -> list[tuple[Path, DoclingDocument]]:
    """Convert every Excel sample once per module.

    Returns:
        (groundtruth path, converted document) pairs, one per sample file.
    """
    # NOTE: the local annotation previously said list[dict[...]] but tuples
    # are appended below; corrected to match the return annotation.
    documents: list[tuple[Path, DoclingDocument]] = []
    excel_paths = get_excel_paths()
    converter = get_converter()
    for excel_path in excel_paths:
        _log.debug(f"converting {excel_path}")
        gt_path = (
            excel_path.parent.parent / "groundtruth" / "docling_v2" / excel_path.name
        )
        conv_result: ConversionResult = converter.convert(excel_path)
        doc: DoclingDocument = conv_result.document
        assert doc, f"Failed to convert document from file {gt_path}"
        documents.append((gt_path, doc))
    return documents
def test_e2e_excel_conversions(documents) -> None:
    """Compare md/itxt/json exports of every converted sample to groundtruth."""
    for gt_path, doc in documents:
        pred_md: str = doc.export_to_markdown()
        assert verify_export(pred_md, str(gt_path) + ".md"), "export to md"
        pred_itxt: str = doc._export_to_indented_text(
            max_text_len=70, explicit_tables=False
        )
        assert verify_export(pred_itxt, str(gt_path) + ".itxt"), (
            "export to indented-text"
        )
        assert verify_document(doc, str(gt_path) + ".json", GENERATE), (
            "document document"
        )
def test_pages(documents) -> None:
    """Test the page count and page size of converted documents.

    Args:
        documents: The paths and converted documents.
    """
    # number of pages from the backend method
    path = next(item for item in get_excel_paths() if item.stem == "xlsx_01")
    in_doc = InputDocument(
        path_or_stream=path,
        format=InputFormat.XLSX,
        filename=path.stem,
        backend=MsExcelDocumentBackend,
    )
    backend = MsExcelDocumentBackend(in_doc=in_doc, path_or_stream=path)
    assert backend.page_count() == 4
    # number of pages from the converted document
    doc = next(item for path, item in documents if path.stem == "xlsx_01")
    assert len(doc.pages) == 4
    # page sizes as number of cells (width, height); the last sheet is empty
    assert doc.pages.get(1).size.as_tuple() == (3.0, 7.0)
    assert doc.pages.get(2).size.as_tuple() == (9.0, 18.0)
    assert doc.pages.get(3).size.as_tuple() == (13.0, 36.0)
    assert doc.pages.get(4).size.as_tuple() == (0.0, 0.0)
def test_chartsheet(documents) -> None:
    """Test the conversion of Chartsheets.

    Args:
        documents: The paths and converted documents.
    """
    doc = next(item for path, item in documents if path.stem == "xlsx_03_chartsheet")
    assert len(doc.pages) == 2
    # Chartsheet content is for now ignored: the sheet group exists but its
    # page has zero size.
    assert doc.groups[1].name == "sheet: Duck Chart"
    assert doc.pages[2].size.height == 0
    assert doc.pages[2].size.width == 0
def test_chartsheet_data_values(documents) -> None:
    """Verify calculated values (not formulas) are extracted from xlsx_03_chartsheet.

    The file holds duck observations; the 2024 row totals 310 ducks. Both
    "2024" and "310" must therefore appear in the parsed tables — possibly in
    different tables, since the parser may split them.

    Args:
        documents: The paths and converted documents.
    """
    doc = next(item for path, item in documents if path.stem == "xlsx_03_chartsheet")
    tables = list(doc.tables)
    assert len(tables) > 0, "Should have at least one table"

    # Locate the cell holding the year 2024.
    cell_2024 = next(
        (
            cell
            for table in tables
            for cell in table.data.table_cells
            if cell.text == "2024"
        ),
        None,
    )
    assert cell_2024 is not None, "Should find a table containing year 2024"
    assert cell_2024.start_row_offset_idx is not None, "Should find row index for 2024"

    # The computed total (310) must also be present somewhere — if it were
    # extracted as a formula string this lookup would fail.
    found_310 = any(
        cell.text == "310" for table in tables for cell in table.data.table_cells
    )
    assert found_310, "Should find the value 310 (total ducks for 2024) in the document"
def test_inflated_rows_handling(documents) -> None:
    """Test that files with inflated max_row are handled correctly.

    xlsx_04_inflated.xlsx has inflated max_row (1,048,496) but only 7 rows of actual data.
    This test verifies that our backend correctly identifies true data bounds.
    """
    # First, verify the file has inflated max_row using openpyxl directly
    path = next(item for item in get_excel_paths() if item.stem == "xlsx_04_inflated")
    wb = load_workbook(path)
    ws = wb.active
    reported_max_row = ws.max_row
    # Assert that openpyxl reports inflated max_row — otherwise the fixture no
    # longer exercises the code path this test exists for.
    assert reported_max_row > 100000, (
        f"xlsx_04_inflated.xlsx should have inflated max_row (expected >100k, got {reported_max_row:,}). "
        f"This test file is designed to verify proper handling of Excel files with inflated row counts."
    )
    _log.info(
        f"xlsx_04_inflated.xlsx - Openpyxl reported max_row: {reported_max_row:,}"
    )
    # Now test that our backend handles it correctly
    in_doc = InputDocument(
        path_or_stream=path,
        format=InputFormat.XLSX,
        filename=path.stem,
        backend=MsExcelDocumentBackend,
    )
    backend = MsExcelDocumentBackend(in_doc=in_doc, path_or_stream=path)
    # Verify backend detects correct number of pages (should be 4, like test-01)
    page_count = backend.page_count()
    assert page_count == 4, (
        f"Backend should detect 4 pages (same as test-01), got {page_count}"
    )
    # Verify converted document has correct pages
    doc = next(item for path, item in documents if path.stem == "xlsx_04_inflated")
    assert len(doc.pages) == 4, f"Document should have 4 pages, got {len(doc.pages)}"
    # Verify page sizes match expected dimensions (same as test-01)
    # These should reflect actual data, not inflated row counts
    assert doc.pages.get(1).size.as_tuple() == (3.0, 7.0), (
        f"Page 1 should be 3x7 cells, got {doc.pages.get(1).size.as_tuple()}"
    )
    assert doc.pages.get(2).size.as_tuple() == (9.0, 18.0), (
        f"Page 2 should be 9x18 cells, got {doc.pages.get(2).size.as_tuple()}"
    )
    assert doc.pages.get(3).size.as_tuple() == (13.0, 36.0), (
        f"Page 3 should be 13x36 cells, got {doc.pages.get(3).size.as_tuple()}"
    )
    assert doc.pages.get(4).size.as_tuple() == (0.0, 0.0), (
        f"Page 4 should be 0x0 cells (empty), got {doc.pages.get(4).size.as_tuple()}"
    )
    _log.info(
        f"✓ Successfully handled inflated max_row: "
        f"reported {reported_max_row:,} rows, "
        f"correctly processed as {page_count} pages with proper dimensions"
    )
def test_table_with_title():
    """Test that singleton cells with non-numeric content are treated as TextItem.

    When treat_singleton_as_text option is enabled, 1x1 tables containing non-numeric
    text should be converted to TextItem instead of TableItem. This test verifies that
    xlsx_05_table_with_title.xlsx is correctly parsed with this option.
    """
    path = next(
        item for item in get_excel_paths() if item.stem == "xlsx_05_table_with_title"
    )
    # Create converter with treat_singleton_as_text=True
    options = MsExcelBackendOptions(treat_singleton_as_text=True)
    format_options = {InputFormat.XLSX: ExcelFormatOption(backend_options=options)}
    converter = DocumentConverter(
        allowed_formats=[InputFormat.XLSX], format_options=format_options
    )
    conv_result: ConversionResult = converter.convert(path)
    doc: DoclingDocument = conv_result.document
    # With treat_singleton_as_text=True, the singleton title cell should be a TextItem
    texts = list(doc.texts)
    tables = list(doc.tables)
    assert len(texts) == 1, f"Should have 1 text item (the title), got {len(texts)}"
    assert len(tables) == 1, f"Should have 1 table, got {len(tables)}"
    # Verify the text item contains the title
    assert texts[0].text == "Number of freshwater ducks per year", (
        f"Text should be 'Number of freshwater ducks per year', got '{texts[0].text}'"
    )
    # Verify table dimensions (header row + 6 data rows, year/count columns)
    table = tables[0]
    assert table.data.num_rows == 7, (
        f"Table should have 7 rows, got {table.data.num_rows}"
    )
    assert table.data.num_cols == 2, (
        f"Table should have 2 columns, got {table.data.num_cols}"
    )
def test_bytesio_stream():
    """Test that Excel files can be loaded from BytesIO streams.

    This test verifies that the BytesIO code path in the backend is working
    correctly, ensuring that data_only=True is applied when loading workbooks
    from streams.
    """
    # Get a test Excel file
    path = next(item for item in get_excel_paths() if item.stem == "xlsx_01")
    # Load the file into a BytesIO stream.
    # Fix: read_bytes() opens and closes the file itself; the previous
    # path.open("rb").read() left the file handle unclosed.
    buf = BytesIO(path.read_bytes())
    # Create an InputDocument with the BytesIO stream
    in_doc = InputDocument(
        path_or_stream=buf,
        format=InputFormat.XLSX,
        filename=path.stem,
        backend=MsExcelDocumentBackend,
    )
    # Initialize the backend with the BytesIO stream
    backend = MsExcelDocumentBackend(in_doc=in_doc, path_or_stream=buf)
    # Verify the backend is valid
    assert backend.is_valid(), "Backend should be valid when loaded from BytesIO"
    # Verify page count matches expected value
    assert backend.page_count() == 4, "Should detect 4 pages from BytesIO stream"
    # Convert the document
    doc = backend.convert()
    # Verify the document was converted successfully
    assert doc is not None, "Document should be converted from BytesIO stream"
    assert len(doc.pages) == 4, "Document should have 4 pages"
    # Verify page sizes match expected dimensions (in cells, see test_pages)
    assert doc.pages.get(1).size.as_tuple() == (3.0, 7.0)
    assert doc.pages.get(2).size.as_tuple() == (9.0, 18.0)
    assert doc.pages.get(3).size.as_tuple() == (13.0, 36.0)
    assert doc.pages.get(4).size.as_tuple() == (0.0, 0.0)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/test_backend_webp.py | tests/test_backend_webp.py | import sys
from pathlib import Path
from typing import List
from pydantic.type_adapter import R
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import ConversionResult, DoclingDocument
from docling.datamodel.pipeline_options import (
EasyOcrOptions,
OcrMacOptions,
OcrOptions,
RapidOcrOptions,
TesseractCliOcrOptions,
TesseractOcrOptions,
)
from docling.document_converter import DocumentConverter, ImageFormatOption
from tests.verify_utils import verify_conversion_result_v2
from .test_data_gen_flag import GEN_TEST_DATA
GENERATE = GEN_TEST_DATA
def get_webp_paths():
    """Return all WEBP sample files under tests/data/webp (recursive), sorted."""
    return sorted(Path("./tests/data/webp/").rglob("*.webp"))
def get_converter(ocr_options: OcrOptions):
    """Return an image-only converter wired to the given OCR engine options."""
    fmt_option = ImageFormatOption()
    fmt_option.pipeline_options.ocr_options = ocr_options
    return DocumentConverter(
        format_options={InputFormat.IMAGE: fmt_option},
        allowed_formats=[InputFormat.IMAGE],
    )
def test_e2e_webp_conversions():
    """Convert every WEBP sample with each available OCR engine and verify
    the result against groundtruth (fuzzy match, since OCR output varies)."""
    webp_paths = get_webp_paths()
    # Engine matrix: each engine in default and force-full-page-OCR mode.
    engines: List[OcrOptions] = [
        EasyOcrOptions(),
        TesseractOcrOptions(),
        TesseractCliOcrOptions(),
        EasyOcrOptions(force_full_page_ocr=True),
        TesseractOcrOptions(force_full_page_ocr=True),
        TesseractOcrOptions(force_full_page_ocr=True, lang=["auto"]),
        TesseractCliOcrOptions(force_full_page_ocr=True),
        TesseractCliOcrOptions(force_full_page_ocr=True, lang=["auto"]),
    ]
    # rapidocr is only available for Python >=3.6,<3.14
    if sys.version_info < (3, 14):
        engines.append(RapidOcrOptions())
        engines.append(RapidOcrOptions(force_full_page_ocr=True))
    # only works on mac
    if "darwin" == sys.platform:
        engines.append(OcrMacOptions())
        engines.append(OcrMacOptions(force_full_page_ocr=True))
    for ocr_options in engines:
        print(
            f"Converting with ocr_engine: {ocr_options.kind}, language: {ocr_options.lang}"
        )
        converter = get_converter(ocr_options=ocr_options)
        for webp_path in webp_paths:
            print(f"converting {webp_path}")
            doc_result: ConversionResult = converter.convert(
                webp_path, raises_on_error=True
            )
            # fuzzy=True: OCR output is allowed to drift within a small
            # normalized edit distance of the groundtruth.
            verify_conversion_result_v2(
                input_path=webp_path,
                doc_result=doc_result,
                generate=GENERATE,
                fuzzy=True,
            )
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/tests/verify_utils.py | tests/verify_utils.py | import json
import os
from pathlib import Path
from typing import Optional
import pytest
from docling_core.types.doc import (
CodeItem,
DocItem,
DoclingDocument,
FormulaItem,
PictureItem,
TableItem,
TextItem,
)
from docling_core.types.legacy_doc.document import ExportedCCSDocument as DsDocument
from PIL import Image as PILImage
from pydantic import BaseModel, TypeAdapter
from pydantic.json import pydantic_encoder
from docling.datamodel.base_models import ConversionStatus, Page
from docling.datamodel.document import ConversionResult
COORD_PREC = 2 # decimal places for coordinates
CONFID_PREC = 3 # decimal places for confidence
class _TestPagesMeta(BaseModel):
    """Per-page metadata persisted in ground-truth files for regression checks."""
    # Number of parsed cells on the page (``len(page.cells)``).
    num_cells: int
    @classmethod
    def from_page(cls, page: Page):
        """Build the metadata record for a single pipeline ``Page``."""
        return cls(num_cells=len(page.cells))
def levenshtein(str1: str, str2: str) -> int:
    """Compute the Levenshtein edit distance between two strings.

    Uses two rolling row buffers, so memory stays proportional to the
    shorter string rather than the full DP matrix.
    """
    # Keep the shorter string in the outer loop to minimize the buffer size.
    if len(str1) > len(str2):
        str1, str2 = str2, str1
    prev = list(range(len(str2) + 1))
    curr = [0] * (len(str2) + 1)
    # Fill the distance table one row per character of the shorter string.
    for row, ch_a in enumerate(str1, start=1):
        curr[0] = row
        for col, ch_b in enumerate(str2, start=1):
            cost_insert = prev[col] + 1
            cost_delete = curr[col - 1] + 1
            cost_substitute = prev[col - 1] + (ch_a != ch_b)
            curr[col] = min(cost_insert, cost_delete, cost_substitute)
        prev, curr = curr, prev
    # After the final swap, the completed row is ``prev``.
    return prev[-1]
def verify_text(gt: str, pred: str, fuzzy: bool, fuzzy_threshold: float = 0.4):
    """Assert *pred* matches *gt*, exactly or within a fuzzy edit-distance ratio.

    An empty ground truth always requires an exact match. Returns True on success.
    """
    exact_required = not fuzzy or len(gt) == 0
    if exact_required:
        assert gt == pred, f"{gt}!={pred}"
    else:
        # Normalized edit distance relative to the ground-truth length.
        ratio = levenshtein(gt, pred) / len(gt)
        assert ratio < fuzzy_threshold, f"{gt}!~{pred}"
    return True
def verify_cells(
    doc_pred_pages: list[_TestPagesMeta], doc_true_pages: list[_TestPagesMeta]
):
    """Assert predicted and ground-truth pages agree in count and per-page cell counts."""
    assert len(doc_pred_pages) == len(doc_true_pages), (
        "pred- and true-doc do not have the same number of pages"
    )
    # Lengths are equal at this point, so a plain zip walks every page pair.
    for true_page, pred_page in zip(doc_true_pages, doc_pred_pages):
        num_true_cells = true_page.num_cells
        num_pred_cells = pred_page.num_cells
        assert num_true_cells == num_pred_cells, (
            f"num_true_cells!=num_pred_cells {num_true_cells}!={num_pred_cells}"
        )
    return True
# def verify_maintext(doc_pred: DsDocument, doc_true: DsDocument):
# assert doc_true.main_text is not None, "doc_true cannot be None"
# assert doc_pred.main_text is not None, "doc_true cannot be None"
#
# assert len(doc_true.main_text) == len(
# doc_pred.main_text
# ), f"document has different length of main-text than expected. {len(doc_true.main_text)}!={len(doc_pred.main_text)}"
#
# for l, true_item in enumerate(doc_true.main_text):
# pred_item = doc_pred.main_text[l]
# # Validate type
# assert (
# true_item.obj_type == pred_item.obj_type
# ), f"Item[{l}] type does not match. expected[{true_item.obj_type}] != predicted [{pred_item.obj_type}]"
#
#     # Validate text cells
# if isinstance(true_item, BaseText):
# assert isinstance(
# pred_item, BaseText
# ), f"{pred_item} is not a BaseText element, but {true_item} is."
# assert true_item.text == pred_item.text
#
# return True
def verify_tables_v1(doc_pred: DsDocument, doc_true: DsDocument, fuzzy: bool):
    """Assert that the legacy-format (v1) predicted tables match the ground truth.

    Checks the table count, per-table row/column counts, per-cell text
    (optionally fuzzy via ``verify_text``) and per-cell type. Returns True
    when everything matches.
    """
    if doc_true.tables is None:
        # No tables to check
        assert doc_pred.tables is None, "not expecting any table on this document"
        return True
    assert doc_pred.tables is not None, "no tables predicted, but expected in doc_true"
    assert len(doc_true.tables) == len(doc_pred.tables), (
        "document has different count of tables than expected."
    )
    for ix, true_item in enumerate(doc_true.tables):
        pred_item = doc_pred.tables[ix]
        assert true_item.num_rows == pred_item.num_rows, (
            "table does not have the same #-rows"
        )
        assert true_item.num_cols == pred_item.num_cols, (
            "table does not have the same #-cols"
        )
        assert true_item.data is not None, "documents are expected to have table data"
        assert pred_item.data is not None, "documents are expected to have table data"
        print("True: \n", true_item.export_to_dataframe().to_markdown())
        # Bug fix: previously printed the ground-truth table twice; show the prediction.
        print("Pred: \n", pred_item.export_to_dataframe().to_markdown())
        for i, row in enumerate(true_item.data):
            for j, col in enumerate(true_item.data[i]):
                # Cell text must match (edit-distance tolerance when fuzzy=True).
                verify_text(
                    true_item.data[i][j].text, pred_item.data[i][j].text, fuzzy=fuzzy
                )
                assert true_item.data[i][j].obj_type == pred_item.data[i][j].obj_type, (
                    "table-cell does not have the same type"
                )
    return True
def verify_table_v2(true_item: TableItem, pred_item: TableItem, fuzzy: bool):
    """Assert a predicted (v2) TableItem matches the ground-truth one.

    Compares row/column counts and, for every grid cell, the text (optionally
    fuzzy via ``verify_text``) plus the column_header / row_header /
    row_section flags. Returns True when all checks pass.
    """
    assert true_item.data.num_rows == pred_item.data.num_rows, (
        "table does not have the same #-rows"
    )
    assert true_item.data.num_cols == pred_item.data.num_cols, (
        "table does not have the same #-cols"
    )
    # NOTE(review): these None-checks come after ``.data`` was already accessed
    # above — consider moving them to the top of the function.
    assert true_item.data is not None, "documents are expected to have table data"
    assert pred_item.data is not None, "documents are expected to have table data"
    for i, row in enumerate(true_item.data.grid):
        for j, col in enumerate(true_item.data.grid[i]):
            # Cell text must match (edit-distance tolerance when fuzzy=True).
            verify_text(
                true_item.data.grid[i][j].text,
                pred_item.data.grid[i][j].text,
                fuzzy=fuzzy,
            )
            assert (
                true_item.data.grid[i][j].column_header
                == pred_item.data.grid[i][j].column_header
            ), "table-cell should be a column_header but prediction isn't"
            assert (
                true_item.data.grid[i][j].row_header
                == pred_item.data.grid[i][j].row_header
            ), "table-cell should be a row_header but prediction isn't"
            assert (
                true_item.data.grid[i][j].row_section
                == pred_item.data.grid[i][j].row_section
            ), "table-cell should be a row_section but prediction isn't"
    return True
def verify_picture_image_v2(
    true_image: PILImage.Image, pred_item: Optional[PILImage.Image]
):
    """Assert a predicted picture image exists and matches the ground truth.

    Only size and mode are compared; pixel-level comparison is deliberately
    disabled (see the commented-out ``tobytes`` check below).
    """
    assert pred_item is not None, "predicted image is None"
    assert true_image.size == pred_item.size
    assert true_image.mode == pred_item.mode
    # assert true_image.tobytes() == pred_item.tobytes()
    return True
# def verify_output(doc_pred: DsDocument, doc_true: DsDocument):
# #assert verify_maintext(doc_pred, doc_true), "verify_maintext(doc_pred, doc_true)"
# assert verify_tables_v1(doc_pred, doc_true), "verify_tables(doc_pred, doc_true)"
# return True
def verify_docitems(doc_pred: DoclingDocument, doc_true: DoclingDocument, fuzzy: bool):
    """Walk both documents in parallel and assert all DocItems match.

    Compares labels, first-provenance page numbers, text (optionally fuzzy),
    table structure/content, picture images, code language and formula item
    types. Returns True when every check passes.
    """
    assert len(doc_pred.texts) == len(doc_true.texts), "Text lengths do not match."
    assert len(doc_true.tables) == len(doc_pred.tables), (
        "document has different count of tables than expected."
    )
    for (true_item, _true_level), (pred_item, _pred_level) in zip(
        doc_true.iterate_items(), doc_pred.iterate_items()
    ):
        if not isinstance(true_item, DocItem):
            continue
        assert isinstance(pred_item, DocItem), "Test item is not a DocItem"
        # Validate type
        assert true_item.label == pred_item.label, "Object label does not match."
        # Validate provenance
        assert len(true_item.prov) == len(pred_item.prov), "Length of prov mismatch"
        if len(true_item.prov) > 0:
            true_prov = true_item.prov[0]
            pred_prov = pred_item.prov[0]
            assert true_prov.page_no == pred_prov.page_no, "Page provenance mistmatch"
            # TODO: add bbox check with tolerance
        # Validate text content
        if isinstance(true_item, TextItem):
            assert isinstance(pred_item, TextItem), (
                f"Test item should be a TextItem {true_item=} {pred_item=} "
            )
            assert verify_text(true_item.text, pred_item.text, fuzzy=fuzzy)
        # Validate table content
        if isinstance(true_item, TableItem):
            assert isinstance(pred_item, TableItem), "Test item should be a TableItem"
            assert verify_table_v2(true_item, pred_item, fuzzy=fuzzy), (
                "Tables not matching"
            )
        # Validate picture content
        if isinstance(true_item, PictureItem):
            assert isinstance(pred_item, PictureItem), (
                "Test item should be a PictureItem"
            )
            true_image = true_item.get_image(doc=doc_true)
            # Bug fix: fetch the image from the *predicted* item, not the
            # ground-truth one, so prediction is actually compared to truth.
            pred_image = pred_item.get_image(doc=doc_pred)
            if true_image is not None:
                assert verify_picture_image_v2(true_image, pred_image), (
                    "Picture image mismatch"
                )
            # TODO: check picture annotations
        # Validate code content
        if isinstance(true_item, CodeItem):
            assert isinstance(pred_item, CodeItem), "Test item should be a CodeItem"
            assert true_item.code_language == pred_item.code_language
        # Validate formula content
        if isinstance(true_item, FormulaItem):
            assert isinstance(pred_item, FormulaItem), (
                "Test item should be a FormulaItem"
            )
    return True
def verify_md(doc_pred_md: str, doc_true_md: str, fuzzy: bool):
    """Compare exported Markdown against the ground truth (ground truth first)."""
    return verify_text(gt=doc_true_md, pred=doc_pred_md, fuzzy=fuzzy)
def verify_dt(doc_pred_dt: str, doc_true_dt: str, fuzzy: bool):
    """Compare exported DocTags against the ground truth (ground truth first)."""
    return verify_text(gt=doc_true_dt, pred=doc_pred_dt, fuzzy=fuzzy)
def verify_conversion_result_v1(
    input_path: Path,
    doc_result: ConversionResult,
    generate: bool = False,
    ocr_engine: Optional[str] = None,
    fuzzy: bool = False,
    indent: int = 2,
):
    """Verify a conversion result against the legacy (v1) ground-truth files.

    Ground-truth files live under ``<parent>/groundtruth/docling_v1`` (one
    directory level higher when the input sits in a ``pdf`` folder) and carry
    the OCR engine name as an extra suffix when one is given. With
    ``generate=True`` the ground truth is (re)written from the prediction
    instead of being compared.
    """
    assert doc_result.status == ConversionStatus.SUCCESS, (
        f"Doc {input_path} did not convert successfully."
    )
    # Accessing .legacy_document emits a DeprecationWarning; assert it fires.
    with pytest.warns(DeprecationWarning, match="Use document instead"):
        doc_pred: DsDocument = doc_result.legacy_document
        doc_pred_md = doc_result.legacy_document.export_to_markdown()
        doc_pred_dt = doc_result.legacy_document.export_to_document_tokens()
    engine_suffix = "" if ocr_engine is None else f".{ocr_engine}"
    gt_subpath = input_path.parent / "groundtruth" / "docling_v1" / input_path.name
    if str(input_path.parent).endswith("pdf"):
        # Inputs stored in a "pdf" folder keep their ground truth one level up.
        gt_subpath = (
            input_path.parent.parent / "groundtruth" / "docling_v1" / input_path.name
        )
    json_path = gt_subpath.with_suffix(f"{engine_suffix}.json")
    md_path = gt_subpath.with_suffix(f"{engine_suffix}.md")
    dt_path = gt_subpath.with_suffix(f"{engine_suffix}.doctags.txt")
    if generate:  # only used when re-generating truth
        json_path.parent.mkdir(parents=True, exist_ok=True)
        with open(json_path, mode="w", encoding="utf-8") as fw:
            fw.write(json.dumps(doc_pred, default=pydantic_encoder, indent=indent))
        md_path.parent.mkdir(parents=True, exist_ok=True)
        with open(md_path, mode="w", encoding="utf-8") as fw:
            fw.write(doc_pred_md)
        dt_path.parent.mkdir(parents=True, exist_ok=True)
        with open(dt_path, mode="w", encoding="utf-8") as fw:
            fw.write(doc_pred_dt)
    else:  # default branch in test
        with open(json_path, encoding="utf-8") as fr:
            doc_true: DsDocument = DsDocument.model_validate_json(fr.read())
        with open(md_path, encoding="utf-8") as fr:
            doc_true_md = fr.read()
        with open(dt_path, encoding="utf-8") as fr:
            doc_true_dt = fr.read()
        # assert verify_output(
        #     doc_pred, doc_true
        # ), f"Mismatch in JSON prediction for {input_path}"
        assert verify_tables_v1(doc_pred, doc_true, fuzzy=fuzzy), (
            f"verify_tables(doc_pred, doc_true) mismatch for {input_path}"
        )
        assert verify_md(doc_pred_md, doc_true_md, fuzzy=fuzzy), (
            f"Mismatch in Markdown prediction for {input_path}"
        )
        assert verify_dt(doc_pred_dt, doc_true_dt, fuzzy=fuzzy), (
            f"Mismatch in DocTags prediction for {input_path}"
        )
def verify_conversion_result_v2(
    input_path: Path,
    doc_result: ConversionResult,
    generate: bool = False,
    ocr_engine: Optional[str] = None,
    fuzzy: bool = False,
    verify_doctags: bool = True,
    indent: int = 2,
):
    """Verify a conversion result against the v2 (DoclingDocument) ground truth.

    Compares per-page cell-count metadata (skipped when *fuzzy*), the document
    items, the Markdown export and — unless *verify_doctags* is False — the
    DocTags export. Ground-truth files live under
    ``<parent>/groundtruth/docling_v2`` (one level higher for inputs in a
    ``pdf`` folder), suffixed with the OCR engine name when given. With
    ``generate=True`` the ground truth is (re)written from the prediction.
    """
    PageMetaList = TypeAdapter(list[_TestPagesMeta])
    assert doc_result.status == ConversionStatus.SUCCESS, (
        f"Doc {input_path} did not convert successfully."
    )
    doc_pred_pages: list[Page] = doc_result.pages
    # Reduce each pipeline page to the small metadata record stored on disk.
    doc_pred_pages_meta: list[_TestPagesMeta] = [
        _TestPagesMeta.from_page(page) for page in doc_pred_pages
    ]
    doc_pred: DoclingDocument = doc_result.document
    doc_pred_md = doc_result.document.export_to_markdown()
    doc_pred_dt = doc_result.document.export_to_doctags()
    engine_suffix = "" if ocr_engine is None else f".{ocr_engine}"
    gt_subpath = input_path.parent / "groundtruth" / "docling_v2" / input_path.name
    if str(input_path.parent).endswith("pdf"):
        # Inputs stored in a "pdf" folder keep their ground truth one level up.
        gt_subpath = (
            input_path.parent.parent / "groundtruth" / "docling_v2" / input_path.name
        )
    pages_path = gt_subpath.with_suffix(f"{engine_suffix}.pages.meta.json")
    json_path = gt_subpath.with_suffix(f"{engine_suffix}.json")
    md_path = gt_subpath.with_suffix(f"{engine_suffix}.md")
    dt_path = gt_subpath.with_suffix(f"{engine_suffix}.doctags.txt")
    if generate:  # only used when re-generating truth
        pages_path.parent.mkdir(parents=True, exist_ok=True)
        pages_data = PageMetaList.dump_json(doc_pred_pages_meta, indent=2)
        with open(pages_path, mode="w", encoding="utf-8") as fw:
            fw.write(pages_data.decode())
        json_path.parent.mkdir(parents=True, exist_ok=True)
        # Rounded coordinates/confidences keep the ground-truth JSON stable.
        doc_pred.save_as_json(
            json_path, coord_precision=COORD_PREC, confid_precision=CONFID_PREC
        )
        md_path.parent.mkdir(parents=True, exist_ok=True)
        with open(md_path, mode="w", encoding="utf-8") as fw:
            fw.write(doc_pred_md)
        dt_path.parent.mkdir(parents=True, exist_ok=True)
        with open(dt_path, mode="w", encoding="utf-8") as fw:
            fw.write(doc_pred_dt)
    else:  # default branch in test
        with open(pages_path, encoding="utf-8") as fr:
            doc_true_pages_meta = PageMetaList.validate_json(fr.read())
        with open(json_path, encoding="utf-8") as fr:
            doc_true: DoclingDocument = DoclingDocument.model_validate_json(fr.read())
        with open(md_path, encoding="utf-8") as fr:
            doc_true_md = fr.read()
        with open(dt_path, encoding="utf-8") as fr:
            doc_true_dt = fr.read()
        if not fuzzy:
            # Exact cell counts are only meaningful for non-fuzzy comparisons.
            assert verify_cells(doc_pred_pages_meta, doc_true_pages_meta), (
                f"Mismatch in PDF cell prediction for {input_path}"
            )
        assert verify_docitems(doc_pred, doc_true, fuzzy=fuzzy), (
            f"verify_docling_document(doc_pred, doc_true) mismatch for {input_path}"
        )
        assert verify_md(doc_pred_md, doc_true_md, fuzzy=fuzzy), (
            f"Mismatch in Markdown prediction for {input_path}"
        )
        if verify_doctags:
            assert verify_dt(doc_pred_dt, doc_true_dt, fuzzy=fuzzy), (
                f"Mismatch in DocTags prediction for {input_path}"
            )
def verify_document(pred_doc: DoclingDocument, gtfile: str, generate: bool = False):
    """Verify *pred_doc* against the ground-truth JSON file *gtfile*.

    When the ground truth is missing or *generate* is True, (re)write it from
    the prediction and return True; otherwise load it and compare item by item.
    Uses pathlib for consistency with the sibling ``verify_export``.
    """
    file = Path(gtfile)
    if not file.exists() or generate:
        with file.open(mode="w", encoding="utf-8") as fw:
            # Rounded coordinates/confidences keep the ground truth stable.
            pred_dict = pred_doc.export_to_dict(
                coord_precision=COORD_PREC,
                confid_precision=CONFID_PREC,
            )
            json.dump(pred_dict, fw, ensure_ascii=False, indent=2)
        return True
    else:
        with file.open(encoding="utf-8") as fr:
            true_doc = DoclingDocument.model_validate_json(fr.read())
        return verify_docitems(pred_doc, true_doc, fuzzy=False)
def verify_export(pred_text: str, gtfile: str, generate: bool = False) -> bool:
    """Compare *pred_text* with the ground-truth file, (re)writing it on demand.

    Returns True when the ground truth was (re)generated or when the texts
    match exactly; False otherwise.
    """
    gt_path = Path(gtfile)
    must_write = generate or not gt_path.exists()
    if must_write:
        with gt_path.open(mode="w", encoding="utf-8") as handle:
            handle.write(pred_text)
        return True
    with gt_path.open(encoding="utf-8") as handle:
        expected = handle.read()
    return expected == pred_text
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/vlm_pipeline_api_model.py | docs/examples/vlm_pipeline_api_model.py | # %% [markdown]
# Use the VLM pipeline with remote API models (LM Studio, Ollama, watsonx.ai).
#
# What this example does
# - Shows how to configure `ApiVlmOptions` for different VLM providers.
# - Converts a single PDF page using the VLM pipeline and prints Markdown.
#
# Prerequisites
# - Install Docling with VLM extras and `python-dotenv` if using environment files.
# - For local APIs: run LM Studio (HTTP server) or Ollama locally.
# - For cloud APIs: set required environment variables (see below).
# - Requires `requests` for HTTP calls and `python-dotenv` if loading env vars from `.env`.
#
# How to run
# - From the repo root: `python docs/examples/vlm_pipeline_api_model.py`.
# - The script prints the converted Markdown to stdout.
#
# Choosing a provider
# - Uncomment exactly one `pipeline_options.vlm_options = ...` block below.
# - Keep `enable_remote_services=True` to permit calling remote APIs.
#
# Notes
# - LM Studio default endpoint: `http://localhost:1234/v1/chat/completions`.
# - Ollama default endpoint: `http://localhost:11434/v1/chat/completions`.
# - watsonx.ai requires `WX_API_KEY` and `WX_PROJECT_ID` in env/`.env`.
# %%
import json
import logging
import os
from pathlib import Path
from typing import Optional
import requests
from docling_core.types.doc.page import SegmentedPage
from dotenv import load_dotenv
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import (
VlmPipelineOptions,
)
from docling.datamodel.pipeline_options_vlm_model import ApiVlmOptions, ResponseFormat
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.vlm_pipeline import VlmPipeline
### Example of ApiVlmOptions definitions
#### Using LM Studio or VLLM (OpenAI-compatible APIs)
def openai_compatible_vlm_options(
    model: str,
    prompt: str,
    format: ResponseFormat,
    hostname_and_port,
    temperature: float = 0.7,
    max_tokens: int = 4096,
    api_key: str = "",
    skip_special_tokens=False,
):
    """Build ApiVlmOptions for any OpenAI-compatible chat endpoint (LM Studio, VLLM)."""
    # Only send an Authorization header when an API key was actually provided.
    request_headers = {"Authorization": f"Bearer {api_key}"} if api_key else {}
    return ApiVlmOptions(
        url=f"http://{hostname_and_port}/v1/chat/completions",  # LM studio defaults to port 1234, VLLM to 8000
        params={
            "model": model,
            "max_tokens": max_tokens,
            "skip_special_tokens": skip_special_tokens,  # needed for VLLM
        },
        headers=request_headers,
        prompt=prompt,
        timeout=90,
        scale=2.0,
        temperature=temperature,
        response_format=format,
    )
#### Using LM Studio with OlmOcr model
def lms_olmocr_vlm_options(model: str):
    """Build ApiVlmOptions for the OlmOcr model served by LM Studio.

    OlmOcr expects previously extracted raw text ("anchors") embedded into the
    prompt and replies with a JSON payload, so both prompt building and
    response decoding are customized via an ``ApiVlmOptions`` subclass.
    """
    class OlmocrVlmOptions(ApiVlmOptions):
        def build_prompt(self, page: Optional[SegmentedPage]) -> str:
            """Substitute the #RAW_TEXT# placeholder with page anchor lines."""
            if page is None:
                # No segmented page available: send the prompt without anchors.
                return self.prompt.replace("#RAW_TEXT#", "")
            anchor = [
                f"Page dimensions: {int(page.dimension.width)}x{int(page.dimension.height)}"
            ]
            # One anchor line per non-empty text cell, prefixed with its
            # bottom-left-origin coordinates.
            for text_cell in page.textline_cells:
                if not text_cell.text.strip():
                    continue
                bbox = text_cell.rect.to_bounding_box().to_bottom_left_origin(
                    page.dimension.height
                )
                anchor.append(f"[{int(bbox.l)}x{int(bbox.b)}] {text_cell.text}")
            # One anchor line per embedded bitmap, with its bounding box.
            for image_cell in page.bitmap_resources:
                bbox = image_cell.rect.to_bounding_box().to_bottom_left_origin(
                    page.dimension.height
                )
                anchor.append(
                    f"[Image {int(bbox.l)}x{int(bbox.b)} to {int(bbox.r)}x{int(bbox.t)}]"
                )
            if len(anchor) == 1:
                # Neither text nor images found: treat the whole page as one image.
                anchor.append(
                    f"[Image 0x0 to {int(page.dimension.width)}x{int(page.dimension.height)}]"
                )
            # Original prompt uses cells sorting. We are skipping it for simplicity.
            raw_text = "\n".join(anchor)
            return self.prompt.replace("#RAW_TEXT#", raw_text)
        def decode_response(self, text: str) -> str:
            """Extract the plain text from OlmOcr's JSON reply ("" on parse failure)."""
            # OlmOcr trained to generate json response with language, rotation and other info
            try:
                generated_json = json.loads(text)
            except json.decoder.JSONDecodeError:
                return ""
            return generated_json["natural_text"]
    options = OlmocrVlmOptions(
        url="http://localhost:1234/v1/chat/completions",
        params=dict(
            model=model,
        ),
        prompt=(
            "Below is the image of one page of a document, as well as some raw textual"
            " content that was previously extracted for it. Just return the plain text"
            " representation of this document as if you were reading it naturally.\n"
            "Do not hallucinate.\n"
            "RAW_TEXT_START\n#RAW_TEXT#\nRAW_TEXT_END"
        ),
        timeout=90,
        scale=1.0,
        max_size=1024,  # from OlmOcr pipeline
        response_format=ResponseFormat.MARKDOWN,
    )
    return options
#### Using Ollama
def ollama_vlm_options(model: str, prompt: str):
    """Build ApiVlmOptions pointing at a locally running Ollama server."""
    return ApiVlmOptions(
        url="http://localhost:11434/v1/chat/completions",  # the default Ollama endpoint
        params={"model": model},
        prompt=prompt,
        timeout=90,
        scale=1.0,
        response_format=ResponseFormat.MARKDOWN,
    )
#### Using a cloud service like IBM watsonx.ai
def watsonx_vlm_options(model: str, prompt: str):
    """Build ApiVlmOptions for IBM watsonx.ai chat.

    Requires the WX_API_KEY and WX_PROJECT_ID environment variables (a .env
    file is honored via python-dotenv). Raises ValueError when either is
    missing.
    """
    load_dotenv()
    api_key = os.environ.get("WX_API_KEY")
    project_id = os.environ.get("WX_PROJECT_ID")
    # Fail fast with a clear message instead of sending "None" credentials.
    if not api_key or not project_id:
        raise ValueError("WX_API_KEY and WX_PROJECT_ID must be set to use watsonx.ai")
    def _get_iam_access_token(api_key: str) -> str:
        # Exchange the API key for a short-lived IAM bearer token.
        res = requests.post(
            url="https://iam.cloud.ibm.com/identity/token",
            headers={
                "Content-Type": "application/x-www-form-urlencoded",
            },
            data=f"grant_type=urn:ibm:params:oauth:grant-type:apikey&apikey={api_key}",
        )
        res.raise_for_status()
        api_out = res.json()
        # Security fix: do not print/log the response — it contains the access token.
        return api_out["access_token"]
    options = ApiVlmOptions(
        url="https://us-south.ml.cloud.ibm.com/ml/v1/text/chat?version=2023-05-29",
        params=dict(
            model_id=model,
            project_id=project_id,
            parameters=dict(
                max_new_tokens=400,
            ),
        ),
        headers={
            "Authorization": "Bearer " + _get_iam_access_token(api_key=api_key),
        },
        prompt=prompt,
        timeout=60,
        response_format=ResponseFormat.MARKDOWN,
    )
    return options
### Usage and conversion
def main():
    """Convert a sample PDF page with the VLM pipeline via a remote/local API.

    Exactly one provider configuration should be active (uncomment the one you
    want); the resulting document is printed as Markdown.
    """
    logging.basicConfig(level=logging.INFO)
    data_folder = Path(__file__).parent / "../../tests/data"
    input_doc_path = data_folder / "pdf/2305.03393v1-pg9.pdf"
    # Configure the VLM pipeline. Enabling remote services allows HTTP calls to
    # locally hosted APIs (LM Studio, Ollama) or cloud services.
    pipeline_options = VlmPipelineOptions(
        enable_remote_services=True  # required when calling remote VLM endpoints
    )
    # The ApiVlmOptions() allows to interface with APIs supporting
    # the multi-modal chat interface. Here follow a few examples on how to configure those.
    # One possibility is self-hosting the model, e.g., via LM Studio, Ollama or VLLM.
    #
    # e.g. with VLLM, serve granite-docling with these commands:
    # > vllm serve ibm-granite/granite-docling-258M --revision untied
    #
    # with LM Studio, serve granite-docling with these commands:
    # > lms server start
    # > lms load ibm-granite/granite-docling-258M-mlx
    # Example using the Granite-Docling model with LM Studio or VLLM:
    pipeline_options.vlm_options = openai_compatible_vlm_options(
        model="granite-docling-258m-mlx",  # For VLLM use "ibm-granite/granite-docling-258M"
        hostname_and_port="localhost:1234",  # LM studio defaults to port 1234, VLLM to 8000
        prompt="Convert this page to docling.",
        format=ResponseFormat.DOCTAGS,
        api_key="",
    )
    # Example using the OlmOcr (dynamic prompt) model with LM Studio:
    # (uncomment the following lines)
    # pipeline_options.vlm_options = lms_olmocr_vlm_options(
    #     model="hf.co/lmstudio-community/olmOCR-7B-0225-preview-GGUF",
    # )
    # Example using the Granite Vision model with Ollama:
    # (uncomment the following lines)
    # pipeline_options.vlm_options = ollama_vlm_options(
    #     model="granite3.2-vision:2b",
    #     prompt="OCR the full page to markdown.",
    # )
    # Another possibility is using online services, e.g., watsonx.ai.
    # Using watsonx.ai requires setting env variables WX_API_KEY and WX_PROJECT_ID
    # (see the top-level docstring for details). You can use a .env file as well.
    # (uncomment the following lines)
    # pipeline_options.vlm_options = watsonx_vlm_options(
    #     model="ibm/granite-vision-3-2-2b", prompt="OCR the full page to markdown."
    # )
    # Create the DocumentConverter and launch the conversion.
    doc_converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_options=pipeline_options,
                pipeline_cls=VlmPipeline,
            )
        }
    )
    result = doc_converter.convert(input_doc_path)
    print(result.document.export_to_markdown())
if __name__ == "__main__":
    main()
# %%
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/pictures_description_api.py | docs/examples/pictures_description_api.py | # %% [markdown]
# Describe pictures using a remote VLM API (vLLM, LM Studio, or watsonx.ai).
#
# What this example does
# - Configures `PictureDescriptionApiOptions` for local or cloud providers.
# - Converts a PDF, then prints each picture's caption and annotations.
#
# Prerequisites
# - Install Docling and `python-dotenv` if loading env vars from a `.env` file.
# - For local providers: ensure vLLM or LM Studio is running.
# - For watsonx.ai: set `WX_API_KEY` and `WX_PROJECT_ID` in the environment.
#
# How to run
# - From the repo root: `python docs/examples/pictures_description_api.py`.
# - Uncomment exactly one provider config and set `enable_remote_services=True` (already set).
#
# Notes
# - vLLM default endpoint: `http://localhost:8000/v1/chat/completions`.
# - LM Studio default endpoint: `http://localhost:1234/v1/chat/completions`.
# - Calling remote APIs sends page images/text to the provider; review privacy and
# costs. For local testing, LM Studio runs everything on your machine.
# %%
import logging
import os
from pathlib import Path
import requests
from docling_core.types.doc import PictureItem
from dotenv import load_dotenv
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import (
PdfPipelineOptions,
PictureDescriptionApiOptions,
)
from docling.document_converter import DocumentConverter, PdfFormatOption
### Example of PictureDescriptionApiOptions definitions
#### Using vLLM
# Models can be launched via:
# $ vllm serve MODEL_NAME
def vllm_local_options(model: str):
    """Build PictureDescriptionApiOptions for a local vLLM server (default port 8000)."""
    options = PictureDescriptionApiOptions(
        url="http://localhost:8000/v1/chat/completions",
        params=dict(
            model=model,
            seed=42,  # fixed seed for reproducible descriptions
            max_completion_tokens=200,
        ),
        # Typo fix in the prompt: "consise" -> "concise".
        prompt="Describe the image in three sentences. Be concise and accurate.",
        timeout=90,
    )
    return options
#### Using LM Studio
def lms_local_options(model: str):
    """Build PictureDescriptionApiOptions for a local LM Studio server (default port 1234)."""
    options = PictureDescriptionApiOptions(
        url="http://localhost:1234/v1/chat/completions",
        params=dict(
            model=model,
            seed=42,  # fixed seed for reproducible descriptions
            max_completion_tokens=200,
        ),
        # Typo fix in the prompt: "consise" -> "concise".
        prompt="Describe the image in three sentences. Be concise and accurate.",
        timeout=90,
    )
    return options
#### Using a cloud service like IBM watsonx.ai
def watsonx_vlm_options():
    """Build PictureDescriptionApiOptions for IBM watsonx.ai chat.

    Requires the WX_API_KEY and WX_PROJECT_ID environment variables (a .env
    file is honored via python-dotenv). Raises ValueError when either is
    missing.
    """
    load_dotenv()
    api_key = os.environ.get("WX_API_KEY")
    project_id = os.environ.get("WX_PROJECT_ID")
    # Fail fast with a clear message instead of sending "None" credentials.
    if not api_key or not project_id:
        raise ValueError("WX_API_KEY and WX_PROJECT_ID must be set to use watsonx.ai")
    def _get_iam_access_token(api_key: str) -> str:
        # Exchange the API key for a short-lived IAM bearer token.
        res = requests.post(
            url="https://iam.cloud.ibm.com/identity/token",
            headers={
                "Content-Type": "application/x-www-form-urlencoded",
            },
            data=f"grant_type=urn:ibm:params:oauth:grant-type:apikey&apikey={api_key}",
        )
        res.raise_for_status()
        api_out = res.json()
        # Security fix: do not print/log the response — it contains the access token.
        return api_out["access_token"]
    # Background information in case the model_id is updated:
    # [1] Official list of models: https://dataplatform.cloud.ibm.com/docs/content/wsj/analyze-data/fm-models.html?context=wx
    # [2] Info on granite vision 3.3: https://dataplatform.cloud.ibm.com/docs/content/wsj/analyze-data/fm-models-ibm.html?context=wx#granite-vision-3-3-2b
    options = PictureDescriptionApiOptions(
        url="https://us-south.ml.cloud.ibm.com/ml/v1/text/chat?version=2023-05-29",
        params=dict(
            model_id="ibm/granite-vision-3-3-2b",
            project_id=project_id,
            parameters=dict(
                max_new_tokens=400,
            ),
        ),
        headers={
            "Authorization": "Bearer " + _get_iam_access_token(api_key=api_key),
        },
        # Typo fix in the prompt: "consise" -> "concise".
        prompt="Describe the image in three sentences. Be concise and accurate.",
        timeout=60,
    )
    return options
### Usage and conversion
def main():
    """Convert a sample PDF and print each picture's caption and annotations.

    Picture description is delegated to a remote/local VLM API; exactly one of
    the provider configurations below should be active.
    """
    logging.basicConfig(level=logging.INFO)
    data_folder = Path(__file__).parent / "../../tests/data"
    input_doc_path = data_folder / "pdf/2206.01062.pdf"
    pipeline_options = PdfPipelineOptions(
        enable_remote_services=True  # <-- this is required!
    )
    pipeline_options.do_picture_description = True
    # The PictureDescriptionApiOptions() allows to interface with APIs supporting
    # the multi-modal chat interface. Here follow a few examples on how to configure those.
    #
    # One possibility is self-hosting model, e.g. via VLLM.
    # $ vllm serve MODEL_NAME
    # Then PictureDescriptionApiOptions can point to the localhost endpoint.
    # Example for the Granite Vision model:
    # (uncomment the following lines)
    # pipeline_options.picture_description_options = vllm_local_options(
    #     model="ibm-granite/granite-vision-3.3-2b"
    # )
    # Example for the SmolVLM model:
    # (uncomment the following lines)
    # pipeline_options.picture_description_options = vllm_local_options(
    #     model="HuggingFaceTB/SmolVLM-256M-Instruct"
    # )
    # For using models on LM Studio using the built-in GGUF or MLX runtimes, e.g. the SmolVLM model:
    # (uncomment the following lines)
    pipeline_options.picture_description_options = lms_local_options(
        model="smolvlm-256m-instruct"
    )
    # Another possibility is using online services, e.g. watsonx.ai.
    # Using it requires setting the env variables WX_API_KEY and WX_PROJECT_ID.
    # (uncomment the following lines)
    # pipeline_options.picture_description_options = watsonx_vlm_options()
    doc_converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_options=pipeline_options,
            )
        }
    )
    result = doc_converter.convert(input_doc_path)
    # Report the generated description for every picture in the document.
    for element, _level in result.document.iterate_items():
        if isinstance(element, PictureItem):
            print(
                f"Picture {element.self_ref}\n"
                f"Caption: {element.caption_text(doc=result.document)}\n"
                f"Annotations: {element.annotations}"
            )
if __name__ == "__main__":
    main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/run_md.py | docs/examples/run_md.py | import json
import logging
import os
from pathlib import Path
import yaml
from docling.backend.md_backend import MarkdownDocumentBackend
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
def main():
    """Convert Markdown files with the Markdown backend and export md/json/yaml."""
    input_paths = [Path("README.md")]
    for path in input_paths:
        in_doc = InputDocument(
            path_or_stream=path,
            # Bug fix: the input is Markdown, not PDF.
            format=InputFormat.MD,
            backend=MarkdownDocumentBackend,
        )
        mdb = MarkdownDocumentBackend(in_doc=in_doc, path_or_stream=path)
        document = mdb.convert()
        out_path = Path("scratch")
        # Ensure the output directory exists before writing into it.
        out_path.mkdir(parents=True, exist_ok=True)
        print(f"Document {path} converted.\nSaved markdown output to: {out_path!s}")
        # Export the Docling document to markdown, JSON and YAML:
        fn = path.name
        doc_dict = document.export_to_dict()  # export once, reuse for json/yaml
        with (out_path / f"{fn}.md").open("w", encoding="utf-8") as fp:
            fp.write(document.export_to_markdown())
        with (out_path / f"{fn}.json").open("w", encoding="utf-8") as fp:
            fp.write(json.dumps(doc_dict))
        with (out_path / f"{fn}.yaml").open("w", encoding="utf-8") as fp:
            fp.write(yaml.safe_dump(doc_dict))
if __name__ == "__main__":
    main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/enrich_simple_pipeline.py | docs/examples/enrich_simple_pipeline.py | import logging
from pathlib import Path
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import ConvertPipelineOptions
from docling.document_converter import (
DocumentConverter,
HTMLFormatOption,
WordFormatOption,
)
_log = logging.getLogger(__name__)
def main():
    """Convert a sample DOCX with picture classification/description enrichments."""
    sample_doc = Path("tests/data/docx/word_sample.docx")
    opts = ConvertPipelineOptions()
    opts.do_picture_classification = True
    opts.do_picture_description = True
    # The same enrichment options are applied to both Word and HTML inputs.
    format_options = {
        InputFormat.DOCX: WordFormatOption(pipeline_options=opts),
        InputFormat.HTML: HTMLFormatOption(pipeline_options=opts),
    }
    converter = DocumentConverter(format_options=format_options)
    result = converter.convert(sample_doc)
    print(result.document.export_to_markdown())
if __name__ == "__main__":
    main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/run_with_accelerator.py | docs/examples/run_with_accelerator.py | # %% [markdown]
# Run conversion with an explicit accelerator configuration (CPU/MPS/CUDA).
#
# What this example does
# - Shows how to select the accelerator device and thread count.
# - Enables OCR and table structure to exercise compute paths, and prints timings.
#
# How to run
# - From the repo root: `python docs/examples/run_with_accelerator.py`.
# - Toggle the commented `AcceleratorOptions` examples to try AUTO/MPS/CUDA.
#
# Notes
# - EasyOCR does not support `cuda:N` device selection (defaults to `cuda:0`).
# - `settings.debug.profile_pipeline_timings = True` prints profiling details.
# - `AcceleratorDevice.MPS` is macOS-only; `CUDA` requires a compatible GPU and
# CUDA-enabled PyTorch build. CPU mode works everywhere.
# %%
from pathlib import Path
from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import (
PdfPipelineOptions,
)
from docling.datamodel.settings import settings
from docling.document_converter import DocumentConverter, PdfFormatOption
def main():
    """Convert a sample PDF with an explicitly chosen accelerator (CPU, 8 threads)."""
    data_folder = Path(__file__).parent / "../../tests/data"
    input_doc_path = data_folder / "pdf/2206.01062.pdf"

    # Select the accelerator explicitly. Alternatives: AcceleratorDevice.AUTO,
    # .MPS (macOS), .CUDA, or a device string such as "cuda:1" — note easyocr
    # ignores the cuda index and always uses cuda:0.
    accelerator_options = AcceleratorOptions(
        num_threads=8, device=AcceleratorDevice.CPU
    )

    pipeline_options = PdfPipelineOptions()
    pipeline_options.accelerator_options = accelerator_options
    pipeline_options.do_ocr = True
    pipeline_options.do_table_structure = True
    pipeline_options.table_structure_options.do_cell_matching = True

    converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_options=pipeline_options,
            )
        }
    )

    # Enable timing instrumentation so per-document runtimes are recorded.
    settings.debug.profile_pipeline_timings = True

    conversion_result = converter.convert(input_doc_path)
    doc = conversion_result.document

    # Total wall-clock time(s) measured for the whole pipeline run.
    doc_conversion_secs = conversion_result.timings["pipeline_total"].times

    print(doc.export_to_markdown())
    print(f"Conversion secs: {doc_conversion_secs}")


if __name__ == "__main__":
    main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/develop_picture_enrichment.py | docs/examples/develop_picture_enrichment.py | # %% [markdown]
# Developing a picture enrichment model (classifier scaffold only).
#
# What this example does
# - Demonstrates how to implement an enrichment model that annotates pictures.
# - Adds a dummy PictureClassificationData entry to each PictureItem.
#
# Important
# - This is a scaffold for development; it does not run a real classifier.
#
# How to run
# - From the repo root: `python docs/examples/develop_picture_enrichment.py`.
#
# Notes
# - Enables picture image generation and sets `images_scale` to improve crops.
# - Extends `StandardPdfPipeline` with a custom enrichment stage.
# %%
import logging
from collections.abc import Iterable
from pathlib import Path
from typing import Any
from docling_core.types.doc import (
DoclingDocument,
NodeItem,
PictureClassificationClass,
PictureClassificationData,
PictureItem,
)
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.models.base_model import BaseEnrichmentModel
from docling.pipeline.standard_pdf_pipeline import StandardPdfPipeline
class ExamplePictureClassifierPipelineOptions(PdfPipelineOptions):
    """PDF pipeline options extended with a switch for the example classifier stage."""

    # NOTE(review): "classifer" is a typo, but the pipeline below reads this
    # exact attribute name — keep the spelling for compatibility.
    do_picture_classifer: bool = True
class ExamplePictureClassifierEnrichmentModel(BaseEnrichmentModel):
    """Dummy enrichment model: tags every picture with one fixed prediction."""

    def __init__(self, enabled: bool):
        self.enabled = enabled

    def is_processable(self, doc: DoclingDocument, element: NodeItem) -> bool:
        # Only pictures are handled, and only while the stage is switched on.
        return self.enabled and isinstance(element, PictureItem)

    def __call__(
        self, doc: DoclingDocument, element_batch: Iterable[NodeItem]
    ) -> Iterable[Any]:
        if not self.enabled:
            return

        for item in element_batch:
            assert isinstance(item, PictureItem)
            # For interactive debugging you may display the crop:
            # item.get_image(doc).show()  # may block; avoid in headless runs
            annotation = PictureClassificationData(
                provenance="example_classifier-0.0.1",
                predicted_classes=[
                    PictureClassificationClass(class_name="dummy", confidence=0.42)
                ],
            )
            item.annotations.append(annotation)
            yield item
class ExamplePictureClassifierPipeline(StandardPdfPipeline):
    """Standard PDF pipeline extended with the example picture-classifier stage."""

    def __init__(self, pipeline_options: ExamplePictureClassifierPipelineOptions):
        super().__init__(pipeline_options)
        # Narrow the attribute type for static checkers; the base class stores
        # the options instance. Fixed: this was previously annotated with the
        # pipeline class itself (ExamplePictureClassifierPipeline) instead of
        # the options class — cf. the formula example which annotates correctly.
        self.pipeline_options: ExamplePictureClassifierPipelineOptions

        self.enrichment_pipe = [
            ExamplePictureClassifierEnrichmentModel(
                enabled=pipeline_options.do_picture_classifer
            )
        ]

    @classmethod
    def get_default_options(cls) -> ExamplePictureClassifierPipelineOptions:
        return ExamplePictureClassifierPipelineOptions()
def main():
    """Run the example picture-classifier pipeline and print each annotation."""
    logging.basicConfig(level=logging.INFO)

    data_folder = Path(__file__).parent / "../../tests/data"
    input_doc_path = data_folder / "pdf/2206.01062.pdf"

    # A higher image scale plus picture-image generation improves the crops
    # handed to the enrichment model.
    pipeline_options = ExamplePictureClassifierPipelineOptions()
    pipeline_options.images_scale = 2.0
    pipeline_options.generate_picture_images = True

    doc_converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_cls=ExamplePictureClassifierPipeline,
                pipeline_options=pipeline_options,
            )
        }
    )

    result = doc_converter.convert(input_doc_path)

    for element, _level in result.document.iterate_items():
        if isinstance(element, PictureItem):
            print(
                f"The model populated the `data` portion of picture {element.self_ref}:\n{element.annotations}"
            )


if __name__ == "__main__":
    main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/develop_formula_understanding.py | docs/examples/develop_formula_understanding.py | # %% [markdown]
# Developing an enrichment model example (formula understanding: scaffold only).
#
# What this example does
# - Shows how to define pipeline options, an enrichment model, and extend a pipeline.
# - Displays cropped images of formula items and yields them back unchanged.
#
# Important
# - This is a development scaffold; it does not run a real formula understanding model.
#
# How to run
# - From the repo root: `python docs/examples/develop_formula_understanding.py`.
#
# Notes
# - Set `do_formula_understanding=True` to enable the example enrichment stage.
# - Extends `StandardPdfPipeline` and keeps the backend when enrichment is enabled.
# %%
import logging
from collections.abc import Iterable
from pathlib import Path
from docling_core.types.doc import DocItemLabel, DoclingDocument, NodeItem, TextItem
from docling.datamodel.base_models import InputFormat, ItemAndImageEnrichmentElement
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.models.base_model import BaseItemAndImageEnrichmentModel
from docling.pipeline.standard_pdf_pipeline import StandardPdfPipeline
class ExampleFormulaUnderstandingPipelineOptions(PdfPipelineOptions):
    """PDF pipeline options extended with a switch for the formula example stage."""

    # Enables the example formula-understanding enrichment model.
    do_formula_understanding: bool = True
# A new enrichment model using both the document element and its image as input
class ExampleFormulaUnderstandingEnrichmentModel(BaseItemAndImageEnrichmentModel):
    """Example enrichment stage that receives formula items together with crops."""

    # Scale applied when the pipeline crops the item image passed to __call__.
    images_scale = 2.6

    def __init__(self, enabled: bool):
        self.enabled = enabled

    def is_processable(self, doc: DoclingDocument, element: NodeItem) -> bool:
        # Only formula text items qualify, and only while the stage is enabled.
        if not self.enabled:
            return False
        return isinstance(element, TextItem) and element.label == DocItemLabel.FORMULA

    def __call__(
        self,
        doc: DoclingDocument,
        element_batch: Iterable[ItemAndImageEnrichmentElement],
    ) -> Iterable[NodeItem]:
        if not self.enabled:
            return

        for enriched in element_batch:
            # Pops up one window per cropped formula image; comment this out
            # when running headless or over many items.
            enriched.image.show()
            yield enriched.item
# How the pipeline can be extended.
class ExampleFormulaUnderstandingPipeline(StandardPdfPipeline):
    """Standard PDF pipeline extended with the formula-understanding stage."""

    def __init__(self, pipeline_options: ExampleFormulaUnderstandingPipelineOptions):
        super().__init__(pipeline_options)
        self.pipeline_options: ExampleFormulaUnderstandingPipelineOptions

        enabled = self.pipeline_options.do_formula_understanding
        self.enrichment_pipe = [
            ExampleFormulaUnderstandingEnrichmentModel(enabled=enabled)
        ]
        # Keep the backend alive when the stage runs (presumably needed so the
        # enrichment step can still crop item images — confirm in BasePipeline).
        if enabled:
            self.keep_backend = True

    @classmethod
    def get_default_options(cls) -> ExampleFormulaUnderstandingPipelineOptions:
        return ExampleFormulaUnderstandingPipelineOptions()
# Example main. In the final version, we simply have to set do_formula_understanding to true.
def main():
    """Convert a sample PDF through the formula-understanding example pipeline."""
    logging.basicConfig(level=logging.INFO)

    input_doc_path = (
        Path(__file__).parent / "../../tests/data" / "pdf/2203.01017v2.pdf"
    )

    # In a finished integration, only this flag would need to be set.
    options = ExampleFormulaUnderstandingPipelineOptions()
    options.do_formula_understanding = True

    converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_cls=ExampleFormulaUnderstandingPipeline,
                pipeline_options=options,
            )
        }
    )
    converter.convert(input_doc_path)


if __name__ == "__main__":
    main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/translate.py | docs/examples/translate.py | # %% [markdown]
# Translate extracted text content and regenerate Markdown with embedded images.
#
# What this example does
# - Converts a PDF and saves original Markdown with embedded images.
# - Translates text elements and table cell contents, then saves a translated Markdown.
#
# Prerequisites
# - Install Docling. Add a translation library of your choice inside `translate()`.
#
# How to run
# - From the repo root: `python docs/examples/translate.py`.
# - The script writes original and translated Markdown to `scratch/`.
#
# Notes
# - `translate()` is a placeholder; integrate your preferred translation API/client.
# - Image generation is enabled to preserve embedded images in the output.
# %%
import logging
from pathlib import Path
from docling_core.types.doc import ImageRefMode, TableItem, TextItem
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
_log = logging.getLogger(__name__)
IMAGE_RESOLUTION_SCALE = 2.0
# FIXME: put in your favorite translation code ....
def translate(text: str, src: str = "en", dest: str = "de"):
    """Placeholder translator: logs a warning and returns *text* unchanged.

    Plug in a real translation client here, e.g. with googletrans:

        from googletrans import Translator
        translated = Translator().translate(text, src=src, dest=dest)

    ``src`` and ``dest`` stay unused until a real backend is wired in.
    """
    _log.warning("!!! IMPLEMENT HERE YOUR FAVORITE TRANSLATION CODE!!!")
    return text
def main():
    """Convert a PDF, save original Markdown, translate texts in place, save again."""
    logging.basicConfig(level=logging.INFO)

    data_folder = Path(__file__).parent / "../../tests/data"
    input_doc_path = data_folder / "pdf/2206.01062.pdf"
    output_dir = Path("scratch")  # ensure this directory exists before saving

    # Page/picture images must be generated and kept, otherwise the converter
    # frees them during cleanup and embedded-image export has nothing to embed.
    # images_scale also sets the resolution (scale=1 is roughly 72 DPI).
    pipeline_options = PdfPipelineOptions()
    pipeline_options.images_scale = IMAGE_RESOLUTION_SCALE
    pipeline_options.generate_page_images = True
    pipeline_options.generate_picture_images = True

    doc_converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options)
        }
    )

    conv_res = doc_converter.convert(input_doc_path)
    conv_doc = conv_res.document
    doc_filename = conv_res.input.file.name

    # Original-language Markdown with embedded pictures.
    conv_doc.save_as_markdown(
        output_dir / f"{doc_filename}-with-images-orig.md",
        image_mode=ImageRefMode.EMBEDDED,
    )

    # Translate every text element and every table cell, keeping the original
    # text on `element.orig`.
    for element, _level in conv_res.document.iterate_items():
        if isinstance(element, TextItem):
            element.orig = element.text
            element.text = translate(text=element.text)
        elif isinstance(element, TableItem):
            for cell in element.data.table_cells:
                cell.text = translate(text=cell.text)

    # Translated Markdown with embedded pictures.
    conv_doc.save_as_markdown(
        output_dir / f"{doc_filename}-with-images-translated.md",
        image_mode=ImageRefMode.EMBEDDED,
    )


if __name__ == "__main__":
    main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/parquet_images.py | docs/examples/parquet_images.py | # %% [markdown]
# What this example does
# - Run a batch conversion on a parquet file with an image column.
#
# Requirements
# - Python 3.9+
# - Install Docling: `pip install docling`
#
# How to run
# - `python docs/examples/parquet_images.py FILE`
#
# The parquet file should be in the format similar to the ViDoRe V3 dataset.
# https://huggingface.co/collections/vidore/vidore-benchmark-v3
#
# For example:
# - https://huggingface.co/datasets/vidore/vidore_v3_hr/blob/main/corpus/test-00000-of-00001.parquet
#
# ### Start models with vllm
# ```console
# vllm serve ibm-granite/granite-docling-258M \
# --host 127.0.0.1 --port 8000 \
# --max-num-seqs 512 \
# --max-num-batched-tokens 8192 \
# --enable-chunked-prefill \
# --gpu-memory-utilization 0.9
# ```
# %%
import io
import sys
import time
from pathlib import Path
from typing import Annotated, Literal
import pyarrow.parquet as pq
import typer
from PIL import Image
from docling.datamodel import vlm_model_specs
from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
from docling.datamodel.base_models import ConversionStatus, DocumentStream, InputFormat
from docling.datamodel.pipeline_options import (
PdfPipelineOptions,
PipelineOptions,
RapidOcrOptions,
VlmPipelineOptions,
)
from docling.datamodel.pipeline_options_vlm_model import ApiVlmOptions, ResponseFormat
from docling.datamodel.settings import settings
from docling.document_converter import DocumentConverter, ImageFormatOption
from docling.pipeline.base_pipeline import ConvertPipeline
from docling.pipeline.legacy_standard_pdf_pipeline import LegacyStandardPdfPipeline
from docling.pipeline.standard_pdf_pipeline import StandardPdfPipeline
from docling.pipeline.vlm_pipeline import VlmPipeline
from docling.utils.accelerator_utils import decide_device
def process_document(
    images: list[Image.Image], chunk_idx: int, doc_converter: DocumentConverter
):
    """Pack *images* into one multi-page TIFF in memory and convert it with Docling."""
    print(f"\n--- Processing chunk {chunk_idx} with {len(images)} images ---")

    # TIFF pages must share a mode, so normalize every frame to RGB first.
    frames = [im.convert("RGB") for im in images]

    # Build the multi-page TIFF in an in-memory buffer: first frame is the
    # base image, the remaining frames are appended as extra pages.
    buf = io.BytesIO()
    frames[0].save(
        buf,
        format="TIFF",
        save_all=True,
        append_images=frames[1:],
        compression="tiff_deflate",  # good compression, optional
    )
    buf.seek(0)

    # Feed the buffer to Docling as a single document and report throughput.
    doc_stream = DocumentStream(name=f"doc_{chunk_idx}.tiff", stream=buf)
    start_time = time.time()
    conv_result = doc_converter.convert(doc_stream)
    runtime = time.time() - start_time

    assert conv_result.status == ConversionStatus.SUCCESS
    page_count = len(conv_result.pages)
    print(
        f"Chunk {chunk_idx} converted in {runtime:.2f} sec ({page_count / runtime:.2f} pages/s)."
    )
def run(
    filename: Annotated[Path, typer.Argument()] = Path(
        "docs/examples/data/vidore_v3_hr-slice.parquet"
    ),
    doc_size: int = 192,
    batch_size: int = 64,
    pipeline: Literal["standard", "vlm", "legacy"] = "standard",
):
    """Stream a parquet image corpus and convert it in fixed-size chunks.

    Args:
        filename: Parquet file with an ``image`` column (ViDoRe-V3-like schema,
            each cell a dict with ``bytes``/``path`` — see module header).
        doc_size: Number of page images packed into each synthetic document.
        batch_size: Parquet read batch size; also reused as the batch /
            concurrency size of the selected pipeline.
        pipeline: Which Docling pipeline to run: "standard", "vlm", or "legacy".
    """
    # Resolve the effective accelerator device to choose an OCR backend.
    acc_opts = AcceleratorOptions()
    device = decide_device(acc_opts.device)
    ocr_options = RapidOcrOptions()
    if "cuda" in device:
        ocr_options = RapidOcrOptions(backend="torch")
    # On Python 3.14 we only have torch
    if sys.version_info >= (3, 14):
        ocr_options = RapidOcrOptions(backend="torch")
    if pipeline == "standard":
        pipeline_cls: type[ConvertPipeline] = StandardPdfPipeline
        pipeline_options: PipelineOptions = PdfPipelineOptions(
            ocr_options=ocr_options,
            ocr_batch_size=batch_size,
            layout_batch_size=batch_size,
            table_batch_size=4,
        )
    elif pipeline == "legacy":
        # The legacy pipeline batches whole pages; align it with batch_size.
        settings.perf.page_batch_size = batch_size
        pipeline_cls: type[ConvertPipeline] = LegacyStandardPdfPipeline
        pipeline_options: PipelineOptions = PdfPipelineOptions(
            ocr_options=ocr_options,
            ocr_batch_size=batch_size,
            layout_batch_size=batch_size,
            table_batch_size=4,
        )
    elif pipeline == "vlm":
        settings.perf.page_batch_size = batch_size
        pipeline_cls = VlmPipeline
        # Remote GraniteDocling model served via vLLM (see module header for
        # the matching `vllm serve` command).
        vlm_options = vlm_model_specs.GRANITEDOCLING_VLLM_API
        vlm_options.concurrency = batch_size
        vlm_options.scale = 1.0 # avoid rescaling image inputs
        pipeline_options = VlmPipelineOptions(
            vlm_options=vlm_options,
            enable_remote_services=True, # required when using a remote inference service.
        )
    else:
        raise RuntimeError(f"Pipeline {pipeline} not available.")
    doc_converter = DocumentConverter(
        format_options={
            InputFormat.IMAGE: ImageFormatOption(
                pipeline_cls=pipeline_cls,
                pipeline_options=pipeline_options,
            )
        }
    )
    # Initialize (load models) up front so chunk timings measure conversion only.
    start_time = time.time()
    doc_converter.initialize_pipeline(InputFormat.IMAGE)
    init_runtime = time.time() - start_time
    print(f"Pipeline initialized in {init_runtime:.2f} seconds.")
    # ------------------------------------------------------------
    # Open parquet file in streaming mode
    # ------------------------------------------------------------
    pf = pq.ParquetFile(filename)
    image_buffer = [] # holds up to doc_size images
    chunk_idx = 0
    # ------------------------------------------------------------
    # Stream batches from parquet
    # ------------------------------------------------------------
    for batch in pf.iter_batches(batch_size=batch_size, columns=["image"]):
        col = batch.column("image")
        # Extract Python objects (PIL images)
        # Arrow stores them as Python objects inside an ObjectArray
        for i in range(len(col)):
            img_dict = col[i].as_py() # {"bytes": ..., "path": ...}
            pil_image = Image.open(io.BytesIO(img_dict["bytes"]))
            image_buffer.append(pil_image)
            # If enough images gathered → process one doc
            if len(image_buffer) == doc_size:
                process_document(image_buffer, chunk_idx, doc_converter)
                image_buffer.clear()
                chunk_idx += 1
    # ------------------------------------------------------------
    # Process trailing images (last partial chunk)
    # ------------------------------------------------------------
    if image_buffer:
        process_document(image_buffer, chunk_idx, doc_converter)
if __name__ == "__main__":
    typer.run(run)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/compare_vlm_models.py | docs/examples/compare_vlm_models.py | # %% [markdown]
# Compare different VLM models by running the VLM pipeline and timing outputs.
#
# What this example does
# - Iterates through a list of VLM model configurations and converts the same file.
# - Prints per-page generation times and saves JSON/MD/HTML to `scratch/`.
# - Summarizes total inference time and pages processed in a table.
#
# Requirements
# - Install `tabulate` for pretty printing (`pip install tabulate`).
#
# Prerequisites
# - Install Docling with VLM extras. Ensure models can be downloaded or are available.
#
# How to run
# - From the repo root: `python docs/examples/compare_vlm_models.py`.
# - Results are saved to `scratch/` with filenames including the model and framework.
#
# Notes
# - MLX models are skipped automatically on non-macOS platforms.
# - On CUDA systems, you can enable flash_attention_2 (see commented lines).
# - Running multiple VLMs can be GPU/CPU intensive and time-consuming; ensure
# enough VRAM/system RAM and close other memory-heavy apps.
# %%
import json
import sys
import time
from pathlib import Path
from docling_core.types.doc import DocItemLabel, ImageRefMode
from docling_core.types.doc.document import DEFAULT_EXPORT_LABELS
from tabulate import tabulate
from docling.datamodel import vlm_model_specs
from docling.datamodel.accelerator_options import AcceleratorDevice
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import (
VlmPipelineOptions,
)
from docling.datamodel.pipeline_options_vlm_model import (
InferenceFramework,
InlineVlmOptions,
ResponseFormat,
TransformersModelType,
TransformersPromptStyle,
)
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.vlm_pipeline import VlmPipeline
def convert(sources: list[Path], converter: DocumentConverter):
    """Convert each source with *converter*, export JSON/MD/HTML, return stats.

    Relies on the module-level globals ``pipeline_options`` (active VLM
    options, used for labeling and per-page logging) and ``out_path`` (output
    directory) — both are set in the ``__main__`` block.

    Returns ``[source, model_id, framework, num_pages, inference_time]`` for
    the first source only (see note below).
    """
    # Note: this helper assumes a single-item `sources` list. It returns after
    # processing the first source to keep runtime/output focused.
    model_id = pipeline_options.vlm_options.repo_id.replace("/", "_")
    framework = pipeline_options.vlm_options.inference_framework
    for source in sources:
        print("================================================")
        print("Processing...")
        print(f"Source: {source}")
        print("---")
        print(f"Model: {model_id}")
        print(f"Framework: {framework}")
        print("================================================")
        print("")
        res = converter.convert(source)
        print("")
        fname = f"{res.input.file.stem}-{model_id}-{framework}"
        # Sum per-page VLM generation time and echo each page's raw response.
        inference_time = 0.0
        for i, page in enumerate(res.pages):
            inference_time += page.predictions.vlm_response.generation_time
            print("")
            print(
                f" ---------- Predicted page {i} in {pipeline_options.vlm_options.response_format} in {page.predictions.vlm_response.generation_time} [sec]:"
            )
            print(page.predictions.vlm_response.text)
            print(" ---------- ")
        print("===== Final output of the converted document =======")
        # Manual export for illustration. Below, `save_as_json()` writes the same
        # JSON again; kept intentionally to show both approaches.
        with (out_path / f"{fname}.json").open("w") as fp:
            fp.write(json.dumps(res.document.export_to_dict()))
        res.document.save_as_json(
            out_path / f"{fname}.json",
            image_mode=ImageRefMode.PLACEHOLDER,
        )
        print(f" => produced {out_path / fname}.json")
        res.document.save_as_markdown(
            out_path / f"{fname}.md",
            image_mode=ImageRefMode.PLACEHOLDER,
        )
        print(f" => produced {out_path / fname}.md")
        res.document.save_as_html(
            out_path / f"{fname}.html",
            image_mode=ImageRefMode.EMBEDDED,
            labels=[*DEFAULT_EXPORT_LABELS, DocItemLabel.FOOTNOTE],
            split_page_view=True,
        )
        print(f" => produced {out_path / fname}.html")
        pg_num = res.document.num_pages()
        print("")
        print(
            f"Total document prediction time: {inference_time:.2f} seconds, pages: {pg_num}"
        )
        print("====================================================")
        # Early return: only the first source is processed (see note above).
        return [
            source,
            model_id,
            str(framework),
            pg_num,
            inference_time,
        ]
if __name__ == "__main__":
    sources = [
        "tests/data/pdf/2305.03393v1-pg9.pdf",
    ]
    out_path = Path("scratch")
    out_path.mkdir(parents=True, exist_ok=True)
    ## Definition of more inline models
    llava_qwen = InlineVlmOptions(
        repo_id="llava-hf/llava-interleave-qwen-0.5b-hf",
        # prompt="Read text in the image.",
        prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
        # prompt="Parse the reading order of this document.",
        response_format=ResponseFormat.MARKDOWN,
        inference_framework=InferenceFramework.TRANSFORMERS,
        transformers_model_type=TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT,
        supported_devices=[AcceleratorDevice.CUDA, AcceleratorDevice.CPU],
        scale=2.0,
        temperature=0.0,
    )
    # Note that this is not the expected way of using the Dolphin model, but it shows the usage of a raw prompt.
    dolphin_oneshot = InlineVlmOptions(
        repo_id="ByteDance/Dolphin",
        prompt="<s>Read text in the image. <Answer/>",
        response_format=ResponseFormat.MARKDOWN,
        inference_framework=InferenceFramework.TRANSFORMERS,
        transformers_model_type=TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT,
        transformers_prompt_style=TransformersPromptStyle.RAW,
        supported_devices=[AcceleratorDevice.CUDA, AcceleratorDevice.CPU],
        scale=2.0,
        temperature=0.0,
    )
    ## Use VlmPipeline
    pipeline_options = VlmPipelineOptions()
    pipeline_options.generate_page_images = True
    ## On GPU systems, enable flash_attention_2 with CUDA:
    # pipeline_options.accelerator_options.device = AcceleratorDevice.CUDA
    # pipeline_options.accelerator_options.cuda_use_flash_attention2 = True
    # Models to compare; each entry is swapped into pipeline_options below.
    vlm_models = [
        ## DocTags / SmolDocling models
        vlm_model_specs.SMOLDOCLING_MLX,
        vlm_model_specs.SMOLDOCLING_TRANSFORMERS,
        ## Markdown models (using MLX framework)
        vlm_model_specs.QWEN25_VL_3B_MLX,
        vlm_model_specs.PIXTRAL_12B_MLX,
        vlm_model_specs.GEMMA3_12B_MLX,
        ## Markdown models (using Transformers framework)
        vlm_model_specs.GRANITE_VISION_TRANSFORMERS,
        vlm_model_specs.PHI4_TRANSFORMERS,
        vlm_model_specs.PIXTRAL_12B_TRANSFORMERS,
        ## More inline models
        dolphin_oneshot,
        llava_qwen,
    ]
    # Remove MLX models if not on Mac
    if sys.platform != "darwin":
        vlm_models = [
            m for m in vlm_models if m.inference_framework != InferenceFramework.MLX
        ]
    rows = []
    for vlm_options in vlm_models:
        pipeline_options.vlm_options = vlm_options
        ## Set up pipeline for PDF or image inputs
        converter = DocumentConverter(
            format_options={
                InputFormat.PDF: PdfFormatOption(
                    pipeline_cls=VlmPipeline,
                    pipeline_options=pipeline_options,
                ),
                InputFormat.IMAGE: PdfFormatOption(
                    pipeline_cls=VlmPipeline,
                    pipeline_options=pipeline_options,
                ),
            },
        )
        row = convert(sources=sources, converter=converter)
        rows.append(row)
    # Summary table: one row per model run.
    print(
        tabulate(
            rows, headers=["source", "model_id", "framework", "num_pages", "time"]
        )
    )
    # Idle briefly so memory release can be observed in a process monitor.
    print("see if memory gets released ...")
    time.sleep(10)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/tesseract_lang_detection.py | docs/examples/tesseract_lang_detection.py | # %% [markdown]
# Detect language automatically with Tesseract OCR and force full-page OCR.
#
# What this example does
# - Configures Tesseract (CLI in this snippet) with `lang=["auto"]`.
# - Forces full-page OCR and prints the recognized text as Markdown.
#
# How to run
# - From the repo root: `python docs/examples/tesseract_lang_detection.py`.
# - Ensure Tesseract CLI (or library) is installed and on PATH.
#
# Notes
# - You can switch to `TesseractOcrOptions` instead of `TesseractCliOcrOptions`.
# - Language packs must be installed; set `TESSDATA_PREFIX` if Tesseract
# cannot find language data. Using `lang=["auto"]` requires traineddata
# that supports script/language detection on your system.
# %%
from pathlib import Path
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import (
PdfPipelineOptions,
TesseractCliOcrOptions,
)
from docling.document_converter import DocumentConverter, PdfFormatOption
def main():
    """OCR a sample PDF with Tesseract auto language detection; print Markdown."""
    input_doc_path = (
        Path(__file__).parent / "../../tests/data" / "pdf/2206.01062.pdf"
    )

    # lang=["auto"] also works with the library engine: TesseractOcrOptions.
    ocr_options = TesseractCliOcrOptions(lang=["auto"])

    # Force OCR over the whole page rather than only bitmap regions.
    pipeline_options = PdfPipelineOptions(
        do_ocr=True, force_full_page_ocr=True, ocr_options=ocr_options
    )
    converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_options=pipeline_options,
            )
        }
    )

    doc = converter.convert(input_doc_path).document
    print(doc.export_to_markdown())


if __name__ == "__main__":
    main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/run_with_formats.py | docs/examples/run_with_formats.py | # %% [markdown]
# Run conversion across multiple input formats and customize handling per type.
#
# What this example does
# - Demonstrates converting a mixed list of files (PDF, DOCX, PPTX, HTML, images, etc.).
# - Shows how to restrict `allowed_formats` and override `format_options` per format.
# - Writes results (Markdown, JSON, YAML) to `scratch/`.
#
# Prerequisites
# - Install Docling and any format-specific dependencies (e.g., for DOCX/PPTX parsing).
# - Ensure you can import `docling` from your Python environment.
# - YAML export requires `PyYAML` (`pip install pyyaml`).
#
# How to run
# - From the repository root, run: `python docs/examples/run_with_formats.py`.
# - Outputs are written under `scratch/` next to where you run the script.
# - If `scratch/` does not exist, create it before running.
#
# Customizing inputs
# - Update `input_paths` to include or remove files on your machine.
# - Non-whitelisted formats are ignored (see `allowed_formats`).
#
# Notes
# - `allowed_formats`: explicit whitelist of formats that will be processed.
# - `format_options`: per-format pipeline/backend overrides. Everything is optional; defaults exist.
# - Exports: per input, writes `<stem>.md`, `<stem>.json`, and `<stem>.yaml` in `scratch/`.
# %%
import json
import logging
from pathlib import Path
import yaml
from docling.backend.pypdfium2_backend import PyPdfiumDocumentBackend
from docling.datamodel.base_models import InputFormat
from docling.document_converter import (
DocumentConverter,
PdfFormatOption,
WordFormatOption,
)
from docling.pipeline.simple_pipeline import SimplePipeline
from docling.pipeline.standard_pdf_pipeline import StandardPdfPipeline
_log = logging.getLogger(__name__)
def main():
    """Convert a mixed batch of inputs and export md/json/yaml per document."""
    input_paths = [
        Path("README.md"),
        Path("tests/data/html/wiki_duck.html"),
        Path("tests/data/docx/word_sample.docx"),
        Path("tests/data/docx/lorem_ipsum.docx"),
        Path("tests/data/pptx/powerpoint_sample.pptx"),
        Path("tests/data/2305.03393v1-pg9-img.png"),
        Path("tests/data/pdf/2206.01062.pdf"),
        Path("tests/data/asciidoc/test_01.asciidoc"),
    ]

    # A bare DocumentConverter() also works; everything below is an optional
    # override of the internal defaults.
    doc_converter = DocumentConverter(
        allowed_formats=[
            InputFormat.PDF,
            InputFormat.IMAGE,
            InputFormat.DOCX,
            InputFormat.HTML,
            InputFormat.PPTX,
            InputFormat.ASCIIDOC,
            InputFormat.CSV,
            InputFormat.MD,
        ],  # whitelist formats; non-matching files are ignored
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_cls=StandardPdfPipeline, backend=PyPdfiumDocumentBackend
            ),
            InputFormat.DOCX: WordFormatOption(
                pipeline_cls=SimplePipeline  # or set a backend instead, e.g.
                # from docling.backend.msword_backend import MsWordDocumentBackend
            ),
        },
    )

    conv_results = doc_converter.convert_all(input_paths)

    for res in conv_results:
        out_path = Path("scratch")  # ensure this directory exists before running
        print(
            f"Document {res.input.file.name} converted."
            f"\nSaved markdown output to: {out_path!s}"
        )
        _log.debug(res.document._export_to_indented_text(max_text_len=16))

        # Export the Docling document in three formats, one file per format.
        stem = res.input.file.stem
        with (out_path / f"{stem}.md").open("w") as fp:
            fp.write(res.document.export_to_markdown())
        with (out_path / f"{stem}.json").open("w") as fp:
            fp.write(json.dumps(res.document.export_to_dict()))
        with (out_path / f"{stem}.yaml").open("w") as fp:
            fp.write(yaml.safe_dump(res.document.export_to_dict()))


if __name__ == "__main__":
    main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/mlx_whisper_example.py | docs/examples/mlx_whisper_example.py | #!/usr/bin/env python3
"""
Example script demonstrating MLX Whisper integration for Apple Silicon.
This script shows how to use the MLX Whisper models for speech recognition
on Apple Silicon devices with optimized performance.
"""
import argparse
import sys
from pathlib import Path
# Add the repository root to the path so we can import docling
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
from docling.datamodel.asr_model_specs import (
WHISPER_BASE,
WHISPER_LARGE,
WHISPER_MEDIUM,
WHISPER_SMALL,
WHISPER_TINY,
WHISPER_TURBO,
)
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import AsrPipelineOptions
from docling.document_converter import AudioFormatOption, DocumentConverter
from docling.pipeline.asr_pipeline import AsrPipeline
def transcribe_audio_with_mlx_whisper(audio_file_path: str, model_size: str = "base"):
    """
    Transcribe audio using Whisper models with automatic MLX optimization for Apple Silicon.

    Args:
        audio_file_path: Path to the audio file to transcribe
        model_size: Size of the Whisper model to use
                    ("tiny", "base", "small", "medium", "large", "turbo")
                    Note: MLX optimization is automatically used on Apple Silicon when available

    Returns:
        The transcribed text (one line per recognized text item).

    Raises:
        ValueError: If `model_size` is not one of the known sizes.
        RuntimeError: If the conversion pipeline does not report success.
    """
    # Select the appropriate Whisper model (automatically uses MLX on Apple Silicon)
    model_map = {
        "tiny": WHISPER_TINY,
        "base": WHISPER_BASE,
        "small": WHISPER_SMALL,
        "medium": WHISPER_MEDIUM,
        "large": WHISPER_LARGE,
        "turbo": WHISPER_TURBO,
    }
    if model_size not in model_map:
        raise ValueError(
            f"Invalid model size: {model_size}. Choose from: {list(model_map.keys())}"
        )
    asr_options = model_map[model_size]

    # Configure accelerator options for Apple Silicon
    accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)

    # Create pipeline options
    pipeline_options = AsrPipelineOptions(
        asr_options=asr_options,
        accelerator_options=accelerator_options,
    )

    # Create document converter with MLX Whisper configuration
    converter = DocumentConverter(
        format_options={
            InputFormat.AUDIO: AudioFormatOption(
                pipeline_cls=AsrPipeline,
                pipeline_options=pipeline_options,
            )
        }
    )

    # Run transcription
    result = converter.convert(Path(audio_file_path))

    if result.status.value != "success":
        raise RuntimeError(f"Transcription failed: {result.status}")

    # Join the text of every recognized item into a single newline-separated string.
    return "\n".join(item.text for item in result.document.texts)
def parse_args():
    """Parse command line arguments."""
    usage_examples = """
Examples:
# Use default test audio file
python mlx_whisper_example.py
# Use your own audio file
python mlx_whisper_example.py --audio /path/to/your/audio.mp3
# Use specific model size
python mlx_whisper_example.py --audio audio.wav --model tiny
# Use default test file with specific model
python mlx_whisper_example.py --model turbo
"""
    parser = argparse.ArgumentParser(
        description="MLX Whisper example for Apple Silicon speech recognition",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=usage_examples,
    )

    # Optional path to the audio file; main() falls back to the bundled sample.
    parser.add_argument(
        "--audio",
        type=str,
        help="Path to audio file for transcription (default: tests/data/audio/sample_10s.mp3)",
    )

    # Whisper model size, restricted to the known variants.
    model_choices = ["tiny", "base", "small", "medium", "large", "turbo"]
    parser.add_argument(
        "--model",
        type=str,
        choices=model_choices,
        default="base",
        help="Whisper model size to use (default: base)",
    )

    return parser.parse_args()
def main():
    """Main function to demonstrate MLX Whisper usage."""
    args = parse_args()

    # Resolve the audio file: explicit --audio wins, otherwise fall back to the
    # bundled 10-second sample shipped with the test data.
    if args.audio:
        audio_file_path = args.audio
    else:
        default_audio = (
            Path(__file__).parent.parent.parent
            / "tests"
            / "data"
            / "audio"
            / "sample_10s.mp3"
        )
        if not default_audio.exists():
            print("Error: No audio file specified and default test file not found.")
            print(
                "Please specify an audio file with --audio or ensure tests/data/audio/sample_10s.mp3 exists."
            )
            sys.exit(1)
        audio_file_path = str(default_audio)
        print("No audio file specified, using default test file:")
        print(f" Audio file: {audio_file_path}")
        print(f" Model size: {args.model}")
        print()

    if not Path(audio_file_path).exists():
        print(f"Error: Audio file '{audio_file_path}' not found.")
        sys.exit(1)

    try:
        print(f"Transcribing '{audio_file_path}' using Whisper {args.model} model...")
        print(
            "Note: MLX optimization is automatically used on Apple Silicon when available."
        )
        print()
        transcribed_text = transcribe_audio_with_mlx_whisper(
            audio_file_path, args.model
        )
        print("Transcription Result:")
        print("=" * 50)
        print(transcribed_text)
        print("=" * 50)
    except ImportError as e:
        # mlx-whisper is an optional dependency; point the user at the install command.
        print(f"Error: {e}")
        print("Please install mlx-whisper: pip install mlx-whisper")
        print("Or install with uv: uv sync --extra asr")
        sys.exit(1)
    except Exception as e:
        print(f"Error during transcription: {e}")
        sys.exit(1)
if __name__ == "__main__":
main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/post_process_ocr_with_vlm.py | docs/examples/post_process_ocr_with_vlm.py | import argparse
import logging
import os
import re
from collections.abc import Iterable
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Any, Optional, Union
import numpy as np
from docling_core.types.doc import (
DoclingDocument,
ImageRefMode,
NodeItem,
TextItem,
)
from docling_core.types.doc.document import (
ContentLayer,
DocItem,
FormItem,
GraphCell,
KeyValueItem,
PictureItem,
RichTableCell,
TableCell,
TableItem,
)
from PIL import Image, ImageFilter
from PIL.ImageOps import crop
from pydantic import BaseModel, ConfigDict
from tqdm import tqdm
from docling.backend.json.docling_json_backend import DoclingJSONBackend
from docling.datamodel.accelerator_options import AcceleratorOptions
from docling.datamodel.base_models import InputFormat, ItemAndImageEnrichmentElement
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import (
ConvertPipelineOptions,
PdfPipelineOptions,
PictureDescriptionApiOptions,
)
from docling.document_converter import DocumentConverter, FormatOption, PdfFormatOption
from docling.exceptions import OperationNotAllowed
from docling.models.base_model import BaseModelWithOptions, GenericEnrichmentModel
from docling.pipeline.simple_pipeline import SimplePipeline
from docling.pipeline.standard_pdf_pipeline import StandardPdfPipeline
from docling.utils.api_image_request import api_image_request
from docling.utils.profiling import ProfilingScope, TimeRecorder
from docling.utils.utils import chunkify
# Example on how to apply to Docling Document OCR as a post-processing with "nanonets-ocr2-3b" via LM Studio
# Requires LM Studio running inference server with "nanonets-ocr2-3b" model pre-loaded
# To run:
# uv run python docs/examples/post_process_ocr_with_vlm.py
# Endpoint of a locally running LM Studio inference server (OpenAI-compatible API).
LM_STUDIO_URL = "http://localhost:1234/v1/chat/completions"
# Model that must already be loaded in LM Studio.
LM_STUDIO_MODEL = "nanonets-ocr2-3b"
# Instruction sent with every element crop; asks the VLM for plain one-line text.
DEFAULT_PROMPT = "Extract the text from the above document as if you were reading it naturally. Output pure text, no html and no markdown. Pay attention on line breaks and don't miss text after line break. Put all text in one line."
# Debug/verbosity switches used throughout this example.
VERBOSE = True
SHOW_IMAGE = False
SHOW_EMPTY_CROPS = False
SHOW_NONEMPTY_CROPS = False
PRINT_RESULT_MARKDOWN = False
def is_empty_fast_with_lines_pil(
    pil_img: Image.Image,
    downscale_max_side: int = 48,  # 64
    grad_threshold: float = 15.0,  # how strong a gradient must be to count as edge
    min_line_coverage: float = 0.6,  # line must cover 60% of height/width
    max_allowed_lines: int = 10,  # allow up to this many strong lines
    edge_fraction_threshold: float = 0.0035,
):
    """
    Fast 'empty' detector using only PIL + NumPy.

    Treats an image as empty if:
    - It has very few edges overall, OR
    - Edges can be explained by at most `max_allowed_lines` long vertical/horizontal lines.

    Returns:
        (is_empty: bool, remaining_edge_fraction: float, debug: dict)
    """
    # 1) Convert to grayscale
    gray = pil_img.convert("L")
    # 2) Aggressive downscale. NOTE(review): despite the commented-out
    # aspect-preserving code below, the current code resizes to a
    # downscale_max_side x downscale_max_side square, discarding aspect ratio.
    w0, h0 = gray.size
    max_side = max(w0, h0)
    if max_side > downscale_max_side:
        # scale = downscale_max_side / max_side
        # new_w = max(1, int(w0 * scale))
        # new_h = max(1, int(h0 * scale))
        new_w = downscale_max_side
        new_h = downscale_max_side
        gray = gray.resize((new_w, new_h), resample=Image.BILINEAR)
    w, h = gray.size
    if w == 0 or h == 0:
        return True, 0.0, {"reason": "zero_size"}
    # 3) Small blur to reduce noise
    gray = gray.filter(ImageFilter.BoxBlur(1))
    # 4) Convert to NumPy
    arr = np.asarray(
        gray, dtype=np.float32
    )  # shape (h, w); note: PIL size is (w, h)
    H, W = arr.shape
    # 5) Compute simple gradients (forward differences)
    gx = np.zeros_like(arr)
    gy = np.zeros_like(arr)
    gx[:, :-1] = arr[:, 1:] - arr[:, :-1]  # horizontal differences
    gy[:-1, :] = arr[1:, :] - arr[:-1, :]  # vertical differences
    mag = np.hypot(gx, gy)  # gradient magnitude
    # 6) Threshold gradients to get edges (boolean mask)
    edges = mag > grad_threshold
    edge_fraction = edges.mean()
    # Quick early-exit: almost no edges => empty
    if edge_fraction < edge_fraction_threshold:
        return True, float(edge_fraction), {"reason": "few_edges"}
    # 7) Detect strong vertical & horizontal lines via edge sums
    col_sum = edges.sum(axis=0)  # per column
    row_sum = edges.sum(axis=1)  # per row
    # Line must have edge pixels in at least `min_line_coverage` of the dimension
    vert_line_cols = np.where(col_sum >= min_line_coverage * H)[0]
    horiz_line_rows = np.where(row_sum >= min_line_coverage * W)[0]
    num_lines = len(vert_line_cols) + len(horiz_line_rows)
    # If we have more long lines than allowed => non-empty
    if num_lines > max_allowed_lines:
        return (
            False,
            float(edge_fraction),
            {
                "reason": "too_many_lines",
                "num_lines": int(num_lines),
                "edge_fraction": float(edge_fraction),
            },
        )
    # 8) Mask out those lines and recompute remaining edges; lines alone
    # (e.g. empty table cell borders) should not make a crop "non-empty".
    line_mask = np.zeros_like(edges, dtype=bool)
    if len(vert_line_cols) > 0:
        line_mask[:, vert_line_cols] = True
    if len(horiz_line_rows) > 0:
        line_mask[horiz_line_rows, :] = True
    remaining_edges = edges & ~line_mask
    remaining_edge_fraction = remaining_edges.mean()
    is_empty = remaining_edge_fraction < edge_fraction_threshold
    debug = {
        "original_edge_fraction": float(edge_fraction),
        "remaining_edge_fraction": float(remaining_edge_fraction),
        "num_vert_lines": len(vert_line_cols),
        "num_horiz_lines": len(horiz_line_rows),
    }
    return is_empty, float(remaining_edge_fraction), debug
def remove_break_lines(text: str) -> str:
    """Collapse all line breaks and whitespace runs into single spaces, then strip."""
    one_line = re.sub(r"[\r\n]+", " ", text)
    return re.sub(r"\s+", " ", one_line).strip()
def safe_crop(img: Image.Image, bbox):
    """Crop `img` to `bbox`, first clamping every coordinate to the image bounds."""
    left, top, right, bottom = bbox
    width, height = img.width, img.height
    # Clamp each edge into [0, width] / [0, height] so PIL never sees
    # out-of-bounds coordinates.
    left = min(max(left, 0), width)
    right = min(max(right, 0), width)
    top = min(max(top, 0), height)
    bottom = min(max(bottom, 0), height)
    return img.crop((left, top, right, bottom))
def no_long_repeats(s: str, threshold: int) -> bool:
    """
    Return False when `s` contains more than `threshold` identical characters
    in a row, True otherwise.

    Note: the pattern uses `.` without DOTALL, so runs of newline characters
    are not counted (unchanged from the original behavior).
    """
    runaway = re.compile(r"(.)\1{%d,}" % threshold)
    return runaway.search(s) is None
class PostOcrEnrichmentElement(BaseModel):
    # Pairs a document element (or table/graph cell) with the page-image crop(s)
    # that should be re-OCRed for it.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    # The element whose text will be replaced by the OCR result.
    item: Union[DocItem, TableCell, RichTableCell, GraphCell]
    image: list[
        Image.Image
    ]  # Needs to be a list of images for multi-provenance elements
class PostOcrEnrichmentPipelineOptions(ConvertPipelineOptions):
    # Configuration of the remote VLM endpoint used to re-OCR element crops.
    api_options: PictureDescriptionApiOptions
class PostOcrEnrichmentPipeline(SimplePipeline):
    """Pipeline that runs only the post-OCR enrichment step over an existing document."""

    def __init__(self, pipeline_options: PostOcrEnrichmentPipelineOptions):
        super().__init__(pipeline_options)
        self.pipeline_options: PostOcrEnrichmentPipelineOptions
        # Single enrichment stage: the remote-VLM OCR model.
        self.enrichment_pipe = [
            PostOcrApiEnrichmentModel(
                enabled=True,
                enable_remote_services=True,
                artifacts_path=None,
                options=self.pipeline_options.api_options,
                accelerator_options=AcceleratorOptions(),
            )
        ]

    @classmethod
    def get_default_options(cls) -> PostOcrEnrichmentPipelineOptions:
        # NOTE(review): PostOcrEnrichmentPipelineOptions declares api_options
        # without a default, so this call presumably fails validation unless
        # pydantic supplies one — TODO confirm.
        return PostOcrEnrichmentPipelineOptions()

    def _enrich_document(self, conv_res: ConversionResult) -> ConversionResult:
        # Yields, per document element, the list of crop/item pairs produced by
        # the model's prepare_element (may be several for multi-cell elements).
        def _prepare_elements(
            conv_res: ConversionResult, model: GenericEnrichmentModel[Any]
        ) -> Iterable[NodeItem]:
            for doc_element, _level in conv_res.document.iterate_items(
                traverse_pictures=True,
                included_content_layers={
                    ContentLayer.BODY,
                    ContentLayer.FURNITURE,
                },
            ):  # With all content layers, with traverse_pictures=True
                prepared_elements = model.prepare_element(
                    conv_res=conv_res, element=doc_element
                )
                if prepared_elements is not None:
                    yield prepared_elements

        with TimeRecorder(conv_res, "doc_enrich", scope=ProfilingScope.DOCUMENT):
            for model in self.enrichment_pipe:
                for element_batch in chunkify(
                    _prepare_elements(conv_res, model),
                    model.elements_batch_size,
                ):
                    # model(...) is a generator; iterate it fully so every
                    # element actually gets processed.
                    for element in model(
                        doc=conv_res.document, element_batch=element_batch
                    ):  # Must exhaust!
                        pass
        return conv_res
class PostOcrApiEnrichmentModel(
    GenericEnrichmentModel[PostOcrEnrichmentElement], BaseModelWithOptions
):
    """Enrichment model that re-OCRs document elements via a remote VLM endpoint.

    ``prepare_element`` crops each element's bounding box(es) out of the embedded
    page images (skipping visually empty crops); ``__call__`` sends the crops to
    the configured API and writes the cleaned responses back into the elements.
    """

    # Class-level default; overridden per instance in __init__ (0.05).
    expansion_factor: float = 0.001

    def prepare_element(
        self, conv_res: ConversionResult, element: NodeItem
    ) -> Optional[list[PostOcrEnrichmentElement]]:
        """Build the crop/item pairs for `element`.

        Returns None for non-processable elements, otherwise a (possibly empty)
        list of PostOcrEnrichmentElement. Crops judged empty by
        is_empty_fast_with_lines_pil are dropped to avoid pointless API calls.
        """
        if not self.is_processable(doc=conv_res.document, element=element):
            return None
        allowed = (DocItem, TableItem, GraphCell)
        assert isinstance(element, allowed)

        if isinstance(element, (KeyValueItem, FormItem)):
            # Key/value and form items are re-OCRed per graph cell; each cell
            # carries exactly one provenance.
            result = []
            for c in element.graph.cells:
                element_prov = c.prov  # Key / Value have only one provenance!
                bbox = element_prov.bbox
                page_ix = element_prov.page_no
                # Page coordinates -> embedded page-image pixel coordinates.
                bbox = bbox.scale_to_size(
                    old_size=conv_res.document.pages[page_ix].size,
                    new_size=conv_res.document.pages[page_ix].image.size,
                )
                expanded_bbox = bbox.expand_by_scale(
                    x_scale=self.expansion_factor, y_scale=self.expansion_factor
                ).to_top_left_origin(
                    page_height=conv_res.document.pages[page_ix].image.size.height
                )
                good_bbox = not (
                    expanded_bbox.l > expanded_bbox.r
                    or expanded_bbox.t > expanded_bbox.b
                )
                if good_bbox:
                    cropped_image = conv_res.document.pages[
                        page_ix
                    ].image.pil_image.crop(expanded_bbox.as_tuple())
                    is_empty, rem_frac, debug = is_empty_fast_with_lines_pil(
                        cropped_image
                    )
                    if is_empty:
                        if SHOW_EMPTY_CROPS:
                            try:
                                cropped_image.show()
                            except Exception as e:
                                print(f"Error with image: {e}")
                        print(
                            f"Detected empty form item image crop: {rem_frac} - {debug}"
                        )
                    else:
                        result.append(
                            PostOcrEnrichmentElement(item=c, image=[cropped_image])
                        )
            return result
        elif isinstance(element, TableItem):
            # Tables are re-OCRed cell by cell.
            element_prov = element.prov[0]
            page_ix = element_prov.page_no
            result = []
            for i, row in enumerate(element.data.grid):
                for j, cell in enumerate(row):
                    if hasattr(cell, "bbox") and cell.bbox:
                        bbox = cell.bbox.scale_to_size(
                            old_size=conv_res.document.pages[page_ix].size,
                            new_size=conv_res.document.pages[page_ix].image.size,
                        )
                        expanded_bbox = bbox.expand_by_scale(
                            x_scale=self.table_cell_expansion_factor,
                            y_scale=self.table_cell_expansion_factor,
                        ).to_top_left_origin(
                            page_height=conv_res.document.pages[
                                page_ix
                            ].image.size.height
                        )
                        good_bbox = not (
                            expanded_bbox.l > expanded_bbox.r
                            or expanded_bbox.t > expanded_bbox.b
                        )
                        if good_bbox:
                            cropped_image = conv_res.document.pages[
                                page_ix
                            ].image.pil_image.crop(expanded_bbox.as_tuple())
                            is_empty, rem_frac, debug = is_empty_fast_with_lines_pil(
                                cropped_image
                            )
                            if is_empty:
                                if SHOW_EMPTY_CROPS:
                                    try:
                                        cropped_image.show()
                                    except Exception as e:
                                        print(f"Error with image: {e}")
                                print(
                                    f"Detected empty table cell image crop: {rem_frac} - {debug}"
                                )
                            else:
                                if SHOW_NONEMPTY_CROPS:
                                    cropped_image.show()
                                result.append(
                                    PostOcrEnrichmentElement(
                                        item=cell, image=[cropped_image]
                                    )
                                )
            return result
        else:
            # Generic DocItem: one crop per provenance, merged into a single
            # enrichment element so texts can be concatenated later.
            multiple_crops = []
            for element_prov in element.prov:
                bbox = element_prov.bbox
                page_ix = element_prov.page_no
                bbox = bbox.scale_to_size(
                    old_size=conv_res.document.pages[page_ix].size,
                    new_size=conv_res.document.pages[page_ix].image.size,
                )
                expanded_bbox = bbox.expand_by_scale(
                    x_scale=self.expansion_factor, y_scale=self.expansion_factor
                ).to_top_left_origin(
                    page_height=conv_res.document.pages[page_ix].image.size.height
                )
                good_bbox = not (
                    expanded_bbox.l > expanded_bbox.r
                    or expanded_bbox.t > expanded_bbox.b
                )
                if hasattr(element, "text") and good_bbox:
                    cropped_image = conv_res.document.pages[
                        page_ix
                    ].image.pil_image.crop(expanded_bbox.as_tuple())
                    is_empty, rem_frac, debug = is_empty_fast_with_lines_pil(
                        cropped_image
                    )
                    if is_empty:
                        if SHOW_EMPTY_CROPS:
                            try:
                                cropped_image.show()
                            except Exception as e:
                                print(f"Error with image: {e}")
                        print(f"Detected empty text crop: {rem_frac} - {debug}")
                    else:
                        multiple_crops.append(cropped_image)
            if hasattr(element, "text"):
                print(f"\nOLD TEXT: {element.text}")
            else:
                print("Not a text element")
            if len(multiple_crops) > 0:
                # good crops
                return [PostOcrEnrichmentElement(item=element, image=multiple_crops)]
            else:
                # nothing
                return []

    @classmethod
    def get_options_type(cls) -> type[PictureDescriptionApiOptions]:
        return PictureDescriptionApiOptions

    def __init__(
        self,
        *,
        enabled: bool,
        enable_remote_services: bool,
        artifacts_path: Optional[Union[Path, str]],
        options: PictureDescriptionApiOptions,
        accelerator_options: AcceleratorOptions,
    ):
        self.enabled = enabled
        self.options = options
        self.concurrency = 2
        self.expansion_factor = 0.05
        self.table_cell_expansion_factor = 0.0  # do not modify table cell size
        self.elements_batch_size = 4
        self._accelerator_options = accelerator_options
        self._artifacts_path = (
            Path(artifacts_path) if isinstance(artifacts_path, str) else artifacts_path
        )
        if self.enabled and not enable_remote_services:
            raise OperationNotAllowed(
                "Enable remote services by setting pipeline_options.enable_remote_services=True."
            )

    def is_processable(self, doc: DoclingDocument, element: NodeItem) -> bool:
        # All elements are processable whenever the model is enabled.
        return self.enabled

    def _annotate_images(self, images: Iterable[Image.Image]) -> Iterable[str]:
        """Send each crop to the VLM endpoint; yield responses in input order."""

        def _api_request(image: Image.Image) -> str:
            res = api_image_request(
                image=image,
                prompt=self.options.prompt,
                url=self.options.url,
                # timeout=self.options.timeout,
                timeout=30,
                headers=self.options.headers,
                **self.options.params,
            )
            return res[0]

        with ThreadPoolExecutor(max_workers=self.concurrency) as executor:
            yield from executor.map(_api_request, images)

    def __call__(
        self,
        doc: DoclingDocument,
        element_batch: Iterable[ItemAndImageEnrichmentElement],
    ) -> Iterable[NodeItem]:
        """Run OCR over the batch and write the responses back into the items."""
        if not self.enabled:
            for element in element_batch:
                yield element.item
            return

        # Flatten the batch: one (item, image, crop-index) triple per crop, so
        # multi-provenance items appear once per crop.
        elements: list[Union[DocItem, TableCell, RichTableCell, GraphCell]] = []
        images: list[Image.Image] = []
        img_ind_per_element: list[int] = []
        for element_stack in element_batch:
            for element in element_stack:
                allowed = (DocItem, TableCell, RichTableCell, GraphCell)
                assert isinstance(element.item, allowed)
                for ind, img in enumerate(element.image):
                    elements.append(element.item)
                    images.append(img)
                    img_ind_per_element.append(ind)
        if not images:
            return

        # Strip structural HTML tags the model sometimes emits; the plain text
        # is all we need. (Fix: "</tbody>" was previously missing from this
        # list because "<tbody>" was listed twice.)
        def clean_html_tags(text):
            for tag in [
                "<table>",
                "<tr>",
                "<td>",
                "<strong>",
                "</table>",
                "</tr>",
                "</td>",
                "</strong>",
                "<th>",
                "</th>",
                "<tbody>",
                "</tbody>",
                "<thead>",
                "</thead>",
            ]:
                text = text.replace(tag, "")
            return text

        outputs = list(self._annotate_images(images))
        for item, output, img_ind in zip(elements, outputs, img_ind_per_element):
            output = clean_html_tags(output).strip()
            output = remove_break_lines(output)
            # The last measure against hallucinations: drop a known
            # hallucinated opening phrase entirely.
            if output.startswith("The first of these"):
                output = ""
            # Skip outputs with runaway character repetitions (model failure mode).
            if no_long_repeats(output, 50):
                if VERBOSE:
                    if isinstance(item, (TextItem)):
                        print(f"\nOLD TEXT: {item.text}")
                # Re-populate text
                if isinstance(item, (TextItem, GraphCell)):
                    if img_ind > 0:
                        # Concat texts across several provenances
                        item.text += " " + output
                    else:
                        item.text = output
                elif isinstance(item, (TableCell, RichTableCell)):
                    item.text = output
                elif isinstance(item, PictureItem):
                    pass
                else:
                    raise ValueError(f"Unknown item type: {type(item)}")
                if VERBOSE:
                    if isinstance(item, (TextItem)):
                        print(f"NEW TEXT: {item.text}")
                # Keep charspans consistent with the new text length.
                if isinstance(item, GraphCell):
                    item.prov.charspan = (0, len(item.text))
                elif isinstance(item, TextItem):
                    item.prov[0].charspan = (0, len(item.text))
            yield item
def convert_pdf(pdf_path: Path, out_intermediate_json: Path):
    """Convert a PDF into a Docling JSON with embedded page/picture images."""
    # Page images must be embedded so the post-OCR step can crop from them.
    opts = PdfPipelineOptions()
    opts.generate_page_images = True
    opts.generate_picture_images = True
    # opts.images_scale = 4.0
    opts.images_scale = 2.0

    converter = DocumentConverter(
        allowed_formats=[InputFormat.PDF],
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_cls=StandardPdfPipeline, pipeline_options=opts
            )
        },
    )

    if VERBOSE:
        print(
            "Converting PDF to get a Docling document json with embedded page images..."
        )
    conv_result = converter.convert(pdf_path)
    conv_result.document.save_as_json(
        filename=out_intermediate_json, image_mode=ImageRefMode.EMBEDDED
    )

    if PRINT_RESULT_MARKDOWN:
        md1 = conv_result.document.export_to_markdown()
        print("*** ORIGINAL MARKDOWN ***")
        print(md1)
def post_process_json(in_json: Path, out_final_json: Path):
    """Re-OCR every element bounding box of an existing Docling JSON via the VLM."""
    print(f"Post-process all bounding boxes with OCR... {os.path.basename(in_json)}")

    api_opts = PictureDescriptionApiOptions(
        url=LM_STUDIO_URL,
        prompt=DEFAULT_PROMPT,
        provenance="lm-studio-ocr",
        batch_size=4,
        concurrency=2,
        scale=2.0,
        params={"model": LM_STUDIO_MODEL},
    )
    converter = DocumentConverter(
        format_options={
            InputFormat.JSON_DOCLING: FormatOption(
                pipeline_cls=PostOcrEnrichmentPipeline,
                pipeline_options=PostOcrEnrichmentPipelineOptions(api_options=api_opts),
                backend=DoclingJSONBackend,
            )
        }
    )

    result = converter.convert(in_json)

    if SHOW_IMAGE:
        result.document.pages[1].image.pil_image.show()

    result.document.save_as_json(out_final_json)

    if PRINT_RESULT_MARKDOWN:
        md = result.document.export_to_markdown()
        print("*** MARKDOWN ***")
        print(md)
def process_pdf(pdf_path: Path, scratch_dir: Path, out_dir: Path):
    """Run the two-stage pipeline (convert, then post-OCR) for a single PDF."""
    inter_json = scratch_dir / (pdf_path.stem + ".json")
    final_json = out_dir / (pdf_path.stem + ".json")
    for target in (inter_json, final_json):
        target.parent.mkdir(parents=True, exist_ok=True)

    # Skip work when a non-empty result already exists.
    if final_json.exists() and final_json.stat().st_size > 0:
        print(f"Result already found here: '{final_json}', aborting...")
        return

    convert_pdf(pdf_path, inter_json)
    post_process_json(inter_json, final_json)
def process_json(json_path: Path, out_dir: Path):
    """Post-OCR a single Docling JSON unless a non-empty result already exists."""
    final_json = out_dir / (json_path.stem + ".json")
    final_json.parent.mkdir(parents=True, exist_ok=True)
    already_done = final_json.exists() and final_json.stat().st_size > 0
    if not already_done:
        post_process_json(json_path, final_json)
def filter_jsons_by_ocr_list(jsons, folder):
    """
    jsons: list[Path] - JSON files
    folder: Path - folder containing ocr_documents.txt

    When `folder/ocr_documents.txt` exists, keep only the JSONs whose stem is
    listed in it (one name per line); otherwise return `jsons` unchanged.
    """
    listing = folder / "ocr_documents.txt"
    if not listing.exists():
        return jsons
    with listing.open("r", encoding="utf-8") as handle:
        wanted = {stripped for line in handle if (stripped := line.strip())}
    return [path for path in jsons if path.stem in wanted]
def run_jsons(in_path: Path, out_dir: Path):
    """Process every JSON in a folder, honoring an optional ocr_documents.txt whitelist."""
    if not in_path.is_dir():
        raise SystemExit("Invalid --in path")
    jsons = sorted(in_path.glob("*.json"))
    if not jsons:
        raise SystemExit("Folder mode expects one or more .json files")
    # Look for ocr_documents.txt; when present, restrict processing to its entries.
    for candidate in tqdm(filter_jsons_by_ocr_list(jsons, in_path)):
        print("")
        print("Processing file...")
        print(candidate)
        process_json(candidate, out_dir)
def main():
    """CLI entry point: PDF/JSON -> final post-OCRed JSON."""
    logging.getLogger().setLevel(logging.ERROR)
    parser = argparse.ArgumentParser(description="PDF/JSON -> final JSON pipeline")
    parser.add_argument(
        "--in",
        dest="in_path",
        default="tests/data/pdf/2305.03393v1-pg9.pdf",
        required=False,
        help="Path to a PDF/JSON file or a folder of JSONs",
    )
    parser.add_argument(
        "--out",
        dest="out_dir",
        default="scratch/",
        required=False,
        help="Folder for final JSONs (scratch goes inside)",
    )
    args = parser.parse_args()

    in_path = Path(args.in_path).expanduser().resolve()
    out_dir = Path(args.out_dir).expanduser().resolve()
    print(f"in_path: {in_path}")
    print(f"out_dir: {out_dir}")
    scratch_dir = out_dir / "temp"

    if not in_path.exists():
        raise SystemExit(f"Input not found: {in_path}")

    # Folder mode: batch-process every JSON inside.
    if not in_path.is_file():
        run_jsons(in_path, out_dir)
        return

    # Single-file mode: dispatch on the extension.
    suffix = in_path.suffix.lower()
    if suffix == ".pdf":
        process_pdf(in_path, scratch_dir, out_dir)
    elif suffix == ".json":
        process_json(in_path, out_dir)
    else:
        raise SystemExit("Single-file mode expects a .pdf or .json")
if __name__ == "__main__":
main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/gpu_standard_pipeline.py | docs/examples/gpu_standard_pipeline.py | # %% [markdown]
#
# What this example does
# - Run a conversion using the best setup for GPU for the standard pipeline
#
# Requirements
# - Python 3.9+
# - Install Docling: `pip install docling`
#
# How to run
# - `python docs/examples/gpu_standard_pipeline.py`
#
# This example is part of a set of GPU optimization strategies. Read more about it in [GPU support](../../usage/gpu/)
#
# ## Example code
# %%
import datetime
import logging
import time
from pathlib import Path
import numpy as np
from pydantic import TypeAdapter
from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
from docling.datamodel.base_models import ConversionStatus, InputFormat
from docling.datamodel.pipeline_options import (
ThreadedPdfPipelineOptions,
)
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.threaded_standard_pdf_pipeline import ThreadedStandardPdfPipeline
from docling.utils.profiling import ProfilingItem
_log = logging.getLogger(__name__)
def main():
    """Convert a sample PDF with the threaded standard pipeline tuned for CUDA."""
    logging.getLogger("docling").setLevel(logging.WARNING)
    _log.setLevel(logging.INFO)

    data_folder = Path(__file__).parent / "../../tests/data"
    # input_doc_path = data_folder / "pdf" / "2305.03393v1.pdf" # 14 pages
    input_doc_path = data_folder / "pdf" / "redp5110_sampled.pdf"  # 18 pages

    # Large layout batches benefit most from the GPU; OCR is disabled below.
    opts = ThreadedPdfPipelineOptions(
        accelerator_options=AcceleratorOptions(
            device=AcceleratorDevice.CUDA,
        ),
        ocr_batch_size=4,
        layout_batch_size=64,
        table_batch_size=4,
    )
    opts.do_ocr = False

    converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_cls=ThreadedStandardPdfPipeline,
                pipeline_options=opts,
            )
        }
    )

    # Time model loading separately from the conversion itself.
    init_start = time.time()
    converter.initialize_pipeline(InputFormat.PDF)
    init_runtime = time.time() - init_start
    _log.info(f"Pipeline initialized in {init_runtime:.2f} seconds.")

    conv_start = time.time()
    conv_result = converter.convert(input_doc_path)
    pipeline_runtime = time.time() - conv_start

    assert conv_result.status == ConversionStatus.SUCCESS

    num_pages = len(conv_result.pages)
    _log.info(f"Document converted in {pipeline_runtime:.2f} seconds.")
    _log.info(f" {num_pages / pipeline_runtime:.2f} pages/second.")
if __name__ == "__main__":
main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/export_tables.py | docs/examples/export_tables.py | # %% [markdown]
# Extract tables from a PDF and export them as CSV and HTML.
#
# What this example does
# - Converts a PDF and iterates detected tables.
# - Prints each table as Markdown to stdout, and saves CSV/HTML to `scratch/`.
#
# Prerequisites
# - Install Docling and `pandas`.
#
# How to run
# - From the repo root: `python docs/examples/export_tables.py`.
# - Outputs are written to `scratch/`.
#
# Input document
# - Defaults to `tests/data/pdf/2206.01062.pdf`. Change `input_doc_path` as needed.
#
# Notes
# - `table.export_to_dataframe()` returns a pandas DataFrame for convenient export/processing.
# - Printing via `DataFrame.to_markdown()` may require the optional `tabulate` package
# (`pip install tabulate`). If unavailable, skip the print or use `to_csv()`.
# %%
import logging
import time
from pathlib import Path
import pandas as pd
from docling.document_converter import DocumentConverter
_log = logging.getLogger(__name__)
def main():
    """Convert a PDF and export every detected table as Markdown/CSV/HTML."""
    logging.basicConfig(level=logging.INFO)

    data_folder = Path(__file__).parent / "../../tests/data"
    input_doc_path = data_folder / "pdf/2206.01062.pdf"
    output_dir = Path("scratch")

    converter = DocumentConverter()

    started = time.time()
    conv_res = converter.convert(input_doc_path)

    output_dir.mkdir(parents=True, exist_ok=True)
    doc_filename = conv_res.input.file.stem

    # Export tables
    for table_ix, table in enumerate(conv_res.document.tables):
        frame: pd.DataFrame = table.export_to_dataframe(doc=conv_res.document)
        print(f"## Table {table_ix}")
        print(frame.to_markdown())

        # Save the table as CSV
        csv_path = output_dir / f"{doc_filename}-table-{table_ix + 1}.csv"
        _log.info(f"Saving CSV table to {csv_path}")
        frame.to_csv(csv_path)

        # Save the table as HTML
        html_path = output_dir / f"{doc_filename}-table-{table_ix + 1}.html"
        _log.info(f"Saving HTML table to {html_path}")
        with html_path.open("w") as fp:
            fp.write(table.export_to_html(doc=conv_res.document))

    elapsed = time.time() - started
    _log.info(f"Document converted and tables exported in {elapsed:.2f} seconds.")
if __name__ == "__main__":
main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/granitedocling_repetition_stopping.py | docs/examples/granitedocling_repetition_stopping.py | # %% [markdown]
# Experimental VLM pipeline with custom repetition stopping criteria.
#
# This script demonstrates the use of custom stopping criteria that detect
# repetitive location coordinate patterns in generated text and stop generation
# when such patterns are found.
#
# What this example does
# - Uses the GraniteDocling model with custom repetition stopping criteria injected
# - Processes a PDF document or image and monitors for repetitive coordinate patterns
# - Stops generation early when repetitive patterns are detected
# %%
import logging

from docling.datamodel import vlm_model_specs
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import VlmPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.models.utils.generation_utils import (
    DocTagsRepetitionStopper,
)
from docling.pipeline.vlm_pipeline import VlmPipeline

# Configure logging once so repetition-stopping events are visible.
# (The original code called logging.basicConfig() a second time below; that
# second call is a no-op once the root logger already has a handler, so it
# has been removed.)
logging.basicConfig(level=logging.INFO, format="%(levelname)s:%(name)s:%(message)s")

# Replace with a local path if preferred.
# source = "https://ibm.biz/docling-page-with-table" # Example that shows no repetitions.
source = "tests/data_scanned/old_newspaper.png"  # Example that creates repetitions.

print(f"Processing document: {source}")

###### USING GRANITEDOCLING WITH CUSTOM REPETITION STOPPING

## Using standard Huggingface Transformers (most portable, slowest)
custom_vlm_options = vlm_model_specs.GRANITEDOCLING_TRANSFORMERS.model_copy()

# Uncomment this to use MLX-accelerated version on Apple Silicon
# custom_vlm_options = vlm_model_specs.GRANITEDOCLING_MLX.model_copy() # use this for Apple Silicon

# Create custom VLM options with repetition stopping criteria injected into
# the generation loop.
custom_vlm_options.custom_stopping_criteria = [
    DocTagsRepetitionStopper(N=32)
]  # check for repetitions for every 32 new tokens decoded.

pipeline_options = VlmPipelineOptions(
    vlm_options=custom_vlm_options,
)

converter = DocumentConverter(
    format_options={
        InputFormat.IMAGE: PdfFormatOption(
            pipeline_cls=VlmPipeline,
            pipeline_options=pipeline_options,
        ),
    }
)

doc = converter.convert(source=source).document

print(doc.export_to_markdown())
## Using a remote VLM inference service (for example VLLM) - uncomment to use
# custom_vlm_options = ApiVlmOptions(
# url="http://localhost:8000/v1/chat/completions", # LM studio defaults to port 1234, VLLM to 8000
# params=dict(
# model=vlm_model_specs.GRANITEDOCLING_TRANSFORMERS.repo_id,
# max_tokens=8192,
# skip_special_tokens=True, # needed for VLLM
# ),
# headers={
# "Authorization": "Bearer YOUR_API_KEY",
# },
# prompt=vlm_model_specs.GRANITEDOCLING_TRANSFORMERS.prompt,
# timeout=90,
# scale=2.0,
# temperature=0.0,
# response_format=ResponseFormat.DOCTAGS,
# custom_stopping_criteria=[
# DocTagsRepetitionStopper(N=1)
# ], # check for repetitions for every new chunk of the response stream
# )
# pipeline_options = VlmPipelineOptions(
# vlm_options=custom_vlm_options,
# enable_remote_services=True, # required when using a remote inference service.
# )
# converter = DocumentConverter(
# format_options={
# InputFormat.IMAGE: PdfFormatOption(
# pipeline_cls=VlmPipeline,
# pipeline_options=pipeline_options,
# ),
# }
# )
# doc = converter.convert(source=source).document
# print(doc.export_to_markdown())
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/asr_pipeline_performance_comparison.py | docs/examples/asr_pipeline_performance_comparison.py | #!/usr/bin/env python3
"""
Performance comparison between CPU and MLX Whisper on Apple Silicon.
This script compares the performance of:
1. Native Whisper (forced to CPU)
2. MLX Whisper (Apple Silicon optimized)
Both use the same model size for fair comparison.
"""
import argparse
import sys
import time
from pathlib import Path
# Add the repository root to the path so we can import docling
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import AsrPipelineOptions
from docling.datamodel.pipeline_options_asr_model import (
InferenceAsrFramework,
InlineAsrMlxWhisperOptions,
InlineAsrNativeWhisperOptions,
)
from docling.document_converter import AudioFormatOption, DocumentConverter
from docling.pipeline.asr_pipeline import AsrPipeline
def create_cpu_whisper_options(model_size: str = "turbo"):
    """Create native Whisper options forced to CPU.

    Args:
        model_size: Whisper checkpoint name passed through as ``repo_id``
            (e.g. "tiny", "base", "turbo").

    Returns:
        An ``InlineAsrNativeWhisperOptions`` configured for CPU inference.
    """
    # Collect the settings first, then build the options object in one shot.
    whisper_settings = dict(
        repo_id=model_size,
        inference_framework=InferenceAsrFramework.WHISPER,
        verbose=True,
        timestamps=True,
        word_timestamps=True,
        temperature=0.0,
        max_new_tokens=256,
        max_time_chunk=30.0,
    )
    return InlineAsrNativeWhisperOptions(**whisper_settings)
def create_mlx_whisper_options(model_size: str = "turbo"):
    """Create MLX Whisper options for Apple Silicon.

    Args:
        model_size: One of "tiny", "small", "base", "medium", "large", "turbo".

    Returns:
        An ``InlineAsrMlxWhisperOptions`` pointing at the matching
        mlx-community checkpoint.

    Raises:
        KeyError: If ``model_size`` is not a known size.
    """
    # Map the generic size names onto the mlx-community checkpoint repos.
    size_to_repo = {
        "tiny": "mlx-community/whisper-tiny-mlx",
        "small": "mlx-community/whisper-small-mlx",
        "base": "mlx-community/whisper-base-mlx",
        "medium": "mlx-community/whisper-medium-mlx-8bit",
        "large": "mlx-community/whisper-large-mlx-8bit",
        "turbo": "mlx-community/whisper-turbo",
    }
    repo = size_to_repo[model_size]
    return InlineAsrMlxWhisperOptions(
        repo_id=repo,
        inference_framework=InferenceAsrFramework.MLX,
        language="en",
        task="transcribe",
        word_timestamps=True,
        no_speech_threshold=0.6,
        logprob_threshold=-1.0,
        compression_ratio_threshold=2.4,
    )
def run_transcription_test(
    audio_file: Path, asr_options, device: AcceleratorDevice, test_name: str
):
    """Run a single transcription test and return timing results.

    Args:
        audio_file: Path to the audio file to transcribe.
        asr_options: Inline ASR options (native Whisper or MLX Whisper).
        device: Accelerator device to run on (CPU or MPS).
        test_name: Human-readable label printed in the console banner.

    Returns:
        A ``(duration_seconds, success)`` tuple. The duration is wall-clock
        time around ``convert`` only (converter construction is excluded);
        ``success`` is False on conversion failure or any raised exception.
    """
    print(f"\n{'=' * 60}")
    print(f"Running {test_name}")
    print(f"Device: {device}")
    print(f"Model: {asr_options.repo_id}")
    print(f"Framework: {asr_options.inference_framework}")
    print(f"{'=' * 60}")
    # Create pipeline options
    pipeline_options = AsrPipelineOptions(
        accelerator_options=AcceleratorOptions(device=device),
        asr_options=asr_options,
    )
    # Create document converter
    converter = DocumentConverter(
        format_options={
            InputFormat.AUDIO: AudioFormatOption(
                pipeline_cls=AsrPipeline,
                pipeline_options=pipeline_options,
            )
        }
    )
    # Run transcription with timing
    start_time = time.time()
    try:
        result = converter.convert(audio_file)
        end_time = time.time()
        duration = end_time - start_time
        if result.status.value == "success":
            # Extract text for verification
            text_content = []
            for item in result.document.texts:
                text_content.append(item.text)
            print(f"✅ Success! Duration: {duration:.2f} seconds")
            # Only the first 100 characters of the transcript are shown.
            print(f"Transcribed text: {''.join(text_content)[:100]}...")
            return duration, True
        else:
            print(f"❌ Failed! Status: {result.status}")
            return duration, False
    except Exception as e:
        # Broad catch is intentional for a benchmark: report the failure and
        # let the remaining comparisons continue.
        end_time = time.time()
        duration = end_time - start_time
        print(f"❌ Error: {e}")
        return duration, False
def parse_args():
    """Parse command line arguments."""
    arg_parser = argparse.ArgumentParser(
        description="Performance comparison between CPU and MLX Whisper on Apple Silicon",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Use default test audio file
  python asr_pipeline_performance_comparison.py
  # Use your own audio file
  python asr_pipeline_performance_comparison.py --audio /path/to/your/audio.mp3
  # Use a different audio file from the tests directory
  python asr_pipeline_performance_comparison.py --audio tests/data/audio/another_sample.wav
    """,
    )
    # The benchmark takes a single optional flag: the audio file under test.
    audio_help = (
        "Path to audio file for testing (default: tests/data/audio/sample_10s.mp3)"
    )
    arg_parser.add_argument("--audio", type=str, help=audio_help)
    return arg_parser.parse_args()
def main():
    """Run performance comparison between CPU and MLX Whisper.

    Probes the environment for Apple Silicon (MPS) support and for the
    mlx-whisper package, benchmarks each model size on CPU (and on MPS when
    available), then prints a per-model summary table with speedups.
    """
    args = parse_args()
    # Check if we're on Apple Silicon
    try:
        import torch

        has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
    except ImportError:
        has_mps = False
    try:
        import mlx_whisper  # availability probe only; the import itself is the check

        has_mlx_whisper = True
    except ImportError:
        has_mlx_whisper = False
    print("ASR Pipeline Performance Comparison")
    print("=" * 50)
    print(f"Apple Silicon (MPS) available: {has_mps}")
    print(f"MLX Whisper available: {has_mlx_whisper}")
    if not has_mps:
        print("⚠️  Apple Silicon (MPS) not available - running CPU-only comparison")
        print("   For MLX Whisper performance benefits, run on Apple Silicon devices")
        print("   MLX Whisper is optimized for Apple Silicon devices.")
    if not has_mlx_whisper:
        print("⚠️  MLX Whisper not installed - running CPU-only comparison")
        print("   Install with: pip install mlx-whisper")
        print("   Or: uv sync --extra asr")
        print("   For MLX Whisper performance benefits, install the dependency")
    # Determine audio file path
    if args.audio:
        audio_file = Path(args.audio)
        if not audio_file.is_absolute():
            # If relative path, make it relative to the script's directory
            audio_file = Path(__file__).parent.parent.parent / audio_file
    else:
        # Use default test audio file
        audio_file = (
            Path(__file__).parent.parent.parent
            / "tests"
            / "data"
            / "audio"
            / "sample_10s.mp3"
        )
    if not audio_file.exists():
        print(f"❌ Audio file not found: {audio_file}")
        print("   Please check the path and try again.")
        sys.exit(1)
    print(f"Using test audio: {audio_file}")
    print(f"File size: {audio_file.stat().st_size / 1024:.1f} KB")
    # Test different model sizes
    model_sizes = ["tiny", "base", "turbo"]
    results = {}
    for model_size in model_sizes:
        print(f"\n{'#' * 80}")
        print(f"Testing model size: {model_size}")
        print(f"{'#' * 80}")
        model_results = {}
        # Test 1: Native Whisper (forced to CPU)
        cpu_options = create_cpu_whisper_options(model_size)
        cpu_duration, cpu_success = run_transcription_test(
            audio_file,
            cpu_options,
            AcceleratorDevice.CPU,
            f"Native Whisper {model_size} (CPU)",
        )
        model_results["cpu"] = {"duration": cpu_duration, "success": cpu_success}
        # Test 2: MLX Whisper (Apple Silicon optimized) - only if available
        if has_mps and has_mlx_whisper:
            mlx_options = create_mlx_whisper_options(model_size)
            mlx_duration, mlx_success = run_transcription_test(
                audio_file,
                mlx_options,
                AcceleratorDevice.MPS,
                f"MLX Whisper {model_size} (MPS)",
            )
            model_results["mlx"] = {"duration": mlx_duration, "success": mlx_success}
        else:
            print(f"\n{'=' * 60}")
            print(f"Skipping MLX Whisper {model_size} (MPS) - not available")
            print(f"{'=' * 60}")
            # Placeholder entry keeps the summary-table logic below uniform.
            model_results["mlx"] = {"duration": 0.0, "success": False}
        results[model_size] = model_results
    # Print summary
    print(f"\n{'#' * 80}")
    print("PERFORMANCE COMPARISON SUMMARY")
    print(f"{'#' * 80}")
    print(
        f"{'Model':<10} {'CPU (sec)':<12} {'MLX (sec)':<12} {'Speedup':<12} {'Status':<10}"
    )
    print("-" * 80)
    for model_size, model_results in results.items():
        cpu_duration = model_results["cpu"]["duration"]
        mlx_duration = model_results["mlx"]["duration"]
        cpu_success = model_results["cpu"]["success"]
        mlx_success = model_results["mlx"]["success"]
        if cpu_success and mlx_success:
            speedup = cpu_duration / mlx_duration
            status = "✅ Both OK"
        elif cpu_success:
            # MLX run failed or was skipped: infinity is used as a sentinel.
            speedup = float("inf")
            status = "❌ MLX Failed"
        elif mlx_success:
            speedup = 0
            status = "❌ CPU Failed"
        else:
            speedup = 0
            status = "❌ Both Failed"
        print(
            f"{model_size:<10} {cpu_duration:<12.2f} {mlx_duration:<12.2f} {speedup:<12.2f}x {status:<10}"
        )
    # Calculate overall improvement
    successful_tests = [
        (r["cpu"]["duration"], r["mlx"]["duration"])
        for r in results.values()
        if r["cpu"]["success"] and r["mlx"]["success"]
    ]
    if successful_tests:
        avg_cpu = sum(cpu for cpu, mlx in successful_tests) / len(successful_tests)
        avg_mlx = sum(mlx for cpu, mlx in successful_tests) / len(successful_tests)
        avg_speedup = avg_cpu / avg_mlx
        print("-" * 80)
        print(
            f"{'AVERAGE':<10} {avg_cpu:<12.2f} {avg_mlx:<12.2f} {avg_speedup:<12.2f}x {'Overall':<10}"
        )
        print(f"\n🎯 MLX Whisper provides {avg_speedup:.1f}x average speedup over CPU!")
    else:
        if has_mps and has_mlx_whisper:
            print("\n❌ No successful comparisons available.")
        else:
            print("\n⚠️  MLX Whisper not available - only CPU results shown.")
            print(
                "   Install MLX Whisper and run on Apple Silicon for performance comparison."
            )
# Run the benchmark only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/demo_layout_vlm.py | docs/examples/demo_layout_vlm.py | #!/usr/bin/env python3
"""Demo script for the new ThreadedLayoutVlmPipeline.
This script demonstrates the usage of the experimental ThreadedLayoutVlmPipeline pipeline
that combines layout model preprocessing with VLM processing in a threaded manner.
"""
import argparse
import logging
import traceback
from pathlib import Path
from docling.datamodel.base_models import ConversionStatus, InputFormat
from docling.datamodel.pipeline_options_vlm_model import ApiVlmOptions, ResponseFormat
from docling.datamodel.vlm_model_specs import GRANITEDOCLING_TRANSFORMERS
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.experimental.datamodel.threaded_layout_vlm_pipeline_options import (
ThreadedLayoutVlmPipelineOptions,
)
from docling.experimental.pipeline.threaded_layout_vlm_pipeline import (
ThreadedLayoutVlmPipeline,
)
_log = logging.getLogger(__name__)
def _parse_args():
parser = argparse.ArgumentParser(
description="Demo script for the experimental ThreadedLayoutVlmPipeline"
)
parser.add_argument(
"--input-file",
type=str,
default="tests/data/pdf/code_and_formula.pdf",
help="Path to a PDF file",
)
parser.add_argument(
"--output-dir",
type=str,
default="scratch/demo_layout_vlm/",
help="Output directory for converted files",
)
return parser.parse_args()
# Can be used to read multiple pdf files under a folder
# def _get_docs(input_doc_path):
# """Yield DocumentStream objects from list of input document paths"""
# for path in input_doc_path:
# buf = BytesIO(path.read_bytes())
# stream = DocumentStream(name=path.name, stream=buf)
# yield stream
def openai_compatible_vlm_options(
    model: str,
    prompt: str,
    format: ResponseFormat,
    hostname_and_port,
    temperature: float = 0.7,
    max_tokens: int = 4096,
    api_key: str = "",
    skip_special_tokens=False,
):
    """Build ApiVlmOptions for an OpenAI-compatible chat-completions endpoint.

    Args:
        model: Model identifier understood by the remote service.
        prompt: Instruction sent alongside each page image.
        format: Expected response format (e.g. DOCTAGS).
        hostname_and_port: "host:port" of the inference service.
        temperature: Sampling temperature.
        max_tokens: Generation cap per request.
        api_key: Optional bearer token; when empty no auth header is sent.
        skip_special_tokens: Forwarded to the service (needed for VLLM).
    """
    # Attach the Authorization header only when a key was actually provided.
    request_headers = {"Authorization": f"Bearer {api_key}"} if api_key else {}
    return ApiVlmOptions(
        url=f"http://{hostname_and_port}/v1/chat/completions",  # LM studio defaults to port 1234, VLLM to 8000
        params=dict(
            model=model,
            max_tokens=max_tokens,
            skip_special_tokens=skip_special_tokens,  # needed for VLLM
        ),
        headers=request_headers,
        prompt=prompt,
        timeout=90,
        scale=2.0,
        temperature=temperature,
        response_format=format,
    )
def demo_threaded_layout_vlm_pipeline(
    input_doc_path: Path, out_dir_layout_aware: Path, use_api_vlm: bool
):
    """Demonstrate the threaded layout+VLM pipeline.

    Args:
        input_doc_path: PDF to convert.
        out_dir_layout_aware: Directory where JSON/HTML outputs are written.
        use_api_vlm: When True, call a remote OpenAI-compatible VLM service
            instead of running the model inline.
    """
    vlm_options = GRANITEDOCLING_TRANSFORMERS.model_copy()
    if use_api_vlm:
        vlm_options = openai_compatible_vlm_options(
            model="granite-docling-258m-mlx",  # For VLLM use "ibm-granite/granite-docling-258M"
            hostname_and_port="localhost:1234",  # LM studio defaults to port 1234, VLLM to 8000
            prompt="Convert this page to docling.",
            format=ResponseFormat.DOCTAGS,
            api_key="",
        )
    # Presumably keeps the raw prompt on the page predictions for inspection
    # -- confirm against VlmOptions documentation.
    vlm_options.track_input_prompt = True
    # Configure pipeline options
    print("Configuring pipeline options...")
    pipeline_options_layout_aware = ThreadedLayoutVlmPipelineOptions(
        # VLM configuration - defaults to GRANITEDOCLING_TRANSFORMERS
        vlm_options=vlm_options,
        # Layout configuration - defaults to DOCLING_LAYOUT_HERON
        # Batch sizes for parallel processing
        layout_batch_size=2,
        vlm_batch_size=1,
        # Queue configuration
        queue_max_size=10,
        # Image processing
        images_scale=vlm_options.scale,
        generate_page_images=True,
        enable_remote_services=use_api_vlm,
    )
    # Create converter with the new pipeline
    print("Initializing DocumentConverter (this may take a while - loading models)...")
    doc_converter_layout_enhanced = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_cls=ThreadedLayoutVlmPipeline,
                pipeline_options=pipeline_options_layout_aware,
            )
        }
    )
    # raises_on_error=False lets us inspect a failed ConversionResult below.
    result_layout_aware = doc_converter_layout_enhanced.convert(
        source=input_doc_path, raises_on_error=False
    )
    if result_layout_aware.status == ConversionStatus.FAILURE:
        _log.error(f"Conversion failed: {result_layout_aware.status}")
    # NOTE(review): outputs are saved even when conversion failed -- looks
    # intentional (dump partial results for debugging), but worth confirming.
    doc_filename = result_layout_aware.input.file.stem
    result_layout_aware.document.save_as_json(
        out_dir_layout_aware / f"{doc_filename}.json"
    )
    result_layout_aware.document.save_as_html(
        out_dir_layout_aware / f"{doc_filename}.html", split_page_view=True
    )
    for page in result_layout_aware.pages:
        _log.info("Page %s of VLM response:", page.page_no)
        if page.predictions.vlm_response:
            _log.info(page.predictions.vlm_response)
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    try:
        args = _parse_args()
        _log.info(
            f"Parsed arguments: input={args.input_file}, output={args.output_dir}"
        )
        input_path = Path(args.input_file)
        # Fail fast on bad input before any models are loaded.
        if not input_path.exists():
            raise FileNotFoundError(f"Input file does not exist: {input_path}")
        if input_path.suffix.lower() != ".pdf":
            raise ValueError(f"Input file must be a PDF: {input_path}")
        out_dir_layout_aware = Path(args.output_dir) / "layout_aware/"
        out_dir_layout_aware.mkdir(parents=True, exist_ok=True)
        use_api_vlm = False  # Set to False to use inline VLM model
        demo_threaded_layout_vlm_pipeline(input_path, out_dir_layout_aware, use_api_vlm)
    except Exception:
        # Print the full traceback for easier debugging, then re-raise so the
        # process still exits with a non-zero status.
        traceback.print_exc()
        raise
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/pii_obfuscate.py | docs/examples/pii_obfuscate.py | # %% [markdown]
# Detect and obfuscate PII using a Hugging Face NER model.
#
# What this example does
# - Converts a PDF and saves original Markdown with embedded images.
# - Runs a HF token-classification pipeline (NER) to detect PII-like entities.
# - Obfuscates occurrences in TextItem and TableItem by stable, type-based IDs.
#
# Prerequisites
# - Install Docling. Install Transformers: `pip install transformers`.
# - Optional (advanced): Install GLiNER for richer PII labels:
# `pip install gliner`
# If needed for CPU-only envs:
# `pip install torch --extra-index-url https://download.pytorch.org/whl/cpu`
# - Optionally, set `HF_MODEL` to a different NER/PII model.
#
# How to run
# - From the repo root: `python docs/examples/pii_obfuscate.py`.
# - To use GLiNER instead of HF pipeline:
# python docs/examples/pii_obfuscate.py --engine gliner
# or set env var `PII_ENGINE=gliner`.
# - The script writes original and obfuscated Markdown to `scratch/`.
#
# Notes
# - This is a simple demonstration. For production PII detection, consider
# specialized models/pipelines and thorough evaluation.
# %%
import argparse
import logging
import os
import re
from pathlib import Path
from typing import Dict, List, Tuple
from docling_core.types.doc import ImageRefMode, TableItem, TextItem
from tabulate import tabulate
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
_log = logging.getLogger(__name__)
IMAGE_RESOLUTION_SCALE = 2.0
HF_MODEL = "dslim/bert-base-NER" # Swap with another HF NER/PII model if desired, eg https://huggingface.co/urchade/gliner_multi_pii-v1 looks very promising too!
GLINER_MODEL = "urchade/gliner_multi_pii-v1"
def _build_simple_ner_pipeline():
    """Create a Hugging Face token-classification pipeline for NER.

    Returns a callable like: ner(text) -> List[dict]
    """
    try:
        from transformers import (
            AutoModelForTokenClassification,
            AutoTokenizer,
            pipeline,
        )
    except Exception:
        _log.error("Transformers not installed. Please run: pip install transformers")
        raise

    hf_tokenizer = AutoTokenizer.from_pretrained(HF_MODEL)
    hf_model = AutoModelForTokenClassification.from_pretrained(HF_MODEL)
    # aggregation_strategy="simple" groups subword tokens into whole entities;
    # modern Transformers then reports character start/end offsets when it can.
    return pipeline(
        "token-classification",
        model=hf_model,
        tokenizer=hf_tokenizer,
        aggregation_strategy="simple",
    )
class SimplePiiObfuscator:
    """Tracks PII strings and replaces them with stable IDs per entity type.

    Each distinct surface string gets a stable, sequential ID such as
    ``person-1`` the first time it is seen; every later occurrence (in this or
    any subsequent text) is replaced with the same ID, keeping the obfuscated
    output internally consistent.
    """

    def __init__(self, ner_callable):
        """Initialize with a NER callable: ``ner(text) -> list of dicts``.

        The callable is expected to return HF-pipeline style dicts carrying an
        ``entity_group``/``entity`` label and, ideally, ``start``/``end``
        character offsets plus a ``word`` surface form.
        """
        self.ner = ner_callable
        # Maps original surface string -> stable replacement ID.
        self.entity_map: Dict[str, str] = {}
        # Sequential counters, one per coarse entity type.
        self.counters: Dict[str, int] = {
            "person": 0,
            "org": 0,
            "location": 0,
            "misc": 0,
        }
        # Map model labels to our coarse types
        self.label_map = {
            "PER": "person",
            "PERSON": "person",
            "ORG": "org",
            "ORGANIZATION": "org",
            "LOC": "location",
            "LOCATION": "location",
            "GPE": "location",
            # Fallbacks
            "MISC": "misc",
            "O": "misc",
        }
        # Only obfuscate these by default. Adjust as needed.
        self.allowed_types = {"person", "org", "location"}

    def _next_id(self, typ: str) -> str:
        """Mint the next sequential replacement ID for *typ*, e.g. 'person-2'."""
        self.counters[typ] += 1
        return f"{typ}-{self.counters[typ]}"

    def _normalize(self, s: str) -> str:
        """Collapse runs of whitespace to single spaces and strip the ends."""
        return re.sub(r"\s+", " ", s).strip()

    def _extract_entities(self, text: str) -> List[Tuple[str, str]]:
        """Run NER and return a list of (surface_text, type) to obfuscate."""
        if not text:
            return []
        results = self.ner(text)
        # Collect normalized items with optional span info
        items = []
        for r in results:
            raw_label = r.get("entity_group") or r.get("entity") or "MISC"
            label = self.label_map.get(raw_label, "misc")
            if label not in self.allowed_types:
                continue
            start = r.get("start")
            end = r.get("end")
            word = self._normalize(r.get("word") or r.get("text") or "")
            items.append({"label": label, "start": start, "end": end, "word": word})
        found: List[Tuple[str, str]] = []
        # If the pipeline provides character spans, merge consecutive/overlapping
        # entities of the same type into a single span, then take the substring
        # from the original text. This handles cases like subword tokenization
        # where multiple adjacent pieces belong to the same named entity.
        have_spans = any(i["start"] is not None and i["end"] is not None for i in items)
        if have_spans:
            spans = [
                i for i in items if i["start"] is not None and i["end"] is not None
            ]
            # Ensure processing order by start (then end)
            spans.sort(key=lambda x: (x["start"], x["end"]))
            merged = []
            for s in spans:
                if not merged:
                    merged.append(dict(s))
                    continue
                last = merged[-1]
                if s["label"] == last["label"] and s["start"] <= last["end"]:
                    # Merge identical, overlapping, or touching spans of same type
                    last["start"] = min(last["start"], s["start"])
                    last["end"] = max(last["end"], s["end"])
                else:
                    merged.append(dict(s))
            for m in merged:
                surface = self._normalize(text[m["start"] : m["end"]])
                if surface:
                    found.append((surface, m["label"]))
            # Include any items lacking spans as-is (fallback)
            for i in items:
                if i["start"] is None or i["end"] is None:
                    if i["word"]:
                        found.append((i["word"], i["label"]))
        else:
            # Fallback when spans aren't provided: return normalized words
            for i in items:
                if i["word"]:
                    found.append((i["word"], i["label"]))
        return found

    def obfuscate_text(self, text: str) -> str:
        """Replace every detected PII surface in *text* with its stable ID."""
        if not text:
            return text
        entities = self._extract_entities(text)
        if not entities:
            return text
        # Deduplicate per text, keep stable global mapping
        unique_words: Dict[str, str] = {}
        for word, label in entities:
            if word not in self.entity_map:
                replacement = self._next_id(label)
                self.entity_map[word] = replacement
            unique_words[word] = self.entity_map[word]
        # Replace longer matches first to avoid partial overlaps
        sorted_pairs = sorted(
            unique_words.items(), key=lambda x: len(x[0]), reverse=True
        )

        def replace_once(s: str, old: str, new: str) -> str:
            # BUGFIX: entity surfaces are whitespace-normalized, but the raw
            # text may contain the entity with line breaks or repeated spaces,
            # in which case a plain re.escape(old) pattern never matches.
            # Build a whitespace-tolerant pattern instead: match any
            # whitespace run wherever the normalized surface has a space.
            pattern = r"\s+".join(re.escape(part) for part in old.split())
            return re.sub(pattern, new, s)

        obfuscated = text
        for old, new in sorted_pairs:
            obfuscated = replace_once(obfuscated, old, new)
        return obfuscated
def _build_gliner_model():
    """Create a GLiNER model for PII-like entity extraction.

    Returns a tuple (model, labels) where model.predict_entities(text, labels)
    yields entities with "text" and "label" fields.
    """
    try:
        from gliner import GLiNER  # type: ignore
    except Exception:
        _log.error(
            "GLiNER not installed. Please run: pip install gliner torch --extra-index-url https://download.pytorch.org/whl/cpu"
        )
        raise

    gliner_model = GLiNER.from_pretrained(GLINER_MODEL)
    # Curated set of labels steering GLiNER towards PII-style entities.
    # Adjust as needed for your use case.
    pii_labels = [
        "booking number",
        "personally identifiable information",
        "driver licence",
        "person",
        "full address",
        "company",
        "email",
        "passport number",
        "Social Security Number",
        "phone number",
    ]
    return gliner_model, pii_labels
class AdvancedPIIObfuscator:
"""PII obfuscator powered by GLiNER with fine-grained labels.
- Uses GLiNER's `predict_entities(text, labels)` to detect entities.
- Obfuscates with stable IDs per fine-grained label, e.g. `email-1`.
"""
def __init__(self, gliner_model, labels: List[str]):
self.model = gliner_model
self.labels = labels
self.entity_map: Dict[str, str] = {}
self.counters: Dict[str, int] = {}
def _normalize(self, s: str) -> str:
return re.sub(r"\s+", " ", s).strip()
def _norm_label(self, label: str) -> str:
return (
re.sub(
r"[^a-z0-9_]+", "_", label.lower().replace(" ", "_").replace("-", "_")
).strip("_")
or "pii"
)
def _next_id(self, typ: str) -> str:
self.cc(typ)
self.counters[typ] += 1
return f"{typ}-{self.counters[typ]}"
def cc(self, typ: str) -> None:
if typ not in self.counters:
self.counters[typ] = 0
def _extract_entities(self, text: str) -> List[Tuple[str, str]]:
if not text:
return []
results = self.model.predict_entities(
text, self.labels
) # expects dicts with text/label
found: List[Tuple[str, str]] = []
for r in results:
label = self._norm_label(str(r.get("label", "pii")))
surface = self._normalize(str(r.get("text", "")))
if surface:
found.append((surface, label))
return found
def obfuscate_text(self, text: str) -> str:
if not text:
return text
entities = self._extract_entities(text)
if not entities:
return text
unique_words: Dict[str, str] = {}
for word, label in entities:
if word not in self.entity_map:
replacement = self._next_id(label)
self.entity_map[word] = replacement
unique_words[word] = self.entity_map[word]
sorted_pairs = sorted(
unique_words.items(), key=lambda x: len(x[0]), reverse=True
)
def replace_once(s: str, old: str, new: str) -> str:
pattern = re.escape(old)
return re.sub(pattern, new, s)
obfuscated = text
for old, new in sorted_pairs:
obfuscated = replace_once(obfuscated, old, new)
return obfuscated
def main():
    """Convert a sample PDF, obfuscate detected PII, and save both versions."""
    logging.basicConfig(level=logging.INFO)
    data_folder = Path(__file__).parent / "../../tests/data"
    input_doc_path = data_folder / "pdf/2206.01062.pdf"
    output_dir = Path("scratch")  # ensure this directory exists before saving
    # Choose engine via CLI flag or env var (default: hf)
    parser = argparse.ArgumentParser(description="PII obfuscation example")
    parser.add_argument(
        "--engine",
        choices=["hf", "gliner"],
        default=os.getenv("PII_ENGINE", "hf"),
        help="NER engine: 'hf' (Transformers) or 'gliner' (GLiNER)",
    )
    args = parser.parse_args()
    # Ensure output dir exists
    output_dir.mkdir(parents=True, exist_ok=True)
    # Keep and generate images so Markdown can embed them
    pipeline_options = PdfPipelineOptions()
    pipeline_options.images_scale = IMAGE_RESOLUTION_SCALE
    pipeline_options.generate_page_images = True
    pipeline_options.generate_picture_images = True
    doc_converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options)
        }
    )
    conv_res = doc_converter.convert(input_doc_path)
    conv_doc = conv_res.document
    doc_filename = conv_res.input.file.name
    # Save markdown with embedded pictures in original text
    md_filename = output_dir / f"{doc_filename}-with-images-orig.md"
    conv_doc.save_as_markdown(md_filename, image_mode=ImageRefMode.EMBEDDED)
    # Build NER pipeline and obfuscator
    if args.engine == "gliner":
        _log.info("Using GLiNER-based AdvancedPIIObfuscator")
        gliner_model, gliner_labels = _build_gliner_model()
        obfuscator = AdvancedPIIObfuscator(gliner_model, gliner_labels)
    else:
        _log.info("Using HF Transformers-based SimplePiiObfuscator")
        ner = _build_simple_ner_pipeline()
        obfuscator = SimplePiiObfuscator(ner)
    # Walk all document items, obfuscating text in-place. The original text of
    # TextItems is preserved in `orig` for comparison/debugging.
    for element, _level in conv_res.document.iterate_items():
        if isinstance(element, TextItem):
            element.orig = element.text
            element.text = obfuscator.obfuscate_text(element.text)
            # print(element.orig, " => ", element.text)
        elif isinstance(element, TableItem):
            for cell in element.data.table_cells:
                cell.text = obfuscator.obfuscate_text(cell.text)
    # Save markdown with embedded pictures and obfuscated text
    md_filename = output_dir / f"{doc_filename}-with-images-pii-obfuscated.md"
    conv_doc.save_as_markdown(md_filename, image_mode=ImageRefMode.EMBEDDED)
    # Optional: log mapping summary
    if obfuscator.entity_map:
        data = []
        for key, val in obfuscator.entity_map.items():
            data.append([key, val])
        _log.info(
            f"Obfuscated entities:\n\n{tabulate(data)}",
        )
# Run the example only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/minimal_vlm_pipeline.py | docs/examples/minimal_vlm_pipeline.py | # %% [markdown]
# Minimal VLM pipeline example: convert a PDF using a vision-language model.
#
# What this example does
# - Runs the VLM-powered pipeline on a PDF (by URL) and prints Markdown output.
# - Shows two setups: default (Transformers/GraniteDocling) and macOS MPS/MLX.
#
# Prerequisites
# - Install Docling with VLM extras and the appropriate backend (Transformers or MLX).
# - Ensure your environment can download model weights (e.g., from Hugging Face).
#
# How to run
# - From the repository root, run: `python docs/examples/minimal_vlm_pipeline.py`.
# - The script prints the converted Markdown to stdout.
#
# Notes
# - `source` may be a local path or a URL to a PDF.
# - The second section demonstrates macOS MPS acceleration via MLX (`vlm_model_specs.GRANITEDOCLING_MLX`).
# - For more configurations and model comparisons, see `docs/examples/compare_vlm_models.py`.
# %%
from docling.datamodel import vlm_model_specs
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import (
    VlmPipelineOptions,
)
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.vlm_pipeline import VlmPipeline

# Convert a public arXiv PDF; replace with a local path if preferred.
source = "https://arxiv.org/pdf/2501.17887"

###### USING SIMPLE DEFAULT VALUES
# - GraniteDocling model
# - Using the transformers framework

# With no pipeline_options, the VlmPipeline falls back to its defaults.
converter = DocumentConverter(
    format_options={
        InputFormat.PDF: PdfFormatOption(
            pipeline_cls=VlmPipeline,
        ),
    }
)

doc = converter.convert(source=source).document

print(doc.export_to_markdown())

###### USING MACOS MPS ACCELERATOR
# Demonstrates using MLX on macOS with MPS acceleration (macOS only).
# For more options see the `compare_vlm_models.py` example.
pipeline_options = VlmPipelineOptions(
    vlm_options=vlm_model_specs.GRANITEDOCLING_MLX,
)

converter = DocumentConverter(
    format_options={
        InputFormat.PDF: PdfFormatOption(
            pipeline_cls=VlmPipeline,
            pipeline_options=pipeline_options,
        ),
    }
)

doc = converter.convert(source=source).document

print(doc.export_to_markdown())
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/batch_convert.py | docs/examples/batch_convert.py | # %% [markdown]
# Batch convert multiple PDF files and export results in several formats.
# What this example does
# - Loads a small set of sample PDFs.
# - Runs the Docling PDF pipeline once per file.
# - Writes outputs to `scratch/` in multiple formats (JSON, HTML, Markdown, text, doctags, YAML).
# Prerequisites
# - Install Docling and dependencies as described in the repository README.
# - Ensure you can import `docling` from your Python environment.
# <!-- YAML export requires `PyYAML` (`pip install pyyaml`). -->
# Input documents
# - By default, this example uses a few PDFs from `tests/data/pdf/` in the repo.
# - If you cloned without test data, or want to use your own files, edit
# `input_doc_paths` below to point to PDFs on your machine.
# Output formats (controlled by flags)
# - `USE_V2 = True` enables the current Docling document exports (recommended).
# - `USE_LEGACY = False` keeps legacy Deep Search exports disabled.
# You can set it to `True` if you need legacy formats for compatibility tests.
# Notes
# - Set `pipeline_options.generate_page_images = True` to include page images in HTML.
# - The script logs conversion progress and raises if any documents fail.
# <!-- This example shows both helper methods like `save_as_*` and lower-level
# `export_to_*` + manual file writes; outputs may overlap intentionally. -->
# %%
import json
import logging
import time
from collections.abc import Iterable
from pathlib import Path
import yaml
from docling_core.types.doc import ImageRefMode
from docling.backend.docling_parse_v4_backend import DoclingParseV4DocumentBackend
from docling.datamodel.base_models import ConversionStatus, InputFormat
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
_log = logging.getLogger(__name__)
# Export toggles:
# - USE_V2 controls modern Docling document exports.
# - USE_LEGACY enables legacy Deep Search exports for comparison or migration.
USE_V2 = True  # export the modern DoclingDocument formats (JSON/HTML/MD/YAML/doctags)
USE_LEGACY = False  # additionally export legacy Deep Search formats when True
def export_documents(
    conv_results: Iterable[ConversionResult],
    output_dir: Path,
):
    """Write each conversion result to *output_dir* in multiple formats.

    Fully converted documents are exported as JSON, HTML, doctags, Markdown,
    plain text and YAML (when ``USE_V2``), and optionally in the legacy
    Deep Search formats (when ``USE_LEGACY``). Partially converted documents
    have their errors logged; failed documents are only counted.

    Args:
        conv_results: Iterable of conversion results (consumed lazily).
        output_dir: Destination directory, created if missing.

    Returns:
        Tuple of ``(success_count, partial_success_count, failure_count)``.
    """
    output_dir.mkdir(parents=True, exist_ok=True)
    success_count = 0
    failure_count = 0
    partial_success_count = 0
    for conv_res in conv_results:
        if conv_res.status == ConversionStatus.SUCCESS:
            success_count += 1
            doc_filename = conv_res.input.file.stem
            if USE_V2:
                # Recommended modern Docling exports. These helpers mirror the
                # lower-level "export_to_*" methods used below, but handle
                # common details like image handling.
                conv_res.document.save_as_json(
                    output_dir / f"{doc_filename}.json",
                    image_mode=ImageRefMode.PLACEHOLDER,
                )
                conv_res.document.save_as_html(
                    output_dir / f"{doc_filename}.html",
                    image_mode=ImageRefMode.EMBEDDED,
                )
                conv_res.document.save_as_doctags(
                    output_dir / f"{doc_filename}.doctags.txt"
                )
                conv_res.document.save_as_markdown(
                    output_dir / f"{doc_filename}.md",
                    image_mode=ImageRefMode.PLACEHOLDER,
                )
                conv_res.document.save_as_markdown(
                    output_dir / f"{doc_filename}.txt",
                    image_mode=ImageRefMode.PLACEHOLDER,
                    strict_text=True,
                )
                # The manual exports below intentionally overwrite some files
                # produced by the save_as_* helpers above (both APIs are being
                # demonstrated). Fix: open with an explicit encoding so output
                # does not depend on the platform's locale — matches the
                # legacy exports below.
                # Export Docling document format to YAML:
                with (output_dir / f"{doc_filename}.yaml").open(
                    "w", encoding="utf-8"
                ) as fp:
                    fp.write(yaml.safe_dump(conv_res.document.export_to_dict()))
                # Export Docling document format to doctags:
                with (output_dir / f"{doc_filename}.doctags.txt").open(
                    "w", encoding="utf-8"
                ) as fp:
                    fp.write(conv_res.document.export_to_doctags())
                # Export Docling document format to markdown:
                with (output_dir / f"{doc_filename}.md").open(
                    "w", encoding="utf-8"
                ) as fp:
                    fp.write(conv_res.document.export_to_markdown())
                # Export Docling document format to text:
                with (output_dir / f"{doc_filename}.txt").open(
                    "w", encoding="utf-8"
                ) as fp:
                    fp.write(conv_res.document.export_to_markdown(strict_text=True))
            if USE_LEGACY:
                # Export Deep Search document JSON format:
                with (output_dir / f"{doc_filename}.legacy.json").open(
                    "w", encoding="utf-8"
                ) as fp:
                    fp.write(json.dumps(conv_res.legacy_document.export_to_dict()))
                # Export Text format:
                with (output_dir / f"{doc_filename}.legacy.txt").open(
                    "w", encoding="utf-8"
                ) as fp:
                    fp.write(
                        conv_res.legacy_document.export_to_markdown(strict_text=True)
                    )
                # Export Markdown format:
                with (output_dir / f"{doc_filename}.legacy.md").open(
                    "w", encoding="utf-8"
                ) as fp:
                    fp.write(conv_res.legacy_document.export_to_markdown())
                # Export Document Tags format:
                with (output_dir / f"{doc_filename}.legacy.doctags.txt").open(
                    "w", encoding="utf-8"
                ) as fp:
                    fp.write(conv_res.legacy_document.export_to_document_tokens())
        elif conv_res.status == ConversionStatus.PARTIAL_SUCCESS:
            _log.info(
                f"Document {conv_res.input.file} was partially converted with the following errors:"
            )
            for item in conv_res.errors:
                _log.info(f"\t{item.error_message}")
            partial_success_count += 1
        else:
            _log.info(f"Document {conv_res.input.file} failed to convert.")
            failure_count += 1
    _log.info(
        f"Processed {success_count + partial_success_count + failure_count} docs, "
        f"of which {failure_count} failed "
        f"and {partial_success_count} were partially converted."
    )
    return success_count, partial_success_count, failure_count
def main():
    """Convert a handful of sample PDFs and export them into ./scratch."""
    logging.basicConfig(level=logging.INFO)

    # Sample inputs shipped with the repository; swap in your own PDFs here.
    data_folder = Path(__file__).parent / "../../tests/data"
    input_doc_paths = [
        data_folder / "pdf" / name
        for name in (
            "2206.01062.pdf",
            "2203.01017v2.pdf",
            "2305.03393v1.pdf",
            "redp5110_sampled.pdf",
        )
    ]
    # In-memory streams can be converted as well, e.g.:
    #   buf = BytesIO((data_folder / "pdf/2206.01062.pdf").open("rb").read())
    #   docs = [DocumentStream(name="my_doc.pdf", stream=buf)]
    # Inline debug visualizations can be enabled via
    # docling.datamodel.settings (settings.debug.visualize_layout, ...).

    # Page images make the HTML export self-contained, at some extra cost.
    pdf_options = PdfPipelineOptions()
    pdf_options.generate_page_images = True
    converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_options=pdf_options,
                backend=DoclingParseV4DocumentBackend,
            )
        }
    )

    start_time = time.time()
    # raises_on_error=False keeps the run going past individual failures;
    # they are reported in the summary and re-raised below.
    results = converter.convert_all(input_doc_paths, raises_on_error=False)
    _success_count, _partial_success_count, failure_count = export_documents(
        results, output_dir=Path("scratch")
    )
    end_time = time.time() - start_time
    _log.info(f"Document conversion complete in {end_time:.2f} seconds.")

    if failure_count > 0:
        raise RuntimeError(
            f"The example failed converting {failure_count} on {len(input_doc_paths)}."
        )
if __name__ == "__main__":
    main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/export_figures.py | docs/examples/export_figures.py | # %% [markdown]
# Export page, figure, and table images from a PDF and save rich outputs.
#
# What this example does
# - Converts a PDF, keeps page/element images, and writes them to `scratch/`.
# - Exports Markdown and HTML with either embedded or referenced images.
#
# Prerequisites
# - Install Docling and image dependencies. Pillow is used for image saves
# (`pip install pillow`) if not already available via Docling's deps.
# - Ensure you can import `docling` from your Python environment.
#
# How to run
# - From the repo root: `python docs/examples/export_figures.py`.
# - Outputs (PNG, MD, HTML) are written to `scratch/`.
#
# Key options
# - `IMAGE_RESOLUTION_SCALE`: increase to render higher-resolution images (e.g., 2.0).
# - `PdfPipelineOptions.generate_page_images`/`generate_picture_images`: preserve images for export.
# - `ImageRefMode`: choose `EMBEDDED` or `REFERENCED` when saving Markdown/HTML.
#
# Input document
# - Defaults to `tests/data/pdf/2206.01062.pdf`. Change `input_doc_path` as needed.
# %%
import logging
import time
from pathlib import Path
from docling_core.types.doc import ImageRefMode, PictureItem, TableItem
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
_log = logging.getLogger(__name__)
IMAGE_RESOLUTION_SCALE = 2.0
def main():
    """Convert a sample PDF and export page, table and figure images.

    Writes per-page PNGs, per-element crops, and Markdown/HTML variants
    (embedded vs. externally referenced images) into ./scratch.
    """
    logging.basicConfig(level=logging.INFO)
    data_folder = Path(__file__).parent / "../../tests/data"
    input_doc_path = data_folder / "pdf/2206.01062.pdf"
    output_dir = Path("scratch")
    # Keep page/element images so they can be exported. `images_scale` controls
    # the rendered image resolution (scale=1 ~ 72 DPI); the `generate_*`
    # toggles decide which elements are enriched with images.
    pipeline_options = PdfPipelineOptions()
    pipeline_options.images_scale = IMAGE_RESOLUTION_SCALE
    pipeline_options.generate_page_images = True
    pipeline_options.generate_picture_images = True
    doc_converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options)
        }
    )
    start_time = time.time()
    conv_res = doc_converter.convert(input_doc_path)
    output_dir.mkdir(parents=True, exist_ok=True)
    doc_filename = conv_res.input.file.stem
    # Save page images.
    # Fix: iterate the page objects directly — the original iterated
    # `.items()` and immediately rebound the dict key (`page_no`), which was
    # confusing and made the key unused.
    for page in conv_res.document.pages.values():
        page_no = page.page_no
        page_image_filename = output_dir / f"{doc_filename}-{page_no}.png"
        with page_image_filename.open("wb") as fp:
            page.image.pil_image.save(fp, format="PNG")
    # Save images of figures and tables.
    table_counter = 0
    picture_counter = 0
    for element, _level in conv_res.document.iterate_items():
        if isinstance(element, TableItem):
            table_counter += 1
            element_image_filename = (
                output_dir / f"{doc_filename}-table-{table_counter}.png"
            )
            with element_image_filename.open("wb") as fp:
                element.get_image(conv_res.document).save(fp, "PNG")
        if isinstance(element, PictureItem):
            picture_counter += 1
            element_image_filename = (
                output_dir / f"{doc_filename}-picture-{picture_counter}.png"
            )
            with element_image_filename.open("wb") as fp:
                element.get_image(conv_res.document).save(fp, "PNG")
    # Save markdown with embedded pictures.
    md_filename = output_dir / f"{doc_filename}-with-images.md"
    conv_res.document.save_as_markdown(md_filename, image_mode=ImageRefMode.EMBEDDED)
    # Save markdown with externally referenced pictures.
    md_filename = output_dir / f"{doc_filename}-with-image-refs.md"
    conv_res.document.save_as_markdown(md_filename, image_mode=ImageRefMode.REFERENCED)
    # Save HTML with externally referenced pictures.
    html_filename = output_dir / f"{doc_filename}-with-image-refs.html"
    conv_res.document.save_as_html(html_filename, image_mode=ImageRefMode.REFERENCED)
    end_time = time.time() - start_time
    _log.info(f"Document converted and figures exported in {end_time:.2f} seconds.")
if __name__ == "__main__":
    main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/gpu_vlm_pipeline.py | docs/examples/gpu_vlm_pipeline.py | # %% [markdown]
#
# What this example does
# - Run a conversion using the best setup for GPU using VLM models
#
# Requirements
# - Python 3.10+
# - Install Docling: `pip install docling`
# - Install vLLM: `pip install vllm`
#
# How to run
# - `python docs/examples/gpu_vlm_pipeline.py`
#
# This example is part of a set of GPU optimization strategies. Read more about it in [GPU support](../../usage/gpu/)
#
# ### Start models with vllm
#
# ```console
# vllm serve ibm-granite/granite-docling-258M \
# --host 127.0.0.1 --port 8000 \
# --max-num-seqs 512 \
# --max-num-batched-tokens 8192 \
# --enable-chunked-prefill \
# --gpu-memory-utilization 0.9
# ```
#
# ## Example code
# %%
import datetime
import logging
import time
from pathlib import Path
import numpy as np
from pydantic import TypeAdapter
from docling.datamodel import vlm_model_specs
from docling.datamodel.base_models import ConversionStatus, InputFormat
from docling.datamodel.pipeline_options import (
VlmPipelineOptions,
)
from docling.datamodel.pipeline_options_vlm_model import ApiVlmOptions, ResponseFormat
from docling.datamodel.settings import settings
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.vlm_pipeline import VlmPipeline
from docling.utils.profiling import ProfilingItem
_log = logging.getLogger(__name__)
def main():
    """Convert a sample PDF through a remote vLLM-served GraniteDocling model.

    Uses large page batches against the remote service and writes the
    collected profiling timings to a timestamped JSON file.
    """
    # Quieten docling internals; keep this script's own logging verbose.
    logging.getLogger("docling").setLevel(logging.WARNING)
    _log.setLevel(logging.INFO)
    # Pages handed to the pipeline per batch; also reused as the request
    # concurrency towards the vLLM server.
    BATCH_SIZE = 64
    settings.perf.page_batch_size = BATCH_SIZE
    settings.debug.profile_pipeline_timings = True
    data_folder = Path(__file__).parent / "../../tests/data"
    # input_doc_path = data_folder / "pdf" / "2305.03393v1.pdf"  # 14 pages
    input_doc_path = data_folder / "pdf" / "redp5110_sampled.pdf"  # 18 pages
    # NOTE(review): this mutates the shared module-level spec object rather
    # than a copy — fine for a one-shot script, but confirm before reusing
    # the spec elsewhere in the same process.
    vlm_options = vlm_model_specs.GRANITEDOCLING_VLLM_API
    vlm_options.concurrency = BATCH_SIZE
    pipeline_options = VlmPipelineOptions(
        vlm_options=vlm_options,
        enable_remote_services=True,  # required when using a remote inference service.
    )
    doc_converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_cls=VlmPipeline,
                pipeline_options=pipeline_options,
            ),
        }
    )
    start_time = time.time()
    # Initialize the pipeline up front so model warm-up is timed separately
    # from the conversion itself.
    doc_converter.initialize_pipeline(InputFormat.PDF)
    end_time = time.time() - start_time
    _log.info(f"Pipeline initialized in {end_time:.2f} seconds.")
    now = datetime.datetime.now()
    conv_result = doc_converter.convert(input_doc_path)
    assert conv_result.status == ConversionStatus.SUCCESS
    num_pages = len(conv_result.pages)
    pipeline_runtime = conv_result.timings["pipeline_total"].times[0]
    _log.info(f"Document converted in {pipeline_runtime:.2f} seconds.")
    _log.info(f"  [efficiency]: {num_pages / pipeline_runtime:.2f} pages/second.")
    # Per-page min / median / max for the profiled pipeline stages.
    for stage in ("page_init", "vlm"):
        values = np.array(conv_result.timings[stage].times)
        _log.info(
            f"  [{stage}]: {np.min(values):.2f} / {np.median(values):.2f} / {np.max(values):.2f} seconds/page"
        )
    # Serialize the full profiling data via pydantic for later inspection.
    TimingsT = TypeAdapter(dict[str, ProfilingItem])
    timings_file = Path(f"result-timings-gpu-vlm-{now:%Y-%m-%d_%H-%M-%S}.json")
    with timings_file.open("wb") as fp:
        r = TimingsT.dump_json(conv_result.timings, indent=2)
        fp.write(r)
    _log.info(f"Profile details in {timings_file}.")
if __name__ == "__main__":
    main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/suryaocr_with_custom_models.py | docs/examples/suryaocr_with_custom_models.py | # Example: Integrating SuryaOCR with Docling for PDF OCR and Markdown Export
#
# Overview:
# - Configures SuryaOCR options for OCR.
# - Executes PDF pipeline with SuryaOCR integration.
# - Models auto-download from Hugging Face on first run.
#
# Prerequisites:
# - Install: `pip install docling-surya`
# - Ensure `docling` imports successfully.
#
# Execution:
# - Run from repo root: `python docs/examples/suryaocr_with_custom_models.py`
# - Outputs Markdown to stdout.
#
# Notes:
# - Default source: EPA PDF URL; substitute with local path as needed.
# - Models cached in `~/.cache/huggingface`; override with HF_HOME env var.
# - Use proxy config for restricted networks.
# - **Important Licensing Note**: The `docling-surya` package integrates SuryaOCR, which is licensed under the GNU General Public License (GPL).
# Using this integration may impose GPL obligations on your project. Review the license terms carefully.
# Requires `pip install docling-surya`
# See https://pypi.org/project/docling-surya/
from docling_surya import SuryaOcrOptions
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
def main():
    """OCR a sample PDF with SuryaOCR and print the result as Markdown."""
    source = "https://19january2021snapshot.epa.gov/sites/static/files/2016-02/documents/epa_sample_letter_sent_to_commissioners_dated_february_29_2015.pdf"
    # SuryaOCR ships as an external plugin, hence allow_external_plugins=True.
    opts = PdfPipelineOptions(
        do_ocr=True,
        ocr_model="suryaocr",
        allow_external_plugins=True,
        ocr_options=SuryaOcrOptions(lang=["en"]),
    )
    # Apply the same pipeline configuration to PDFs and images alike.
    converter = DocumentConverter(
        format_options={
            fmt: PdfFormatOption(pipeline_options=opts)
            for fmt in (InputFormat.PDF, InputFormat.IMAGE)
        }
    )
    print(converter.convert(source).document.export_to_markdown())
if __name__ == "__main__":
    main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/minimal.py | docs/examples/minimal.py | # %% [markdown]
# What this example does
# - Converts a single source (URL or local file path) to a unified Docling
# document and prints Markdown to stdout.
#
# Requirements
# - Python 3.9+
# - Install Docling: `pip install docling`
#
# How to run
# - Use the default sample URL: `python docs/examples/minimal.py`
# - To use your own file or URL, edit the `source` variable below.
#
# Notes
# - The converter auto-detects supported formats (PDF, DOCX, HTML, PPTX, images, etc.).
# - For batch processing or saving outputs to files, see `docs/examples/batch_convert.py`.
# %%
from docling.document_converter import DocumentConverter
# Change this to a local path or another URL if desired.
# Note: using the default URL requires network access; if offline, provide a
# local file path (e.g., Path("/path/to/file.pdf")).
source = "https://arxiv.org/pdf/2408.09869"
# Convert and print in one pass; the intermediate result object is not
# needed for anything else in this minimal example.
doc = DocumentConverter().convert(source).document
print(doc.export_to_markdown())
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/inspect_picture_content.py | docs/examples/inspect_picture_content.py | # %% [markdown]
# Inspect the contents associated with each picture in a converted document.
#
# What this example does
# - Converts a PDF and iterates over each PictureItem.
# - Prints the caption and the textual items contained within the picture region.
#
# How to run
# - From the repo root: `python docs/examples/inspect_picture_content.py`.
#
# Notes
# - Uncomment `picture.get_image(doc).show()` to visually inspect each picture.
# - Adjust `source` to point to a different PDF if desired.
# %%
from docling_core.types.doc import TextItem
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
# Change this to a local path if desired
source = "tests/data/pdf/amt_handbook_sample.pdf"
# A higher image scale yields sharper crops when inspecting picture regions.
opts = PdfPipelineOptions()
opts.images_scale = 2
opts.generate_page_images = True
converter = DocumentConverter(
    format_options={InputFormat.PDF: PdfFormatOption(pipeline_options=opts)}
)
doc = converter.convert(source).document
# For every picture, print its caption followed by the text items that fall
# inside the picture region.
for picture in doc.pictures:
    # picture.get_image(doc).show()  # uncomment to display the picture
    print(picture.caption_text(doc), " contains these elements:")
    for item, _level in doc.iterate_items(root=picture, traverse_pictures=True):
        if isinstance(item, TextItem):
            print(item.text)
    print("\n")
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/enrich_doclingdocument.py | docs/examples/enrich_doclingdocument.py | # %% [markdown]
# Enrich an existing DoclingDocument JSON with a custom model (post-conversion).
#
# What this example does
# - Loads a previously converted DoclingDocument from JSON (no reconversion).
# - Uses a backend to crop images for items and runs an enrichment model in batches.
# - Prints a few example annotations to stdout.
#
# Prerequisites
# - A DoclingDocument JSON produced by another conversion (path configured below).
# - Install Docling and dependencies for the chosen enrichment model.
# - Ensure the JSON and the referenced PDF match (same document/version), so
# provenance bounding boxes line up for accurate cropping.
#
# How to run
# - From the repo root: `python docs/examples/enrich_doclingdocument.py`.
# - Adjust `input_doc_path` and `input_pdf_path` if your data is elsewhere.
#
# Notes
# - `BATCH_SIZE` controls how many elements are passed to the model at once.
# - `prepare_element()` crops context around elements based on the model's expansion.
# %%
### Load modules
from pathlib import Path
from typing import Iterable, Optional
from docling_core.types.doc import BoundingBox, DocItem, DoclingDocument, NodeItem
from rich.pretty import pprint
from docling.backend.pypdfium2_backend import PyPdfiumDocumentBackend
from docling.datamodel.accelerator_options import AcceleratorOptions
from docling.datamodel.base_models import InputFormat, ItemAndImageEnrichmentElement
from docling.datamodel.document import InputDocument
from docling.models.base_model import BaseItemAndImageEnrichmentModel
from docling.models.document_picture_classifier import (
DocumentPictureClassifier,
DocumentPictureClassifierOptions,
)
from docling.utils.utils import chunkify
### Define batch size used for processing
# Number of document items handed to the enrichment model per call.
BATCH_SIZE = 4
# Trade-off: larger batches improve throughput but increase memory usage.
### From DocItem to the model inputs
# The following function is responsible for taking an item and applying the required pre-processing for the model.
# In this case we generate a cropped image from the document backend.
def prepare_element(
    doc: DoclingDocument,
    backend: PyPdfiumDocumentBackend,
    model: BaseItemAndImageEnrichmentModel,
    element: NodeItem,
) -> Optional[ItemAndImageEnrichmentElement]:
    """Crop the page image context for *element* as input for *model*.

    Returns None when the model declares the element not processable.
    """
    if not model.is_processable(doc=doc, element=element):
        return None
    assert isinstance(element, DocItem)
    prov = element.prov[0]
    bbox = prov.bbox
    width = bbox.r - bbox.l
    height = bbox.t - bbox.b
    # Grow the crop on every side by the model's expansion factor so the
    # model sees some surrounding context around the element.
    padded_bbox = BoundingBox(
        l=bbox.l - width * model.expansion_factor,
        t=bbox.t + height * model.expansion_factor,
        r=bbox.r + width * model.expansion_factor,
        b=bbox.b - height * model.expansion_factor,
        coord_origin=bbox.coord_origin,
    )
    # Provenance pages are 1-based; the backend expects a 0-based index.
    page_backend = backend.load_page(page_no=prov.page_no - 1)
    crop = page_backend.get_page_image(
        scale=model.images_scale, cropbox=padded_bbox
    )
    return ItemAndImageEnrichmentElement(item=element, image=crop)
### Iterate through the document
# This block defines the `enrich_document()` which is responsible for iterating through the document
# and batch the selected document items for running through the model.
def enrich_document(
    doc: DoclingDocument,
    backend: PyPdfiumDocumentBackend,
    model: BaseItemAndImageEnrichmentModel,
) -> DoclingDocument:
    """Run *model* over every processable item of *doc*, in batches.

    The document is annotated in place by the model and also returned.
    """
    def _prepare_elements(
        doc: DoclingDocument,
        backend: PyPdfiumDocumentBackend,
        model: BaseItemAndImageEnrichmentModel,
    ) -> Iterable[ItemAndImageEnrichmentElement]:
        # Lazily yield the prepared model inputs for each processable item;
        # prepare_element() returns None for items the model skips.
        for doc_element, _level in doc.iterate_items():
            prepared_element = prepare_element(
                doc=doc, backend=backend, model=model, element=doc_element
            )
            if prepared_element is not None:
                yield prepared_element
    for element_batch in chunkify(
        _prepare_elements(doc, backend, model),
        BATCH_SIZE,
    ):
        # The model yields results lazily; the generator must be fully
        # consumed for the annotations to be applied.
        for element in model(doc=doc, element_batch=element_batch):  # Must exhaust!
            pass
    return doc
### Open and process
# The `main()` function which initializes the document and model objects for calling `enrich_document()`.
def main():
    """Load a converted DoclingDocument and enrich it with a picture classifier."""
    data_folder = Path(__file__).parent / "../../tests/data"
    input_pdf_path = data_folder / "pdf/2206.01062.pdf"
    input_doc_path = data_folder / "groundtruth/docling_v2/2206.01062.json"
    # The JSON and the PDF must describe the same document so that the
    # provenance bounding boxes line up for cropping.
    doc = DoclingDocument.load_from_json(input_doc_path)
    model = DocumentPictureClassifier(
        enabled=True,
        artifacts_path=None,
        options=DocumentPictureClassifierOptions(),
        accelerator_options=AcceleratorOptions(),
    )
    in_pdf_doc = InputDocument(
        input_pdf_path,
        format=InputFormat.PDF,
        backend=PyPdfiumDocumentBackend,
        filename=input_pdf_path.name,
    )
    # NOTE(review): reaches into the private _backend attribute; confirm
    # there is no public accessor before relying on this elsewhere.
    backend = in_pdf_doc._backend
    doc = enrich_document(doc=doc, backend=backend, model=model)
    # Show the annotations produced for the first few pictures.
    for pic in doc.pictures[:5]:
        print(pic.self_ref)
        pprint(pic.annotations)
if __name__ == "__main__":
    main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/rapidocr_with_custom_models.py | docs/examples/rapidocr_with_custom_models.py | # %% [markdown]
# Use RapidOCR with custom ONNX models to OCR a PDF page and print Markdown.
#
# What this example does
# - Downloads RapidOCR models from Hugging Face via ModelScope.
# - Configures `RapidOcrOptions` with explicit det/rec/cls model paths.
# - Runs the PDF pipeline with RapidOCR and prints Markdown output.
#
# Prerequisites
# - Install Docling, `modelscope`, and have network access to download models.
# - Ensure your environment can import `docling` and `modelscope`.
#
# How to run
# - From the repo root: `python docs/examples/rapidocr_with_custom_models.py`.
# - The script prints the recognized text as Markdown to stdout.
#
# Notes
# - The default `source` points to an arXiv PDF URL; replace with a local path if desired.
# - Model paths are derived from the downloaded snapshot directory.
# - ModelScope caches downloads (typically under `~/.cache/modelscope`); set a proxy
# or pre-download models if running in a restricted network environment.
# %%
import os
from modelscope import snapshot_download
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import PdfPipelineOptions, RapidOcrOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
def main():
    """Download RapidOCR ONNX models, OCR a PDF, and print Markdown."""
    # Source document to convert.
    source = "https://arxiv.org/pdf/2408.09869v4"
    # Fetch the RapidOCR snapshot (modelscope caches it after the first run).
    print("Downloading RapidOCR models")
    download_path = snapshot_download(repo_id="RapidAI/RapidOCR")
    # Explicit detection / recognition / classification model paths inside
    # the downloaded snapshot.
    det_model_path = os.path.join(
        download_path, "onnx", "PP-OCRv5", "det", "ch_PP-OCRv5_server_det.onnx"
    )
    rec_model_path = os.path.join(
        download_path, "onnx", "PP-OCRv5", "rec", "ch_PP-OCRv5_rec_server_infer.onnx"
    )
    cls_model_path = os.path.join(
        download_path, "onnx", "PP-OCRv4", "cls", "ch_ppocr_mobile_v2.0_cls_infer.onnx"
    )
    pipeline_options = PdfPipelineOptions(
        ocr_options=RapidOcrOptions(
            det_model_path=det_model_path,
            rec_model_path=rec_model_path,
            cls_model_path=cls_model_path,
        ),
    )
    # Convert the document and emit Markdown on stdout.
    converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_options=pipeline_options,
            ),
        },
    )
    result: ConversionResult = converter.convert(source=source)
    print(result.document.export_to_markdown())
if __name__ == "__main__":
    main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/custom_convert.py | docs/examples/custom_convert.py | # %% [markdown]
# Customize PDF conversion by toggling OCR/backends and pipeline options.
#
# What this example does
# - Shows several alternative configurations for the Docling PDF pipeline.
# - Lets you try OCR engines (EasyOCR, Tesseract, system OCR) or no OCR.
# - Converts a single sample PDF and exports results to `scratch/`.
#
# Prerequisites
# - Install Docling and its optional OCR backends per the docs.
# - Ensure you can import `docling` from your Python environment.
#
# How to run
# - From the repository root, run: `python docs/examples/custom_convert.py`.
# - Outputs are written under `scratch/` next to where you run the script.
#
# Choosing a configuration
# - Only one configuration block should be active at a time.
# - Uncomment exactly one of the sections below to experiment.
# - The file ships with "Docling Parse with EasyOCR" enabled as a sensible default.
# - If you uncomment a backend or OCR option that is not imported above, also
# import its class, e.g.:
# - `from docling.backend.pypdfium2_backend import PyPdfiumDocumentBackend`
# - `from docling.datamodel.pipeline_options import TesseractOcrOptions, TesseractCliOcrOptions, OcrMacOptions`
#
# Input document
# - Defaults to a single PDF from `tests/data/pdf/` in the repo.
# - If you don't have the test data, update `input_doc_path` to a local PDF.
#
# Notes
# - EasyOCR language: adjust `pipeline_options.ocr_options.lang` (e.g., ["en"], ["es"], ["en", "de"]).
# - Accelerators: tune `AcceleratorOptions` to select CPU/GPU or threads.
# - Exports: JSON, plain text, Markdown, and doctags are saved in `scratch/`.
# %%
import json
import logging
import time
from pathlib import Path
from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import (
PdfPipelineOptions,
)
from docling.document_converter import DocumentConverter, PdfFormatOption
_log = logging.getLogger(__name__)
def main():
    """Convert one sample PDF with a configurable pipeline and export results.

    Exactly one of the configuration sections below should be active at a
    time; the file ships with "Docling Parse with EasyOCR" enabled. Outputs
    (JSON, text, Markdown, doctags) are written to ./scratch.
    """
    logging.basicConfig(level=logging.INFO)
    data_folder = Path(__file__).parent / "../../tests/data"
    input_doc_path = data_folder / "pdf/2206.01062.pdf"
    ###########################################################################
    # The sections below demo combinations of PdfPipelineOptions and backends.
    # Tip: Uncomment exactly one section at a time to compare outputs.
    # PyPdfium without EasyOCR
    # --------------------
    # pipeline_options = PdfPipelineOptions()
    # pipeline_options.do_ocr = False
    # pipeline_options.do_table_structure = True
    # pipeline_options.table_structure_options.do_cell_matching = False
    # doc_converter = DocumentConverter(
    #     format_options={
    #         InputFormat.PDF: PdfFormatOption(
    #             pipeline_options=pipeline_options, backend=PyPdfiumDocumentBackend
    #         )
    #     }
    # )
    # PyPdfium with EasyOCR
    # -----------------
    # pipeline_options = PdfPipelineOptions()
    # pipeline_options.do_ocr = True
    # pipeline_options.do_table_structure = True
    # pipeline_options.table_structure_options.do_cell_matching = True
    # doc_converter = DocumentConverter(
    #     format_options={
    #         InputFormat.PDF: PdfFormatOption(
    #             pipeline_options=pipeline_options, backend=PyPdfiumDocumentBackend
    #         )
    #     }
    # )
    # Docling Parse without EasyOCR
    # -------------------------
    # pipeline_options = PdfPipelineOptions()
    # pipeline_options.do_ocr = False
    # pipeline_options.do_table_structure = True
    # pipeline_options.table_structure_options.do_cell_matching = True
    # doc_converter = DocumentConverter(
    #     format_options={
    #         InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options)
    #     }
    # )
    # Docling Parse with EasyOCR (default)
    # -------------------------------
    # Enables OCR and table structure with EasyOCR, using automatic device
    # selection via AcceleratorOptions. Adjust languages as needed.
    pipeline_options = PdfPipelineOptions()
    pipeline_options.do_ocr = True
    pipeline_options.do_table_structure = True
    pipeline_options.table_structure_options.do_cell_matching = True
    pipeline_options.ocr_options.lang = ["es"]
    pipeline_options.accelerator_options = AcceleratorOptions(
        num_threads=4, device=AcceleratorDevice.AUTO
    )
    doc_converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options)
        }
    )
    # Docling Parse with EasyOCR (CPU only)
    # -------------------------------------
    # pipeline_options = PdfPipelineOptions()
    # pipeline_options.do_ocr = True
    # pipeline_options.ocr_options.use_gpu = False  # <-- set this.
    # pipeline_options.do_table_structure = True
    # pipeline_options.table_structure_options.do_cell_matching = True
    # doc_converter = DocumentConverter(
    #     format_options={
    #         InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options)
    #     }
    # )
    # Docling Parse with Tesseract
    # ----------------------------
    # pipeline_options = PdfPipelineOptions()
    # pipeline_options.do_ocr = True
    # pipeline_options.do_table_structure = True
    # pipeline_options.table_structure_options.do_cell_matching = True
    # pipeline_options.ocr_options = TesseractOcrOptions()
    # doc_converter = DocumentConverter(
    #     format_options={
    #         InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options)
    #     }
    # )
    # Docling Parse with Tesseract CLI
    # --------------------------------
    # pipeline_options = PdfPipelineOptions()
    # pipeline_options.do_ocr = True
    # pipeline_options.do_table_structure = True
    # pipeline_options.table_structure_options.do_cell_matching = True
    # pipeline_options.ocr_options = TesseractCliOcrOptions()
    # doc_converter = DocumentConverter(
    #     format_options={
    #         InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options)
    #     }
    # )
    # Docling Parse with ocrmac (macOS only)
    # --------------------------------------
    # pipeline_options = PdfPipelineOptions()
    # pipeline_options.do_ocr = True
    # pipeline_options.do_table_structure = True
    # pipeline_options.table_structure_options.do_cell_matching = True
    # pipeline_options.ocr_options = OcrMacOptions()
    # doc_converter = DocumentConverter(
    #     format_options={
    #         InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options)
    #     }
    # )
    ###########################################################################
    start_time = time.time()
    conv_result = doc_converter.convert(input_doc_path)
    end_time = time.time() - start_time
    _log.info(f"Document converted in {end_time:.2f} seconds.")
    ## Export results
    output_dir = Path("scratch")
    output_dir.mkdir(parents=True, exist_ok=True)
    doc_filename = conv_result.input.file.stem
    # Export Docling document JSON format:
    with (output_dir / f"{doc_filename}.json").open("w", encoding="utf-8") as fp:
        fp.write(json.dumps(conv_result.document.export_to_dict()))
    # Export Text format (plain text via Markdown export):
    with (output_dir / f"{doc_filename}.txt").open("w", encoding="utf-8") as fp:
        fp.write(conv_result.document.export_to_markdown(strict_text=True))
    # Export Markdown format:
    with (output_dir / f"{doc_filename}.md").open("w", encoding="utf-8") as fp:
        fp.write(conv_result.document.export_to_markdown())
    # Export Document Tags format:
    with (output_dir / f"{doc_filename}.doctags").open("w", encoding="utf-8") as fp:
        fp.write(conv_result.document.export_to_doctags())
if __name__ == "__main__":
    main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/export_multimodal.py | docs/examples/export_multimodal.py | # %% [markdown]
# Export multimodal page data (image bytes, text, segments) to a Parquet file.
#
# What this example does
# - Converts a PDF and assembles per-page multimodal records: image, cells, text, segments.
# - Normalizes records to a pandas DataFrame and writes a timestamped `.parquet` in `scratch/`.
#
# Prerequisites
# - Install Docling and `pandas`. Optional: `datasets` and `Pillow` for the commented demo.
#
# How to run
# - From the repo root: `python docs/examples/export_multimodal.py`.
# - Output parquet is written to `scratch/`.
#
# Key options
# - `IMAGE_RESOLUTION_SCALE`: page rendering scale (1 ~ 72 DPI).
# - `PdfPipelineOptions.generate_page_images`: keep page images for export.
#
# Requirements
# - Writing Parquet requires an engine such as `pyarrow` or `fastparquet`
# (`pip install pyarrow` is the most common choice).
#
# Input document
# - Defaults to `tests/data/pdf/2206.01062.pdf`. Change `input_doc_path` as needed.
#
# Notes
# - The commented block at the bottom shows how to load the Parquet with HF Datasets
# and reconstruct images from raw bytes.
# %%
import datetime
import logging
import time
from pathlib import Path
import pandas as pd
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.utils.export import generate_multimodal_pages
from docling.utils.utils import create_hash
_log = logging.getLogger(__name__)
IMAGE_RESOLUTION_SCALE = 2.0
def main():
logging.basicConfig(level=logging.INFO)
data_folder = Path(__file__).parent / "../../tests/data"
input_doc_path = data_folder / "pdf/2206.01062.pdf"
output_dir = Path("scratch")
# Keep page images so they can be exported to the multimodal rows.
# Use PdfPipelineOptions.images_scale to control the render scale (1 ~ 72 DPI).
pipeline_options = PdfPipelineOptions()
pipeline_options.images_scale = IMAGE_RESOLUTION_SCALE
pipeline_options.generate_page_images = True
doc_converter = DocumentConverter(
format_options={
InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options)
}
)
start_time = time.time()
conv_res = doc_converter.convert(input_doc_path)
output_dir.mkdir(parents=True, exist_ok=True)
rows = []
for (
content_text,
content_md,
content_dt,
page_cells,
page_segments,
page,
) in generate_multimodal_pages(conv_res):
dpi = page._default_image_scale * 72
rows.append(
{
"document": conv_res.input.file.name,
"hash": conv_res.input.document_hash,
"page_hash": create_hash(
conv_res.input.document_hash + ":" + str(page.page_no - 1)
),
"image": {
"width": page.image.width,
"height": page.image.height,
"bytes": page.image.tobytes(),
},
"cells": page_cells,
"contents": content_text,
"contents_md": content_md,
"contents_dt": content_dt,
"segments": page_segments,
"extra": {
"page_num": page.page_no + 1,
"width_in_points": page.size.width,
"height_in_points": page.size.height,
"dpi": dpi,
},
}
)
# Generate one parquet from all documents
df_result = pd.json_normalize(rows)
now = datetime.datetime.now()
output_filename = output_dir / f"multimodal_{now:%Y-%m-%d_%H%M%S}.parquet"
df_result.to_parquet(output_filename)
end_time = time.time() - start_time
_log.info(
f"Document converted and multimodal pages generated in {end_time:.2f} seconds."
)
# This block demonstrates how the file can be opened with the HF datasets library
# from datasets import Dataset
# from PIL import Image
# multimodal_df = pd.read_parquet(output_filename)
# # Convert pandas DataFrame to Hugging Face Dataset and load bytes into image
# dataset = Dataset.from_pandas(multimodal_df)
# def transforms(examples):
# examples["image"] = Image.frombytes('RGB', (examples["image.width"], examples["image.height"]), examples["image.bytes"], 'raw')
# return examples
# dataset = dataset.map(transforms)
if __name__ == "__main__":
main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/full_page_ocr.py | docs/examples/full_page_ocr.py | # %% [markdown]
# Force full-page OCR on a PDF using different OCR backends.
#
# What this example does
# - Enables full-page OCR and table structure extraction for a sample PDF.
# - Demonstrates how to switch between OCR backends via `ocr_options`.
#
# Prerequisites
# - Install Docling and the desired OCR backend's dependencies (Tesseract, EasyOCR,
# RapidOCR, or macOS OCR).
#
# How to run
# - From the repo root: `python docs/examples/full_page_ocr.py`.
# - The script prints Markdown text to stdout.
#
# Choosing an OCR backend
# - Uncomment one `ocr_options = ...` line below. Exactly one should be active.
# - `force_full_page_ocr=True` processes each page purely via OCR (often slower
# than hybrid detection). Use when layout extraction is unreliable or the PDF
# contains scanned pages.
# - If you switch OCR backends, ensure the corresponding option class is imported,
# e.g., `EasyOcrOptions`, `TesseractOcrOptions`, `OcrMacOptions`, `RapidOcrOptions`.
#
# Input document
# - Defaults to `tests/data/pdf/2206.01062.pdf`. Change `input_doc_path` as needed.
# %%
from pathlib import Path
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import (
PdfPipelineOptions,
TesseractCliOcrOptions,
)
from docling.document_converter import DocumentConverter, PdfFormatOption
def main():
data_folder = Path(__file__).parent / "../../tests/data"
input_doc_path = data_folder / "pdf/2206.01062.pdf"
pipeline_options = PdfPipelineOptions()
pipeline_options.do_ocr = True
pipeline_options.do_table_structure = True
pipeline_options.table_structure_options.do_cell_matching = True
# Any of the OCR options can be used: EasyOcrOptions, TesseractOcrOptions,
# TesseractCliOcrOptions, OcrMacOptions (macOS only), RapidOcrOptions
# ocr_options = EasyOcrOptions(force_full_page_ocr=True)
# ocr_options = TesseractOcrOptions(force_full_page_ocr=True)
# ocr_options = OcrMacOptions(force_full_page_ocr=True)
# ocr_options = RapidOcrOptions(force_full_page_ocr=True)
ocr_options = TesseractCliOcrOptions(force_full_page_ocr=True)
pipeline_options.ocr_options = ocr_options
converter = DocumentConverter(
format_options={
InputFormat.PDF: PdfFormatOption(
pipeline_options=pipeline_options,
)
}
)
doc = converter.convert(input_doc_path).document
md = doc.export_to_markdown()
print(md)
if __name__ == "__main__":
main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/minimal_asr_pipeline.py | docs/examples/minimal_asr_pipeline.py | # %% [markdown]
# Minimal ASR pipeline example: transcribe an audio file to Markdown text.
#
# What this example does
# - Configures the ASR pipeline with a default model spec and converts one audio file.
# - Prints the recognized speech segments in Markdown with timestamps.
#
# Prerequisites
# - Install Docling with ASR extras and any audio dependencies (ffmpeg, etc.).
# - Ensure your environment can download or access the configured ASR model.
# - Some formats require ffmpeg codecs; install ffmpeg and ensure it's on PATH.
#
# How to run
# - From the repository root, run: `python docs/examples/minimal_asr_pipeline.py`.
# - The script prints the transcription to stdout.
#
# Customizing the model
# - The script automatically selects the best model for your hardware (MLX Whisper for Apple Silicon, native Whisper otherwise).
# - Edit `get_asr_converter()` to manually override `pipeline_options.asr_options` with any model from `asr_model_specs`.
# - Keep `InputFormat.AUDIO` and `AsrPipeline` unchanged for a minimal setup.
#
# Input audio
# - Defaults to `tests/data/audio/sample_10s.mp3`. Update `audio_path` to your own file if needed.
# %%
from pathlib import Path
from docling_core.types.doc import DoclingDocument
from docling.datamodel import asr_model_specs
from docling.datamodel.base_models import ConversionStatus, InputFormat
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import AsrPipelineOptions
from docling.document_converter import AudioFormatOption, DocumentConverter
from docling.pipeline.asr_pipeline import AsrPipeline
def get_asr_converter():
"""Create a DocumentConverter configured for ASR with automatic model selection.
Uses `asr_model_specs.WHISPER_TURBO` which automatically selects the best
implementation for your hardware:
- MLX Whisper Turbo for Apple Silicon (M1/M2/M3) with mlx-whisper installed
- Native Whisper Turbo as fallback
You can swap in another model spec from `docling.datamodel.asr_model_specs`
to experiment with different model sizes.
"""
pipeline_options = AsrPipelineOptions()
pipeline_options.asr_options = asr_model_specs.WHISPER_TURBO
converter = DocumentConverter(
format_options={
InputFormat.AUDIO: AudioFormatOption(
pipeline_cls=AsrPipeline,
pipeline_options=pipeline_options,
)
}
)
return converter
def asr_pipeline_conversion(audio_path: Path) -> DoclingDocument:
"""Run the ASR pipeline and return a `DoclingDocument` transcript."""
# Check if the test audio file exists
assert audio_path.exists(), f"Test audio file not found: {audio_path}"
converter = get_asr_converter()
# Convert the audio file
result: ConversionResult = converter.convert(audio_path)
# Verify conversion was successful
assert result.status == ConversionStatus.SUCCESS, (
f"Conversion failed with status: {result.status}"
)
return result.document
if __name__ == "__main__":
audio_path = Path("tests/data/audio/sample_10s.mp3")
doc = asr_pipeline_conversion(audio_path=audio_path)
print(doc.export_to_markdown())
# Expected output:
#
# [time: 0.0-4.0] Shakespeare on Scenery by Oscar Wilde
#
# [time: 5.28-9.96] This is a LibriVox recording. All LibriVox recordings are in the public domain.
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docs/examples/experimental/process_table_crops.py | docs/examples/experimental/process_table_crops.py | """Run Docling on an image using the experimental TableCrops layout model."""
from __future__ import annotations
from pathlib import Path
import docling
from docling.datamodel.document import InputFormat
from docling.datamodel.pipeline_options import ThreadedPdfPipelineOptions
from docling.document_converter import DocumentConverter, ImageFormatOption
from docling.experimental.datamodel.table_crops_layout_options import (
TableCropsLayoutOptions,
)
from docling.experimental.models.table_crops_layout_model import TableCropsLayoutModel
from docling.models.factories import get_layout_factory
def main() -> None:
sample_image = "tests/data/2305.03393v1-table_crop.png"
pipeline_options = ThreadedPdfPipelineOptions(
layout_options=TableCropsLayoutOptions(),
do_table_structure=True,
generate_page_images=True,
)
converter = DocumentConverter(
allowed_formats=[InputFormat.IMAGE],
format_options={
InputFormat.IMAGE: ImageFormatOption(pipeline_options=pipeline_options)
},
)
conv_res = converter.convert(sample_image)
print(conv_res.document.tables[0].export_to_markdown())
if __name__ == "__main__":
main()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/exceptions.py | docling/exceptions.py | class BaseError(RuntimeError):
pass
class ConversionError(BaseError):
pass
class OperationNotAllowed(BaseError):
pass
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/document_extractor.py | docling/document_extractor.py | import hashlib
import logging
import sys
import threading
import time
import warnings
from collections.abc import Iterable, Iterator
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from pathlib import Path
from typing import Optional, Type, Union
from pydantic import ConfigDict, model_validator, validate_call
from typing_extensions import Self
from docling.backend.abstract_backend import AbstractDocumentBackend
from docling.backend.image_backend import ImageDocumentBackend
from docling.backend.pypdfium2_backend import PyPdfiumDocumentBackend
from docling.datamodel.base_models import (
BaseFormatOption,
ConversionStatus,
DoclingComponentType,
DocumentStream,
ErrorItem,
InputFormat,
)
from docling.datamodel.document import (
InputDocument,
_DocumentConversionInput, # intentionally reused builder
)
from docling.datamodel.extraction import ExtractionResult, ExtractionTemplateType
from docling.datamodel.pipeline_options import PipelineOptions
from docling.datamodel.settings import (
DEFAULT_PAGE_RANGE,
DocumentLimits,
PageRange,
settings,
)
from docling.exceptions import ConversionError
from docling.pipeline.base_extraction_pipeline import BaseExtractionPipeline
from docling.pipeline.extraction_vlm_pipeline import ExtractionVlmPipeline
from docling.utils.utils import chunkify
_log = logging.getLogger(__name__)
_PIPELINE_CACHE_LOCK = threading.Lock()
class ExtractionFormatOption(BaseFormatOption):
"""Per-format configuration for extraction.
Notes:
- `pipeline_cls` must subclass `BaseExtractionPipeline`.
- `pipeline_options` is typed as `PipelineOptions` which MUST inherit from
`BaseOptions` (as used by `BaseExtractionPipeline`).
- `backend` is the document-opening backend used by `_DocumentConversionInput`.
"""
pipeline_cls: Type[BaseExtractionPipeline]
@model_validator(mode="after")
def set_optional_field_default(self) -> Self:
if self.pipeline_options is None:
# `get_default_options` comes from BaseExtractionPipeline
self.pipeline_options = self.pipeline_cls.get_default_options() # type: ignore[assignment]
return self
def _get_default_extraction_option(fmt: InputFormat) -> ExtractionFormatOption:
"""Return the default extraction option for a given input format.
Defaults mirror the converter's *backend* choices, while the pipeline is
the VLM extractor. This duplication will be removed when we deduplicate
the format registry between convert/extract.
"""
format_to_default_backend: dict[InputFormat, Type[AbstractDocumentBackend]] = {
InputFormat.IMAGE: ImageDocumentBackend,
InputFormat.PDF: PyPdfiumDocumentBackend,
}
backend = format_to_default_backend.get(fmt)
if backend is None:
raise RuntimeError(f"No default extraction backend configured for {fmt}")
return ExtractionFormatOption(
pipeline_cls=ExtractionVlmPipeline,
backend=backend,
)
class DocumentExtractor:
"""Standalone extractor class.
Public API:
- `extract(...) -> ExtractionResult`
- `extract_all(...) -> Iterator[ExtractionResult]`
Implementation intentionally reuses `_DocumentConversionInput` to build
`InputDocument` with the correct backend per format.
"""
def __init__(
self,
allowed_formats: Optional[list[InputFormat]] = None,
extraction_format_options: Optional[
dict[InputFormat, ExtractionFormatOption]
] = None,
) -> None:
self.allowed_formats: list[InputFormat] = (
allowed_formats if allowed_formats is not None else list(InputFormat)
)
# Build per-format options with defaults, then apply any user overrides
overrides = extraction_format_options or {}
self.extraction_format_to_options: dict[InputFormat, ExtractionFormatOption] = {
fmt: overrides.get(fmt, _get_default_extraction_option(fmt))
for fmt in self.allowed_formats
}
# Cache pipelines by (class, options-hash)
self._initialized_pipelines: dict[
tuple[Type[BaseExtractionPipeline], str], BaseExtractionPipeline
] = {}
# ---------------------------- Public API ---------------------------------
@validate_call(config=ConfigDict(strict=True))
def extract(
self,
source: Union[Path, str, DocumentStream],
template: ExtractionTemplateType,
headers: Optional[dict[str, str]] = None,
raises_on_error: bool = True,
max_num_pages: int = sys.maxsize,
max_file_size: int = sys.maxsize,
page_range: PageRange = DEFAULT_PAGE_RANGE,
) -> ExtractionResult:
all_res = self.extract_all(
source=[source],
headers=headers,
raises_on_error=raises_on_error,
max_num_pages=max_num_pages,
max_file_size=max_file_size,
page_range=page_range,
template=template,
)
return next(all_res)
@validate_call(config=ConfigDict(strict=True))
def extract_all(
self,
source: Iterable[Union[Path, str, DocumentStream]],
template: ExtractionTemplateType,
headers: Optional[dict[str, str]] = None,
raises_on_error: bool = True,
max_num_pages: int = sys.maxsize,
max_file_size: int = sys.maxsize,
page_range: PageRange = DEFAULT_PAGE_RANGE,
) -> Iterator[ExtractionResult]:
warnings.warn(
"The extract API is currently experimental and may change without prior notice.\n"
"Only PDF and image formats are supported.",
UserWarning,
stacklevel=2,
)
limits = DocumentLimits(
max_num_pages=max_num_pages,
max_file_size=max_file_size,
page_range=page_range,
)
conv_input = _DocumentConversionInput(
path_or_stream_iterator=source, limits=limits, headers=headers
)
ext_res_iter = self._extract(
conv_input, raises_on_error=raises_on_error, template=template
)
had_result = False
for ext_res in ext_res_iter:
had_result = True
if raises_on_error and ext_res.status not in {
ConversionStatus.SUCCESS,
ConversionStatus.PARTIAL_SUCCESS,
}:
raise ConversionError(
f"Extraction failed for: {ext_res.input.file} with status: {ext_res.status}"
)
else:
yield ext_res
if not had_result and raises_on_error:
raise ConversionError(
"Extraction failed because the provided file has no recognizable format or it wasn't in the list of allowed formats."
)
# --------------------------- Internal engine ------------------------------
def _extract(
self,
conv_input: _DocumentConversionInput,
raises_on_error: bool,
template: ExtractionTemplateType,
) -> Iterator[ExtractionResult]:
start_time = time.monotonic()
for input_batch in chunkify(
conv_input.docs(self.extraction_format_to_options),
settings.perf.doc_batch_size,
):
_log.info("Going to extract document batch...")
process_func = partial(
self._process_document_extraction,
raises_on_error=raises_on_error,
template=template,
)
if (
settings.perf.doc_batch_concurrency > 1
and settings.perf.doc_batch_size > 1
):
with ThreadPoolExecutor(
max_workers=settings.perf.doc_batch_concurrency
) as pool:
for item in pool.map(
process_func,
input_batch,
):
yield item
else:
for item in map(
process_func,
input_batch,
):
elapsed = time.monotonic() - start_time
start_time = time.monotonic()
_log.info(
f"Finished extracting document {item.input.file.name} in {elapsed:.2f} sec."
)
yield item
def _process_document_extraction(
self,
in_doc: InputDocument,
raises_on_error: bool,
template: ExtractionTemplateType,
) -> ExtractionResult:
valid = (
self.allowed_formats is not None and in_doc.format in self.allowed_formats
)
if valid:
return self._execute_extraction_pipeline(
in_doc, raises_on_error=raises_on_error, template=template
)
else:
error_message = f"File format not allowed: {in_doc.file}"
if raises_on_error:
raise ConversionError(error_message)
else:
error_item = ErrorItem(
component_type=DoclingComponentType.USER_INPUT,
module_name="",
error_message=error_message,
)
return ExtractionResult(
input=in_doc, status=ConversionStatus.SKIPPED, errors=[error_item]
)
def _execute_extraction_pipeline(
self,
in_doc: InputDocument,
raises_on_error: bool,
template: ExtractionTemplateType,
) -> ExtractionResult:
if not in_doc.valid:
if raises_on_error:
raise ConversionError(f"Input document {in_doc.file} is not valid.")
else:
return ExtractionResult(input=in_doc, status=ConversionStatus.FAILURE)
pipeline = self._get_pipeline(in_doc.format)
if pipeline is None:
if raises_on_error:
raise ConversionError(
f"No extraction pipeline could be initialized for {in_doc.file}."
)
else:
return ExtractionResult(input=in_doc, status=ConversionStatus.FAILURE)
return pipeline.execute(
in_doc, raises_on_error=raises_on_error, template=template
)
def _get_pipeline(
self, doc_format: InputFormat
) -> Optional[BaseExtractionPipeline]:
"""Retrieve or initialize a pipeline, reusing instances based on class and options."""
fopt = self.extraction_format_to_options.get(doc_format)
if fopt is None or fopt.pipeline_options is None:
return None
pipeline_class = fopt.pipeline_cls
pipeline_options = fopt.pipeline_options
options_hash = self._get_pipeline_options_hash(pipeline_options)
cache_key = (pipeline_class, options_hash)
with _PIPELINE_CACHE_LOCK:
if cache_key not in self._initialized_pipelines:
_log.info(
f"Initializing extraction pipeline for {pipeline_class.__name__} with options hash {options_hash}"
)
self._initialized_pipelines[cache_key] = pipeline_class(
pipeline_options=pipeline_options # type: ignore[arg-type]
)
else:
_log.debug(
f"Reusing cached extraction pipeline for {pipeline_class.__name__} with options hash {options_hash}"
)
return self._initialized_pipelines[cache_key]
@staticmethod
def _get_pipeline_options_hash(pipeline_options: PipelineOptions) -> str:
"""Generate a stable hash of pipeline options to use as part of the cache key."""
options_str = str(pipeline_options.model_dump())
return hashlib.md5(
options_str.encode("utf-8"), usedforsecurity=False
).hexdigest()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/__init__.py | docling/__init__.py | python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false | |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/document_converter.py | docling/document_converter.py | import hashlib
import logging
import sys
import threading
import time
import warnings
from collections.abc import Iterable, Iterator
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from functools import partial
from io import BytesIO
from pathlib import Path
from typing import Optional, Type, Union
from pydantic import ConfigDict, model_validator, validate_call
from typing_extensions import Self
from docling.backend.abstract_backend import (
AbstractDocumentBackend,
)
from docling.backend.asciidoc_backend import AsciiDocBackend
from docling.backend.csv_backend import CsvDocumentBackend
from docling.backend.docling_parse_v4_backend import DoclingParseV4DocumentBackend
from docling.backend.html_backend import HTMLDocumentBackend
from docling.backend.image_backend import ImageDocumentBackend
from docling.backend.json.docling_json_backend import DoclingJSONBackend
from docling.backend.md_backend import MarkdownDocumentBackend
from docling.backend.mets_gbs_backend import MetsGbsDocumentBackend
from docling.backend.msexcel_backend import MsExcelDocumentBackend
from docling.backend.mspowerpoint_backend import MsPowerpointDocumentBackend
from docling.backend.msword_backend import MsWordDocumentBackend
from docling.backend.noop_backend import NoOpBackend
from docling.backend.webvtt_backend import WebVTTDocumentBackend
from docling.backend.xml.jats_backend import JatsDocumentBackend
from docling.backend.xml.uspto_backend import PatentUsptoDocumentBackend
from docling.datamodel.backend_options import (
BackendOptions,
HTMLBackendOptions,
MarkdownBackendOptions,
PdfBackendOptions,
)
from docling.datamodel.base_models import (
BaseFormatOption,
ConversionStatus,
DoclingComponentType,
DocumentStream,
ErrorItem,
InputFormat,
)
from docling.datamodel.document import (
ConversionResult,
InputDocument,
_DocumentConversionInput,
)
from docling.datamodel.pipeline_options import PipelineOptions
from docling.datamodel.settings import (
DEFAULT_PAGE_RANGE,
DocumentLimits,
PageRange,
settings,
)
from docling.exceptions import ConversionError
from docling.pipeline.asr_pipeline import AsrPipeline
from docling.pipeline.base_pipeline import BasePipeline
from docling.pipeline.simple_pipeline import SimplePipeline
from docling.pipeline.standard_pdf_pipeline import StandardPdfPipeline
from docling.utils.utils import chunkify
_log = logging.getLogger(__name__)
_PIPELINE_CACHE_LOCK = threading.Lock()
class FormatOption(BaseFormatOption):
pipeline_cls: Type[BasePipeline]
backend_options: Optional[BackendOptions] = None
@model_validator(mode="after")
def set_optional_field_default(self) -> Self:
if self.pipeline_options is None:
self.pipeline_options = self.pipeline_cls.get_default_options()
return self
class CsvFormatOption(FormatOption):
pipeline_cls: Type = SimplePipeline
backend: Type[AbstractDocumentBackend] = CsvDocumentBackend
class ExcelFormatOption(FormatOption):
pipeline_cls: Type = SimplePipeline
backend: Type[AbstractDocumentBackend] = MsExcelDocumentBackend
class WordFormatOption(FormatOption):
pipeline_cls: Type = SimplePipeline
backend: Type[AbstractDocumentBackend] = MsWordDocumentBackend
class PowerpointFormatOption(FormatOption):
pipeline_cls: Type = SimplePipeline
backend: Type[AbstractDocumentBackend] = MsPowerpointDocumentBackend
class MarkdownFormatOption(FormatOption):
pipeline_cls: Type = SimplePipeline
backend: Type[AbstractDocumentBackend] = MarkdownDocumentBackend
backend_options: Optional[MarkdownBackendOptions] = None
class AsciiDocFormatOption(FormatOption):
pipeline_cls: Type = SimplePipeline
backend: Type[AbstractDocumentBackend] = AsciiDocBackend
class HTMLFormatOption(FormatOption):
pipeline_cls: Type = SimplePipeline
backend: Type[AbstractDocumentBackend] = HTMLDocumentBackend
backend_options: Optional[HTMLBackendOptions] = None
class PatentUsptoFormatOption(FormatOption):
pipeline_cls: Type = SimplePipeline
backend: Type[PatentUsptoDocumentBackend] = PatentUsptoDocumentBackend
class XMLJatsFormatOption(FormatOption):
pipeline_cls: Type = SimplePipeline
backend: Type[AbstractDocumentBackend] = JatsDocumentBackend
class ImageFormatOption(FormatOption):
pipeline_cls: Type = StandardPdfPipeline
backend: Type[AbstractDocumentBackend] = ImageDocumentBackend
class PdfFormatOption(FormatOption):
pipeline_cls: Type = StandardPdfPipeline
backend: Type[AbstractDocumentBackend] = DoclingParseV4DocumentBackend
backend_options: Optional[PdfBackendOptions] = None
class AudioFormatOption(FormatOption):
pipeline_cls: Type = AsrPipeline
backend: Type[AbstractDocumentBackend] = NoOpBackend
def _get_default_option(format: InputFormat) -> FormatOption:
format_to_default_options = {
InputFormat.CSV: CsvFormatOption(),
InputFormat.XLSX: ExcelFormatOption(),
InputFormat.DOCX: WordFormatOption(),
InputFormat.PPTX: PowerpointFormatOption(),
InputFormat.MD: MarkdownFormatOption(),
InputFormat.ASCIIDOC: AsciiDocFormatOption(),
InputFormat.HTML: HTMLFormatOption(),
InputFormat.XML_USPTO: PatentUsptoFormatOption(),
InputFormat.XML_JATS: XMLJatsFormatOption(),
InputFormat.METS_GBS: FormatOption(
pipeline_cls=StandardPdfPipeline, backend=MetsGbsDocumentBackend
),
InputFormat.IMAGE: ImageFormatOption(),
InputFormat.PDF: PdfFormatOption(),
InputFormat.JSON_DOCLING: FormatOption(
pipeline_cls=SimplePipeline, backend=DoclingJSONBackend
),
InputFormat.AUDIO: AudioFormatOption(),
InputFormat.VTT: FormatOption(
pipeline_cls=SimplePipeline, backend=WebVTTDocumentBackend
),
}
if (options := format_to_default_options.get(format)) is not None:
return options
else:
raise RuntimeError(f"No default options configured for {format}")
class DocumentConverter:
"""Convert documents of various input formats to Docling documents.
`DocumentConverter` is the main entry point for converting documents in Docling.
It handles various input formats (PDF, DOCX, PPTX, images, HTML, Markdown, etc.)
and provides both single-document and batch conversion capabilities.
The conversion methods return a `ConversionResult` instance for each document,
which wraps a `DoclingDocument` object if the conversion was successful, along
with metadata about the conversion process.
Attributes:
allowed_formats: Allowed input formats.
format_to_options: Mapping of formats to their options.
initialized_pipelines: Cache of initialized pipelines keyed by
(pipeline class, options hash).
"""
_default_download_filename = "file"
def __init__(
self,
allowed_formats: Optional[list[InputFormat]] = None,
format_options: Optional[dict[InputFormat, FormatOption]] = None,
) -> None:
"""Initialize the converter based on format preferences.
Args:
allowed_formats: List of allowed input formats. By default, any
format supported by Docling is allowed.
format_options: Dictionary of format-specific options.
"""
self.allowed_formats: list[InputFormat] = (
allowed_formats if allowed_formats is not None else list(InputFormat)
)
# Normalize format options: ensure IMAGE format uses ImageDocumentBackend
# for backward compatibility (old code might use PdfFormatOption or other backends for images)
normalized_format_options: dict[InputFormat, FormatOption] = {}
if format_options:
for format, option in format_options.items():
if (
format == InputFormat.IMAGE
and option.backend is not ImageDocumentBackend
):
warnings.warn(
f"Using {option.backend.__name__} for InputFormat.IMAGE is deprecated. "
"Images should use ImageDocumentBackend via ImageFormatOption. "
"Automatically correcting the backend, please update your code to avoid this warning.",
DeprecationWarning,
stacklevel=2,
)
# Convert to ImageFormatOption while preserving pipeline and backend options
normalized_format_options[format] = ImageFormatOption(
pipeline_cls=option.pipeline_cls,
pipeline_options=option.pipeline_options,
backend_options=option.backend_options,
)
else:
normalized_format_options[format] = option
self.format_to_options: dict[InputFormat, FormatOption] = {
format: (
_get_default_option(format=format)
if (custom_option := normalized_format_options.get(format)) is None
else custom_option
)
for format in self.allowed_formats
}
self.initialized_pipelines: dict[
tuple[Type[BasePipeline], str], BasePipeline
] = {}
def _get_initialized_pipelines(
self,
) -> dict[tuple[Type[BasePipeline], str], BasePipeline]:
return self.initialized_pipelines
def _get_pipeline_options_hash(self, pipeline_options: PipelineOptions) -> str:
"""Generate a hash of pipeline options to use as part of the cache key."""
options_str = str(pipeline_options.model_dump())
return hashlib.md5(
options_str.encode("utf-8"), usedforsecurity=False
).hexdigest()
def initialize_pipeline(self, format: InputFormat):
"""Initialize the conversion pipeline for the selected format.
Args:
format: The input format for which to initialize the pipeline.
Raises:
ConversionError: If no pipeline could be initialized for the
given format.
RuntimeError: If `artifacts_path` is set in
`docling.datamodel.settings.settings` when required by
the pipeline, but points to a non-directory file.
FileNotFoundError: If local model files are not found.
"""
pipeline = self._get_pipeline(doc_format=format)
if pipeline is None:
raise ConversionError(
f"No pipeline could be initialized for format {format}"
)
@validate_call(config=ConfigDict(strict=True))
def convert(
self,
source: Union[Path, str, DocumentStream], # TODO review naming
headers: Optional[dict[str, str]] = None,
raises_on_error: bool = True,
max_num_pages: int = sys.maxsize,
max_file_size: int = sys.maxsize,
page_range: PageRange = DEFAULT_PAGE_RANGE,
) -> ConversionResult:
"""Convert one document fetched from a file path, URL, or DocumentStream.
Note: If the document content is given as a string (Markdown or HTML
content), use the `convert_string` method.
Args:
source: Source of input document given as file path, URL, or
DocumentStream.
headers: Optional headers given as a dictionary of string key-value pairs,
in case of URL input source.
raises_on_error: Whether to raise an error on the first conversion failure.
If False, errors are captured in the ConversionResult objects.
max_num_pages: Maximum number of pages accepted per document.
Documents exceeding this number will not be converted.
max_file_size: Maximum file size to convert.
page_range: Range of pages to convert.
Returns:
The conversion result, which contains a `DoclingDocument` in the `document`
attribute, and metadata about the conversion process.
Raises:
ConversionError: An error occurred during conversion.
"""
all_res = self.convert_all(
source=[source],
raises_on_error=raises_on_error,
max_num_pages=max_num_pages,
max_file_size=max_file_size,
headers=headers,
page_range=page_range,
)
return next(all_res)
@validate_call(config=ConfigDict(strict=True))
def convert_all(
self,
source: Iterable[Union[Path, str, DocumentStream]], # TODO review naming
headers: Optional[dict[str, str]] = None,
raises_on_error: bool = True,
max_num_pages: int = sys.maxsize,
max_file_size: int = sys.maxsize,
page_range: PageRange = DEFAULT_PAGE_RANGE,
) -> Iterator[ConversionResult]:
"""Convert multiple documents from file paths, URLs, or DocumentStreams.
Args:
source: Source of input documents given as an iterable of file paths, URLs,
or DocumentStreams.
headers: Optional headers given as a (single) dictionary of string
key-value pairs, in case of URL input source.
raises_on_error: Whether to raise an error on the first conversion failure.
max_num_pages: Maximum number of pages to convert.
max_file_size: Maximum number of pages accepted per document. Documents
exceeding this number will be skipped.
page_range: Range of pages to convert in each document.
Yields:
The conversion results, each containing a `DoclingDocument` in the
`document` attribute and metadata about the conversion process.
Raises:
ConversionError: An error occurred during conversion.
"""
limits = DocumentLimits(
max_num_pages=max_num_pages,
max_file_size=max_file_size,
page_range=page_range,
)
conv_input = _DocumentConversionInput(
path_or_stream_iterator=source, limits=limits, headers=headers
)
conv_res_iter = self._convert(conv_input, raises_on_error=raises_on_error)
had_result = False
for conv_res in conv_res_iter:
had_result = True
if raises_on_error and conv_res.status not in {
ConversionStatus.SUCCESS,
ConversionStatus.PARTIAL_SUCCESS,
}:
error_details = ""
if conv_res.errors:
error_messages = [err.error_message for err in conv_res.errors]
error_details = f" Errors: {'; '.join(error_messages)}"
raise ConversionError(
f"Conversion failed for: {conv_res.input.file} with status: "
f"{conv_res.status}.{error_details}"
)
else:
yield conv_res
if not had_result and raises_on_error:
raise ConversionError(
"Conversion failed because the provided file has no recognizable "
"format or it wasn't in the list of allowed formats."
)
@validate_call(config=ConfigDict(strict=True))
def convert_string(
self,
content: str,
format: InputFormat,
name: Optional[str] = None,
) -> ConversionResult:
"""Convert a document given as a string using the specified format.
Only Markdown (`InputFormat.MD`) and HTML (`InputFormat.HTML`) formats
are supported. The content is wrapped in a `DocumentStream` and passed
to the main conversion pipeline.
Args:
content: The document content as a string.
format: The format of the input content.
name: The filename to associate with the document. If not provided, a
timestamp-based name is generated. The appropriate file extension (`md`
or `html`) is appended if missing.
Returns:
The conversion result, which contains a `DoclingDocument` in the `document`
attribute, and metadata about the conversion process.
Raises:
ValueError: If format is neither `InputFormat.MD` nor `InputFormat.HTML`.
ConversionError: An error occurred during conversion.
"""
name = name or datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
if format == InputFormat.MD:
if not name.endswith(".md"):
name += ".md"
buff = BytesIO(content.encode("utf-8"))
doc_stream = DocumentStream(name=name, stream=buff)
return self.convert(doc_stream)
elif format == InputFormat.HTML:
if not name.endswith(".html"):
name += ".html"
buff = BytesIO(content.encode("utf-8"))
doc_stream = DocumentStream(name=name, stream=buff)
return self.convert(doc_stream)
else:
raise ValueError(f"format {format} is not supported in `convert_string`")
    def _convert(
        self, conv_input: _DocumentConversionInput, raises_on_error: bool
    ) -> Iterator[ConversionResult]:
        """Yield conversion results for all input documents, batch by batch.

        Batches are sized by ``settings.perf.doc_batch_size``; when both batch
        size and ``doc_batch_concurrency`` exceed 1, documents of a batch are
        processed on a thread pool, otherwise sequentially.
        """
        start_time = time.monotonic()
        for input_batch in chunkify(
            conv_input.docs(self.format_to_options),
            settings.perf.doc_batch_size,  # pass format_options
        ):
            _log.info("Going to convert document batch...")
            # Bind raises_on_error once so the same callable fits both map paths.
            process_func = partial(
                self._process_document, raises_on_error=raises_on_error
            )
            if (
                settings.perf.doc_batch_concurrency > 1
                and settings.perf.doc_batch_size > 1
            ):
                # Concurrent path: pool.map preserves input order of results.
                with ThreadPoolExecutor(
                    max_workers=settings.perf.doc_batch_concurrency
                ) as pool:
                    for item in pool.map(
                        process_func,
                        input_batch,
                    ):
                        yield item
            else:
                # Sequential path: additionally logs per-document wall-clock time.
                # NOTE(review): elapsed also includes time the consumer spends
                # between `yield`s, since start_time is reset after each item.
                for item in map(
                    process_func,
                    input_batch,
                ):
                    elapsed = time.monotonic() - start_time
                    start_time = time.monotonic()
                    _log.info(
                        f"Finished converting document {item.input.file.name} in {elapsed:.2f} sec."
                    )
                    yield item
def _get_pipeline(self, doc_format: InputFormat) -> Optional[BasePipeline]:
"""Retrieve or initialize a pipeline, reusing instances based on class and options."""
fopt = self.format_to_options.get(doc_format)
if fopt is None or fopt.pipeline_options is None:
return None
pipeline_class = fopt.pipeline_cls
pipeline_options = fopt.pipeline_options
options_hash = self._get_pipeline_options_hash(pipeline_options)
# Use a composite key to cache pipelines
cache_key = (pipeline_class, options_hash)
with _PIPELINE_CACHE_LOCK:
if cache_key not in self.initialized_pipelines:
_log.info(
f"Initializing pipeline for {pipeline_class.__name__} with options hash {options_hash}"
)
self.initialized_pipelines[cache_key] = pipeline_class(
pipeline_options=pipeline_options
)
else:
_log.debug(
f"Reusing cached pipeline for {pipeline_class.__name__} with options hash {options_hash}"
)
return self.initialized_pipelines[cache_key]
def _process_document(
self, in_doc: InputDocument, raises_on_error: bool
) -> ConversionResult:
valid = (
self.allowed_formats is not None and in_doc.format in self.allowed_formats
)
if valid:
conv_res = self._execute_pipeline(in_doc, raises_on_error=raises_on_error)
else:
error_message = f"File format not allowed: {in_doc.file}"
if raises_on_error:
raise ConversionError(error_message)
else:
error_item = ErrorItem(
component_type=DoclingComponentType.USER_INPUT,
module_name="",
error_message=error_message,
)
conv_res = ConversionResult(
input=in_doc, status=ConversionStatus.SKIPPED, errors=[error_item]
)
return conv_res
def _execute_pipeline(
self, in_doc: InputDocument, raises_on_error: bool
) -> ConversionResult:
if in_doc.valid:
pipeline = self._get_pipeline(in_doc.format)
if pipeline is not None:
conv_res = pipeline.execute(in_doc, raises_on_error=raises_on_error)
else:
if raises_on_error:
raise ConversionError(
f"No pipeline could be initialized for {in_doc.file}."
)
else:
conv_res = ConversionResult(
input=in_doc,
status=ConversionStatus.FAILURE,
)
else:
if raises_on_error:
raise ConversionError(f"Input document {in_doc.file} is not valid.")
else:
# invalid doc or not of desired format
conv_res = ConversionResult(
input=in_doc,
status=ConversionStatus.FAILURE,
)
# TODO add error log why it failed.
return conv_res
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/datamodel/accelerator_options.py | docling/datamodel/accelerator_options.py | import logging
import os
import re
from enum import Enum
from typing import Any, Union
from pydantic import field_validator, model_validator
from pydantic_settings import BaseSettings, SettingsConfigDict
# Module-level logger, named after this module.
_log = logging.getLogger(__name__)
class AcceleratorDevice(str, Enum):
    """Devices to run model inference."""

    AUTO = "auto"  # select the best available device automatically
    CPU = "cpu"
    CUDA = "cuda"
    MPS = "mps"  # Apple Metal Performance Shaders
class AcceleratorOptions(BaseSettings):
    """Inference-accelerator settings, configurable via DOCLING_* environment variables."""

    model_config = SettingsConfigDict(
        env_prefix="DOCLING_", env_nested_delimiter="_", populate_by_name=True
    )

    num_threads: int = 4
    device: Union[str, AcceleratorDevice] = "auto"
    cuda_use_flash_attention2: bool = False

    @field_validator("device")
    def validate_device(cls, value):
        # "auto", "cpu", "cuda", "mps", or "cuda:N"
        known_devices = {member.value for member in AcceleratorDevice}
        if value in known_devices or re.match(r"^cuda(:\d+)?$", value):
            return value
        raise ValueError(
            "Invalid device option. Use 'auto', 'cpu', 'mps', 'cuda', or 'cuda:N'."
        )

    @model_validator(mode="before")
    @classmethod
    def check_alternative_envvars(cls, data: Any) -> Any:
        r"""
        Set num_threads from the "alternative" envvar OMP_NUM_THREADS.
        The alternative envvar is used only if it is valid and the regular envvar is not set.

        Notice: The standard pydantic settings mechanism with parameter "aliases" does not provide
        the same functionality. In case the alias envvar is set and the user tries to override the
        parameter in settings initialization, Pydantic treats the parameter provided in __init__()
        as an extra input instead of simply overwriting the evvar value for that parameter.
        """
        if not isinstance(data, dict):
            return data
        # Explicit input (or the regular envvar) always wins over the fallback.
        if data.get("num_threads") is not None:
            return data
        if os.getenv("DOCLING_NUM_THREADS") is not None:
            return data
        omp_num_threads = os.getenv("OMP_NUM_THREADS")
        if omp_num_threads is not None:
            try:
                data["num_threads"] = int(omp_num_threads)
            except ValueError:
                _log.error(
                    "Ignoring misformatted envvar OMP_NUM_THREADS '%s'",
                    omp_num_threads,
                )
        return data
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/datamodel/extraction.py | docling/datamodel/extraction.py | """Data models for document extraction functionality."""
from typing import Any, Dict, List, Optional, Type, Union
from pydantic import BaseModel, Field
from docling.datamodel.base_models import ConversionStatus, ErrorItem, VlmStopReason
from docling.datamodel.document import InputDocument
class ExtractedPageData(BaseModel):
    """Data model for extracted content from a single page."""

    # 1-indexed page number within the source document.
    page_no: int = Field(..., description="1-indexed page number")
    # Structured extraction output; None when extraction produced nothing.
    extracted_data: Optional[Dict[str, Any]] = Field(
        None, description="Extracted structured data from the page"
    )
    raw_text: Optional[str] = Field(None, description="Raw extracted text")
    # Per-page error messages; empty list when extraction succeeded cleanly.
    errors: List[str] = Field(
        default_factory=list,
        description="Any errors encountered during extraction for this page",
    )
class ExtractionResult(BaseModel):
    """Result of document extraction."""

    # The document the extraction was run on.
    input: InputDocument
    status: ConversionStatus = ConversionStatus.PENDING
    # Document-level errors (pydantic copies this default per instance).
    errors: List[ErrorItem] = []
    # Pages field - always a list for consistency
    pages: List[ExtractedPageData] = Field(
        default_factory=list, description="Extracted data from each page"
    )
# Type alias for template parameters that can be a string, a dict, or a
# pydantic BaseModel (either an instance or the model class itself).
ExtractionTemplateType = Union[str, Dict[str, Any], BaseModel, Type[BaseModel]]
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/datamodel/document.py | docling/datamodel/document.py | import csv
import importlib
import json
import logging
import platform
import re
import sys
import tarfile
import zipfile
from collections.abc import Iterable, Mapping
from datetime import datetime
from enum import Enum
from io import BytesIO
from pathlib import Path, PurePath
from typing import (
TYPE_CHECKING,
Annotated,
Literal,
Optional,
Type,
Union,
cast,
)
import filetype
# DO NOT REMOVE; explicitly exposed from this location
from docling_core.types.doc import (
DocItem,
DocItemLabel,
DoclingDocument,
PictureItem,
SectionHeaderItem,
TableItem,
TextItem,
)
from docling_core.types.doc.document import ListItem
from docling_core.types.legacy_doc.base import (
BaseText,
Figure,
GlmTableCell,
PageDimensions,
PageReference,
Prov,
Ref,
Table as DsSchemaTable,
TableCell,
)
from docling_core.types.legacy_doc.document import (
CCSDocumentDescription as DsDocumentDescription,
CCSFileInfoObject as DsFileInfoObject,
ExportedCCSDocument as DsDocument,
)
from docling_core.utils.file import resolve_source_to_stream
from docling_core.utils.legacy import docling_document_to_legacy
from pydantic import BaseModel, Field
from typing_extensions import deprecated
from docling.backend.abstract_backend import (
AbstractDocumentBackend,
DeclarativeDocumentBackend,
PaginatedDocumentBackend,
)
from docling.datamodel.backend_options import BackendOptions
from docling.datamodel.base_models import (
AssembledUnit,
ConfidenceReport,
ConversionStatus,
DocumentStream,
ErrorItem,
FormatToExtensions,
FormatToMimeType,
InputFormat,
MimeTypeToFormat,
Page,
)
from docling.datamodel.settings import DocumentLimits
from docling.utils.profiling import ProfilingItem
from docling.utils.utils import create_file_hash
if TYPE_CHECKING:
from docling.datamodel.base_models import BaseFormatOption
from docling.document_converter import FormatOption
# Module-level logger, named after this module.
_log = logging.getLogger(__name__)
# Mapping from DoclingDocument layout labels to legacy (DS schema) type names,
# used when exporting to the v1 document representation.
layout_label_to_ds_type = {
    DocItemLabel.TITLE: "title",
    DocItemLabel.DOCUMENT_INDEX: "table",
    DocItemLabel.SECTION_HEADER: "subtitle-level-1",
    DocItemLabel.CHECKBOX_SELECTED: "checkbox-selected",
    DocItemLabel.CHECKBOX_UNSELECTED: "checkbox-unselected",
    DocItemLabel.CAPTION: "caption",
    DocItemLabel.PAGE_HEADER: "page-header",
    DocItemLabel.PAGE_FOOTER: "page-footer",
    DocItemLabel.FOOTNOTE: "footnote",
    DocItemLabel.TABLE: "table",
    DocItemLabel.FORMULA: "equation",
    DocItemLabel.LIST_ITEM: "paragraph",
    DocItemLabel.CODE: "paragraph",
    DocItemLabel.PICTURE: "figure",
    DocItemLabel.TEXT: "paragraph",
    DocItemLabel.PARAGRAPH: "paragraph",
    DocItemLabel.FORM: DocItemLabel.FORM.value,
    DocItemLabel.KEY_VALUE_REGION: DocItemLabel.KEY_VALUE_REGION.value,
}

# Shared placeholder document used as a field default (never mutated in place).
_EMPTY_DOCLING_DOC = DoclingDocument(name="dummy")
class InputDocument(BaseModel):
    """A document as an input of a Docling conversion."""

    file: Annotated[
        PurePath, Field(description="A path representation the input document.")
    ]
    document_hash: Annotated[
        str,
        Field(description="A stable hash of the path or stream of the input document."),
    ]
    valid: bool = Field(True, description="Whether this is is a valid input document.")
    backend_options: Optional[BackendOptions] = Field(
        None, description="Custom options for backends."
    )
    limits: DocumentLimits = Field(
        DocumentLimits(), description="Limits in the input document for the conversion."
    )
    format: Annotated[InputFormat, Field(description="The document format.")]
    filesize: Optional[int] = Field(
        None, description="Size of the input file, in bytes."
    )
    page_count: int = Field(0, description="Number of pages in the input document.")
    # Backend instance; created by _init_doc, private (excluded from the schema).
    _backend: AbstractDocumentBackend

    def __init__(
        self,
        path_or_stream: Union[BytesIO, Path],
        format: InputFormat,
        backend: Type[AbstractDocumentBackend],
        backend_options: Optional[BackendOptions] = None,
        filename: Optional[str] = None,
        limits: Optional[DocumentLimits] = None,
    ) -> None:
        """Validate size/page limits and initialize the backend for the document.

        Any failure (oversize, too many pages, unreadable file, backend error)
        marks the document invalid instead of propagating, so batch conversion
        can continue with the remaining inputs.
        """
        # Pydantic requires all fields at construction; real values are filled below.
        super().__init__(
            file="",
            document_hash="",
            format=InputFormat.PDF,
            backend_options=backend_options,
        )  # initialize with dummy values

        self.limits = limits or DocumentLimits()
        self.format = format

        try:
            if isinstance(path_or_stream, Path):
                self.file = path_or_stream
                self.filesize = path_or_stream.stat().st_size
                if self.filesize > self.limits.max_file_size:
                    self.valid = False
                else:
                    self.document_hash = create_file_hash(path_or_stream)
                    self._init_doc(backend, path_or_stream)
            elif isinstance(path_or_stream, BytesIO):
                assert filename is not None, (
                    "Can't construct InputDocument from stream without providing "
                    "filename arg."
                )
                self.file = PurePath(filename)
                self.filesize = path_or_stream.getbuffer().nbytes
                if self.filesize > self.limits.max_file_size:
                    self.valid = False
                else:
                    self.document_hash = create_file_hash(path_or_stream)
                    self._init_doc(backend, path_or_stream)
            else:
                raise RuntimeError(
                    f"Unexpected type path_or_stream: {type(path_or_stream)}"
                )

            # For paginated backends, check if the maximum page count is exceeded.
            if self.valid and self._backend.is_valid():
                if self._backend.supports_pagination() and isinstance(
                    self._backend, PaginatedDocumentBackend
                ):
                    self.page_count = self._backend.page_count()
                    if not self.page_count <= self.limits.max_num_pages:
                        self.valid = False
                    # A page range starting beyond the last page is unsatisfiable.
                    elif self.page_count < self.limits.page_range[0]:
                        self.valid = False

        except (FileNotFoundError, OSError) as e:
            self.valid = False
            _log.exception(
                f"File {self.file.name} not found or cannot be opened.", exc_info=e
            )
            # raise
        except RuntimeError as e:
            self.valid = False
            _log.exception(
                "An unexpected error occurred while opening the document "
                f"{self.file.name}",
                exc_info=e,
            )
            # raise

    def _init_doc(
        self,
        backend: Type[AbstractDocumentBackend],
        path_or_stream: Union[BytesIO, Path],
    ) -> None:
        # Instantiate the backend; pass options only when explicitly provided,
        # since not every backend accepts an `options` kwarg.
        if self.backend_options:
            self._backend = backend(
                self,
                path_or_stream=path_or_stream,
                options=self.backend_options,
            )
        else:
            self._backend = backend(self, path_or_stream=path_or_stream)
        if not self._backend.is_valid():
            self.valid = False
class DocumentFormat(str, Enum):
    """Version of the exported document schema."""

    V2 = "v2"
    V1 = "v1"  # legacy schema
class DoclingVersion(BaseModel):
    """Snapshot of docling package versions and the runtime platform.

    Defaults are evaluated at import time of this module.
    """

    docling_version: str = importlib.metadata.version("docling")
    docling_core_version: str = importlib.metadata.version("docling-core")
    docling_ibm_models_version: str = importlib.metadata.version("docling-ibm-models")
    docling_parse_version: str = importlib.metadata.version("docling-parse")
    platform_str: str = platform.platform()
    py_impl_version: str = sys.implementation.cache_tag
    py_lang_version: str = platform.python_version()
class ConversionAssets(BaseModel):
    """Everything produced by a conversion run, serializable to/from a ZIP of JSON files."""

    version: DoclingVersion = DoclingVersion()
    # When the assets were saved (ISO string from datetime.now())
    timestamp: Optional[str] = None
    status: ConversionStatus = ConversionStatus.PENDING  # failure, success
    errors: list[ErrorItem] = []  # structure to keep errors
    pages: list[Page] = []
    timings: dict[str, ProfilingItem] = {}
    confidence: ConfidenceReport = Field(default_factory=ConfidenceReport)
    document: DoclingDocument = _EMPTY_DOCLING_DOC

    @property
    @deprecated("Use document instead.")
    def legacy_document(self):
        # Legacy (v1) representation, derived on demand from `document`.
        return docling_document_to_legacy(self.document)

    def save(
        self,
        *,
        filename: Union[str, Path],
        indent: Optional[int] = 2,
    ):
        """Serialize the full ConversionAssets to JSON."""
        if isinstance(filename, str):
            filename = Path(filename)

        # Build an in-memory ZIP archive containing JSON for each asset
        buf = BytesIO()

        def to_jsonable(obj):
            # Recursively convert pydantic models, enums, and containers into
            # plain JSON-serializable values.
            try:
                # pydantic v2 models
                if hasattr(obj, "model_dump"):
                    return obj.model_dump(mode="json")  # type: ignore[attr-defined]
            except TypeError:
                # some models may not accept mode argument
                return obj.model_dump()  # type: ignore[attr-defined]
            # enums
            try:
                from enum import Enum

                if isinstance(obj, Enum):
                    return obj.value
            except Exception:
                pass
            # containers
            if isinstance(obj, list):
                return [to_jsonable(x) for x in obj]
            if isinstance(obj, dict):
                return {k: to_jsonable(v) for k, v in obj.items()}
            # passthrough primitives
            return obj

        with zipfile.ZipFile(buf, mode="w", compression=zipfile.ZIP_DEFLATED) as zf:

            def write_json(name: str, payload) -> None:
                # One archive member per asset, UTF-8 JSON.
                data = json.dumps(
                    to_jsonable(payload), ensure_ascii=False, indent=indent
                )
                zf.writestr(name, data.encode("utf-8"))

            # Update and persist a save timestamp
            self.timestamp = datetime.now().isoformat()
            write_json("timestamp.json", self.timestamp)

            # Store each component in its own JSON file
            write_json("version.json", self.version)
            write_json("status.json", self.status)
            write_json("errors.json", self.errors)
            write_json("pages.json", self.pages)
            write_json("timings.json", self.timings)
            write_json("confidence.json", self.confidence)

            # For the document, ensure stable schema via export_to_dict
            doc_dict = self.document.export_to_dict()
            zf.writestr(
                "document.json",
                json.dumps(doc_dict, ensure_ascii=False, indent=indent).encode("utf-8"),
            )

        # Persist the ZIP to disk
        buf.seek(0)
        if filename.parent and not filename.parent.exists():
            filename.parent.mkdir(parents=True, exist_ok=True)
        with filename.open("wb") as f:
            f.write(buf.getvalue())

    @classmethod
    def load(cls, filename: Union[str, Path]) -> "ConversionAssets":
        """Load a ConversionAssets."""
        if isinstance(filename, str):
            filename = Path(filename)

        # Read the ZIP and deserialize all items
        # Missing or unreadable members fall back to these defaults rather
        # than failing the whole load.
        version_info: DoclingVersion = DoclingVersion()
        timestamp: Optional[str] = None
        status = ConversionStatus.PENDING
        errors: list[ErrorItem] = []
        pages: list[Page] = []
        timings: dict[str, ProfilingItem] = {}
        confidence = ConfidenceReport()
        document: DoclingDocument = _EMPTY_DOCLING_DOC

        with zipfile.ZipFile(filename, mode="r") as zf:

            def read_json(name: str):
                # Returns None when the member is absent from the archive.
                try:
                    with zf.open(name, "r") as fp:
                        return json.loads(fp.read().decode("utf-8"))
                except KeyError:
                    return None

            # version
            if (data := read_json("version.json")) is not None:
                try:
                    version_info = DoclingVersion.model_validate(data)
                except Exception as exc:
                    _log.error(f"Could not read version: {exc}")

            # timestamp
            if (data := read_json("timestamp.json")) is not None:
                if isinstance(data, str):
                    timestamp = data

            # status
            if (data := read_json("status.json")) is not None:
                try:
                    status = ConversionStatus(data)
                except Exception:
                    status = ConversionStatus.PENDING

            # errors
            if (data := read_json("errors.json")) is not None and isinstance(
                data, list
            ):
                errors = [ErrorItem.model_validate(item) for item in data]

            # pages
            if (data := read_json("pages.json")) is not None and isinstance(data, list):
                pages = [Page.model_validate(item) for item in data]

            # timings
            if (data := read_json("timings.json")) is not None and isinstance(
                data, dict
            ):
                timings = {k: ProfilingItem.model_validate(v) for k, v in data.items()}

            # confidence
            if (data := read_json("confidence.json")) is not None and isinstance(
                data, dict
            ):
                confidence = ConfidenceReport.model_validate(data)

            # document
            if (data := read_json("document.json")) is not None and isinstance(
                data, dict
            ):
                document = DoclingDocument.model_validate(data)

        return cls(
            version=version_info,
            timestamp=timestamp,
            status=status,
            errors=errors,
            pages=pages,
            timings=timings,
            confidence=confidence,
            document=document,
        )
class ConversionResult(ConversionAssets):
    """Conversion assets tied back to their input document and assembled elements."""

    # The document this result was produced from.
    input: InputDocument
    # Intermediate assembled elements/body/headers from the pipeline.
    assembled: AssembledUnit = AssembledUnit()
class _DummyBackend(AbstractDocumentBackend):
    """Placeholder backend for unrecognized formats; always reports invalid."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def is_valid(self) -> bool:
        # Always invalid: downstream marks such documents as failed/skipped.
        return False

    @classmethod
    def supported_formats(cls) -> set[InputFormat]:
        return set()

    @classmethod
    def supports_pagination(cls) -> bool:
        return False

    def unload(self):
        return super().unload()
class _DocumentConversionInput(BaseModel):
    """Internal wrapper turning user sources (paths/URLs/streams) into InputDocuments."""

    path_or_stream_iterator: Iterable[Union[Path, str, DocumentStream]]
    headers: Optional[dict[str, str]] = None
    limits: Optional[DocumentLimits] = DocumentLimits()

    def docs(
        self,
        format_options: Mapping[InputFormat, "BaseFormatOption"],
    ) -> Iterable[InputDocument]:
        """Yield one InputDocument per source, resolving URLs and guessing formats.

        Sources whose format cannot be determined or is not in *format_options*
        get the `_DummyBackend`, which marks them invalid downstream.
        """
        for item in self.path_or_stream_iterator:
            obj = (
                resolve_source_to_stream(item, self.headers)
                if isinstance(item, str)
                else item
            )
            format = self._guess_format(obj)
            backend: Type[AbstractDocumentBackend]
            backend_options: Optional[BackendOptions] = None
            if not format or format not in format_options:
                _log.error(
                    f"Input document {obj.name} with format {format} does not match "
                    f"any allowed format: ({format_options.keys()})"
                )
                backend = _DummyBackend
            else:
                options = format_options[format]
                backend = options.backend
                # Only FormatOption (not every BaseFormatOption) carries backend_options.
                if "backend_options" in options.model_fields_set:
                    backend_options = cast("FormatOption", options).backend_options

            path_or_stream: Union[BytesIO, Path]
            if isinstance(obj, Path):
                path_or_stream = obj
            elif isinstance(obj, DocumentStream):
                path_or_stream = obj.stream
            else:
                raise RuntimeError(f"Unexpected obj type in iterator: {type(obj)}")

            yield InputDocument(
                path_or_stream=path_or_stream,
                format=format,  # type: ignore[arg-type]
                filename=obj.name,
                limits=self.limits,
                backend=backend,
                backend_options=backend_options,
            )

    def _guess_format(self, obj: Union[Path, DocumentStream]) -> Optional[InputFormat]:
        """Guess the InputFormat from mime detection, extension, and content sniffing."""
        content = b""  # empty binary blob
        formats: list[InputFormat] = []

        if isinstance(obj, Path):
            mime = filetype.guess_mime(str(obj))
            if mime is None:
                ext = obj.suffix[1:]
                mime = _DocumentConversionInput._mime_from_extension(ext)
            if mime is None:  # must guess from content
                with obj.open("rb") as f:
                    content = f.read(1024)  # Read first 1KB

            # OOXML files are ZIP containers; disambiguate via the extension.
            if mime is not None and mime.lower() == "application/zip":
                mime_root = "application/vnd.openxmlformats-officedocument"
                if obj.suffixes[-1].lower() == ".xlsx":
                    mime = mime_root + ".spreadsheetml.sheet"
                elif obj.suffixes[-1].lower() == ".docx":
                    mime = mime_root + ".wordprocessingml.document"
                elif obj.suffixes[-1].lower() == ".pptx":
                    mime = mime_root + ".presentationml.presentation"

        elif isinstance(obj, DocumentStream):
            content = obj.stream.read(8192)
            obj.stream.seek(0)
            mime = filetype.guess_mime(content)
            if mime is None:
                ext = (
                    obj.name.rsplit(".", 1)[-1]
                    if ("." in obj.name and not obj.name.startswith("."))
                    else ""
                )
                mime = _DocumentConversionInput._mime_from_extension(ext.lower())

            if mime is not None and mime.lower() == "application/zip":
                objname = obj.name.lower()
                mime_root = "application/vnd.openxmlformats-officedocument"
                if objname.endswith(".xlsx"):
                    mime = mime_root + ".spreadsheetml.sheet"
                elif objname.endswith(".docx"):
                    mime = mime_root + ".wordprocessingml.document"
                elif objname.endswith(".pptx"):
                    mime = mime_root + ".presentationml.presentation"

            # Gzip may be a METS/GBS tarball; inspect the archive contents.
            if mime is not None and mime.lower() == "application/gzip":
                if detected_mime := _DocumentConversionInput._detect_mets_gbs(obj):
                    mime = detected_mime

        mime = mime or _DocumentConversionInput._detect_html_xhtml(content)
        mime = mime or _DocumentConversionInput._detect_csv(content)
        mime = mime or "text/plain"
        formats = MimeTypeToFormat.get(mime, [])
        _log.info(f"detected formats: {formats}")
        if formats:
            # BUGFIX: was `mime not in ("text/plain")` — a substring test against
            # the string "text/plain"; the intended check is tuple membership.
            if len(formats) == 1 and mime not in ("text/plain",):
                return formats[0]
            else:  # ambiguity in formats
                return _DocumentConversionInput._guess_from_content(
                    content, mime, formats
                )
        else:
            return None

    @staticmethod
    def _guess_from_content(
        content: bytes, mime: str, formats: list[InputFormat]
    ) -> Optional[InputFormat]:
        """Guess the input format of a document by checking part of its content."""
        input_format: Optional[InputFormat] = None

        if mime == "application/xml":
            content_str = content.decode("utf-8")
            match_doctype = re.search(r"<!DOCTYPE [^>]+>", content_str)
            if match_doctype:
                xml_doctype = match_doctype.group()
                if InputFormat.XML_USPTO in formats and any(
                    item in xml_doctype
                    for item in (
                        "us-patent-application-v4",
                        "us-patent-grant-v4",
                        "us-grant-025",
                        "patent-application-publication",
                    )
                ):
                    input_format = InputFormat.XML_USPTO

                if InputFormat.XML_JATS in formats and (
                    "JATS-journalpublishing" in xml_doctype
                    or "JATS-archive" in xml_doctype
                ):
                    input_format = InputFormat.XML_JATS

        elif mime == "text/plain":
            content_str = content.decode("utf-8")
            # Legacy USPTO plain-text grants start with a PATN record.
            if InputFormat.XML_USPTO in formats and content_str.startswith("PATN\r\n"):
                input_format = InputFormat.XML_USPTO

        return input_format

    @staticmethod
    def _mime_from_extension(ext):
        """Map a (lowercase, dot-less) file extension to its primary mime type."""
        mime = None
        if ext in FormatToExtensions[InputFormat.ASCIIDOC]:
            mime = FormatToMimeType[InputFormat.ASCIIDOC][0]
        elif ext in FormatToExtensions[InputFormat.HTML]:
            mime = FormatToMimeType[InputFormat.HTML][0]
        elif ext in FormatToExtensions[InputFormat.MD]:
            mime = FormatToMimeType[InputFormat.MD][0]
        elif ext in FormatToExtensions[InputFormat.CSV]:
            mime = FormatToMimeType[InputFormat.CSV][0]
        elif ext in FormatToExtensions[InputFormat.JSON_DOCLING]:
            mime = FormatToMimeType[InputFormat.JSON_DOCLING][0]
        elif ext in FormatToExtensions[InputFormat.PDF]:
            mime = FormatToMimeType[InputFormat.PDF][0]
        elif ext in FormatToExtensions[InputFormat.DOCX]:
            mime = FormatToMimeType[InputFormat.DOCX][0]
        elif ext in FormatToExtensions[InputFormat.PPTX]:
            mime = FormatToMimeType[InputFormat.PPTX][0]
        elif ext in FormatToExtensions[InputFormat.XLSX]:
            mime = FormatToMimeType[InputFormat.XLSX][0]
        elif ext in FormatToExtensions[InputFormat.VTT]:
            mime = FormatToMimeType[InputFormat.VTT][0]
        return mime

    @staticmethod
    def _detect_html_xhtml(
        content: bytes,
    ) -> Optional[Literal["application/xhtml+xml", "application/xml", "text/html"]]:
        """Guess the mime type of an XHTML, HTML, or XML file from its content.

        Args:
            content: A short piece of a document from its beginning.

        Returns:
            The mime type of an XHTML, HTML, or XML file, or None if the content does
            not match any of these formats.
        """
        content_str = content.decode("ascii", errors="ignore").lower()
        # Remove XML comments
        content_str = re.sub(r"<!--(.*?)-->", "", content_str, flags=re.DOTALL)
        content_str = content_str.lstrip()
        if re.match(r"<\?xml", content_str):
            if "xhtml" in content_str[:1000]:
                return "application/xhtml+xml"
            else:
                return "application/xml"
        if re.match(
            r"(<script.*?>.*?</script>\s*)?(<!doctype\s+html|<html|<head|<body)",
            content_str,
            re.DOTALL,
        ):
            return "text/html"

        # Generic XML: a DOCTYPE whose declared root element actually follows.
        p = re.compile(
            r"<!doctype\s+(?P<root>[a-zA-Z_:][a-zA-Z0-9_:.-]*)\s+.*>\s*<(?P=root)\b"
        )
        if p.search(content_str):
            return "application/xml"

        return None

    @staticmethod
    def _detect_csv(
        content: bytes,
    ) -> Optional[Literal["text/csv"]]:
        """Guess the mime type of a CSV file from its content.

        Args:
            content: A short piece of a document from its beginning.

        Returns:
            The mime type of a CSV file, or None if the content does
            not match any of the format.
        """
        content_str = content.decode("ascii", errors="ignore").strip()

        # Ensure there's at least one newline (CSV is usually multi-line)
        if "\n" not in content_str:
            return None

        # Use csv.Sniffer to detect CSV characteristics
        try:
            dialect = csv.Sniffer().sniff(content_str)
            if dialect.delimiter in {",", ";", "\t", "|"}:  # Common delimiters
                return "text/csv"
        except csv.Error:
            return None

        return None

    @staticmethod
    def _detect_mets_gbs(
        obj: Union[Path, DocumentStream],
    ) -> Optional[Literal["application/mets+xml"]]:
        """Detect a METS (Google Books style) gzip tarball by scanning its XML members."""
        content = obj if isinstance(obj, Path) else obj.stream
        tar: tarfile.TarFile
        member: tarfile.TarInfo
        with tarfile.open(
            name=content if isinstance(content, Path) else None,
            fileobj=content if isinstance(content, BytesIO) else None,
            mode="r:gz",
        ) as tar:
            for member in tar.getmembers():
                if member.name.endswith(".xml"):
                    file = tar.extractfile(member)
                    if file is not None:
                        content_str = file.read().decode(errors="ignore")
                        if "http://www.loc.gov/METS/" in content_str:
                            return "application/mets+xml"
        return None
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/datamodel/vlm_model_specs.py | docling/datamodel/vlm_model_specs.py | import logging
from enum import Enum
from pydantic import (
AnyUrl,
)
from docling.datamodel.accelerator_options import AcceleratorDevice
from docling.datamodel.pipeline_options_vlm_model import (
ApiVlmOptions,
InferenceFramework,
InlineVlmOptions,
ResponseFormat,
TransformersModelType,
TransformersPromptStyle,
)
_log = logging.getLogger(__name__)
# Granite-Docling
GRANITEDOCLING_TRANSFORMERS = InlineVlmOptions(
repo_id="ibm-granite/granite-docling-258M",
prompt="Convert this page to docling.",
response_format=ResponseFormat.DOCTAGS,
inference_framework=InferenceFramework.TRANSFORMERS,
transformers_model_type=TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT,
supported_devices=[
AcceleratorDevice.CPU,
AcceleratorDevice.CUDA,
],
extra_generation_config=dict(skip_special_tokens=False),
scale=2.0,
temperature=0.0,
max_new_tokens=8192,
stop_strings=["</doctag>", "<|end_of_text|>"],
)
GRANITEDOCLING_VLLM = GRANITEDOCLING_TRANSFORMERS.model_copy()
GRANITEDOCLING_VLLM.inference_framework = InferenceFramework.VLLM
GRANITEDOCLING_MLX = InlineVlmOptions(
repo_id="ibm-granite/granite-docling-258M-mlx",
prompt="Convert this page to docling.",
response_format=ResponseFormat.DOCTAGS,
inference_framework=InferenceFramework.MLX,
supported_devices=[AcceleratorDevice.MPS],
scale=2.0,
temperature=0.0,
max_new_tokens=8192,
stop_strings=["</doctag>", "<|end_of_text|>"],
)
GRANITEDOCLING_VLLM_API = ApiVlmOptions(
url="http://localhost:8000/v1/chat/completions", # LM studio defaults to port 1234, VLLM to 8000
params=dict(
model=GRANITEDOCLING_TRANSFORMERS.repo_id,
max_tokens=4096,
skip_special_tokens=True,
),
prompt=GRANITEDOCLING_TRANSFORMERS.prompt,
timeout=90,
scale=2.0,
temperature=0.0,
concurrency=4,
stop_strings=["</doctag>", "<|end_of_text|>"],
response_format=ResponseFormat.DOCTAGS,
)
GRANITEDOCLING_OLLAMA = GRANITEDOCLING_VLLM_API.model_copy()
GRANITEDOCLING_OLLAMA.url = AnyUrl("http://localhost:11434/v1/chat/completions")
GRANITEDOCLING_OLLAMA.params["model"] = "ibm/granite-docling:258m"
# SmolDocling
SMOLDOCLING_MLX = InlineVlmOptions(
repo_id="docling-project/SmolDocling-256M-preview-mlx-bf16",
prompt="Convert this page to docling.",
response_format=ResponseFormat.DOCTAGS,
inference_framework=InferenceFramework.MLX,
supported_devices=[AcceleratorDevice.MPS],
scale=2.0,
temperature=0.0,
stop_strings=["</doctag>", "<end_of_utterance>"],
)
SMOLDOCLING_TRANSFORMERS = InlineVlmOptions(
repo_id="docling-project/SmolDocling-256M-preview",
prompt="Convert this page to docling.",
response_format=ResponseFormat.DOCTAGS,
inference_framework=InferenceFramework.TRANSFORMERS,
transformers_model_type=TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT,
supported_devices=[
AcceleratorDevice.CPU,
AcceleratorDevice.CUDA,
],
torch_dtype="bfloat16",
scale=2.0,
temperature=0.0,
stop_strings=["</doctag>", "<end_of_utterance>"],
)
SMOLDOCLING_VLLM = InlineVlmOptions(
repo_id="docling-project/SmolDocling-256M-preview",
prompt="Convert this page to docling.",
response_format=ResponseFormat.DOCTAGS,
inference_framework=InferenceFramework.VLLM,
supported_devices=[
AcceleratorDevice.CUDA,
],
scale=2.0,
temperature=0.0,
stop_strings=["</doctag>", "<end_of_utterance>"],
)
# SmolVLM-256M-Instruct
SMOLVLM256_TRANSFORMERS = InlineVlmOptions(
repo_id="HuggingFaceTB/SmolVLM-256M-Instruct",
prompt="Transcribe this image to plain text.",
response_format=ResponseFormat.PLAINTEXT,
inference_framework=InferenceFramework.TRANSFORMERS,
transformers_model_type=TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT,
supported_devices=[
AcceleratorDevice.CPU,
AcceleratorDevice.CUDA,
# AcceleratorDevice.MPS,
],
torch_dtype="bfloat16",
scale=2.0,
temperature=0.0,
)
# SmolVLM-256M-Instruct (MLX)
SMOLVLM256_MLX = InlineVlmOptions(
repo_id="moot20/SmolVLM-256M-Instruct-MLX",
prompt="Extract the text.",
response_format=ResponseFormat.DOCTAGS,
inference_framework=InferenceFramework.MLX,
transformers_model_type=TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT,
supported_devices=[
AcceleratorDevice.MPS,
],
scale=2.0,
temperature=0.0,
)
SMOLVLM256_VLLM = InlineVlmOptions(
repo_id="HuggingFaceTB/SmolVLM-256M-Instruct",
prompt="Transcribe this image to plain text.",
response_format=ResponseFormat.PLAINTEXT,
inference_framework=InferenceFramework.VLLM,
supported_devices=[
AcceleratorDevice.CUDA,
],
scale=2.0,
temperature=0.0,
)
# GraniteVision
GRANITE_VISION_TRANSFORMERS = InlineVlmOptions(
repo_id="ibm-granite/granite-vision-3.2-2b",
prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
response_format=ResponseFormat.MARKDOWN,
inference_framework=InferenceFramework.TRANSFORMERS,
transformers_model_type=TransformersModelType.AUTOMODEL_VISION2SEQ,
supported_devices=[
AcceleratorDevice.CPU,
AcceleratorDevice.CUDA,
AcceleratorDevice.MPS,
],
scale=2.0,
temperature=0.0,
)
GRANITE_VISION_VLLM = InlineVlmOptions(
repo_id="ibm-granite/granite-vision-3.2-2b",
prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
response_format=ResponseFormat.MARKDOWN,
inference_framework=InferenceFramework.VLLM,
supported_devices=[
AcceleratorDevice.CUDA,
],
scale=2.0,
temperature=0.0,
)
GRANITE_VISION_OLLAMA = ApiVlmOptions(
url=AnyUrl("http://localhost:11434/v1/chat/completions"),
params={"model": "granite3.2-vision:2b"},
prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
scale=1.0,
timeout=120,
response_format=ResponseFormat.MARKDOWN,
temperature=0.0,
)
# Pixtral
PIXTRAL_12B_TRANSFORMERS = InlineVlmOptions(
repo_id="mistral-community/pixtral-12b",
prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
response_format=ResponseFormat.MARKDOWN,
inference_framework=InferenceFramework.TRANSFORMERS,
transformers_model_type=TransformersModelType.AUTOMODEL_VISION2SEQ,
supported_devices=[AcceleratorDevice.CPU, AcceleratorDevice.CUDA],
scale=2.0,
temperature=0.0,
)
PIXTRAL_12B_MLX = InlineVlmOptions(
repo_id="mlx-community/pixtral-12b-bf16",
prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
response_format=ResponseFormat.MARKDOWN,
inference_framework=InferenceFramework.MLX,
supported_devices=[AcceleratorDevice.MPS],
scale=2.0,
temperature=0.0,
)
# Phi4
PHI4_TRANSFORMERS = InlineVlmOptions(
repo_id="microsoft/Phi-4-multimodal-instruct",
prompt="Convert this page to MarkDown. Do not miss any text and only output the bare markdown",
trust_remote_code=True,
response_format=ResponseFormat.MARKDOWN,
inference_framework=InferenceFramework.TRANSFORMERS,
transformers_model_type=TransformersModelType.AUTOMODEL_CAUSALLM,
supported_devices=[AcceleratorDevice.CPU, AcceleratorDevice.CUDA],
scale=2.0,
temperature=0.0,
extra_generation_config=dict(num_logits_to_keep=0),
)
# Qwen
QWEN25_VL_3B_MLX = InlineVlmOptions(
repo_id="mlx-community/Qwen2.5-VL-3B-Instruct-bf16",
prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
response_format=ResponseFormat.MARKDOWN,
inference_framework=InferenceFramework.MLX,
supported_devices=[AcceleratorDevice.MPS],
scale=2.0,
temperature=0.0,
)
# GoT 2.0
GOT2_TRANSFORMERS = InlineVlmOptions(
repo_id="stepfun-ai/GOT-OCR-2.0-hf",
prompt="",
response_format=ResponseFormat.MARKDOWN,
inference_framework=InferenceFramework.TRANSFORMERS,
transformers_prompt_style=TransformersPromptStyle.NONE,
transformers_model_type=TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT,
supported_devices=[
AcceleratorDevice.CPU,
AcceleratorDevice.CUDA,
# AcceleratorDevice.MPS,
],
scale=2.0,
temperature=0.0,
stop_strings=["<|im_end|>"],
extra_processor_kwargs={"format": True},
)
# Gemma-3
GEMMA3_12B_MLX = InlineVlmOptions(
repo_id="mlx-community/gemma-3-12b-it-bf16",
prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
response_format=ResponseFormat.MARKDOWN,
inference_framework=InferenceFramework.MLX,
supported_devices=[AcceleratorDevice.MPS],
scale=2.0,
temperature=0.0,
)
GEMMA3_27B_MLX = InlineVlmOptions(
repo_id="mlx-community/gemma-3-27b-it-bf16",
prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
response_format=ResponseFormat.MARKDOWN,
inference_framework=InferenceFramework.MLX,
supported_devices=[AcceleratorDevice.MPS],
scale=2.0,
temperature=0.0,
)
# Dolphin
DOLPHIN_TRANSFORMERS = InlineVlmOptions(
repo_id="ByteDance/Dolphin",
prompt="<s>Read text in the image. <Answer/>",
response_format=ResponseFormat.MARKDOWN,
inference_framework=InferenceFramework.TRANSFORMERS,
transformers_model_type=TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT,
transformers_prompt_style=TransformersPromptStyle.RAW,
supported_devices=[
AcceleratorDevice.CUDA,
AcceleratorDevice.CPU,
AcceleratorDevice.MPS,
],
scale=2.0,
temperature=0.0,
)
# NuExtract
NU_EXTRACT_2B_TRANSFORMERS = InlineVlmOptions(
repo_id="numind/NuExtract-2.0-2B",
revision="fe5b2f0b63b81150721435a3ca1129a75c59c74e", # 489efed leads to MPS issues
prompt="", # This won't be used, template is passed separately
torch_dtype="bfloat16",
inference_framework=InferenceFramework.TRANSFORMERS,
transformers_model_type=TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT,
response_format=ResponseFormat.PLAINTEXT,
supported_devices=[
AcceleratorDevice.CPU,
AcceleratorDevice.CUDA,
AcceleratorDevice.MPS,
],
scale=2.0,
temperature=0.0,
)
class VlmModelType(str, Enum):
    """Named presets selecting a VLM model configuration."""

    SMOLDOCLING = "smoldocling"
    SMOLDOCLING_VLLM = "smoldocling_vllm"
    GRANITE_VISION = "granite_vision"
    GRANITE_VISION_VLLM = "granite_vision_vllm"
    GRANITE_VISION_OLLAMA = "granite_vision_ollama"
    GOT_OCR_2 = "got_ocr_2"
    GRANITEDOCLING = "granite_docling"
    GRANITEDOCLING_VLLM = "granite_docling_vllm"
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/datamodel/settings.py | docling/datamodel/settings.py | import sys
from pathlib import Path
from typing import Annotated, Optional, Tuple
from pydantic import BaseModel, PlainValidator
from pydantic_settings import BaseSettings, SettingsConfigDict
def _validate_page_range(v: Tuple[int, int]) -> Tuple[int, int]:
if v[0] < 1 or v[1] < v[0]:
raise ValueError(
"Invalid page range: start must be ≥ 1 and end must be ≥ start."
)
return v
PageRange = Annotated[Tuple[int, int], PlainValidator(_validate_page_range)]
DEFAULT_PAGE_RANGE: PageRange = (1, sys.maxsize)
class DocumentLimits(BaseModel):
    """Hard limits applied when loading a document."""

    max_num_pages: int = sys.maxsize  # maximum page count accepted
    max_file_size: int = sys.maxsize  # maximum file size -- presumably bytes; TODO confirm
    page_range: PageRange = DEFAULT_PAGE_RANGE  # inclusive (start, end) page window
class BatchConcurrencySettings(BaseModel):
    """Batching and parallelism knobs for the conversion pipeline."""

    doc_batch_size: int = 1  # Number of documents processed in one batch. Should be >= doc_batch_concurrency
    doc_batch_concurrency: int = 1  # Number of parallel threads processing documents. Warning: Experimental! No benefit expected without free-threaded python.
    page_batch_size: int = 4  # Number of pages processed in one batch.
    page_batch_concurrency: int = 1  # Currently unused.
    elements_batch_size: int = (
        16  # Number of elements processed in one batch, in enrichment models.
    )

    # To force models into single core: export OMP_NUM_THREADS=1
class DebugSettings(BaseModel):
    """Flags for dumping intermediate visualizations and profiling data."""

    visualize_cells: bool = False
    visualize_ocr: bool = False
    visualize_layout: bool = False
    visualize_raw_layout: bool = False
    visualize_tables: bool = False

    profile_pipeline_timings: bool = False

    # Path used to output debug information.
    debug_output_path: str = str(Path.cwd() / "debug")
class AppSettings(BaseSettings):
    """Global docling settings.

    Values can be overridden through environment variables with the
    ``DOCLING_`` prefix; nested fields use ``_`` as the delimiter.
    """

    model_config = SettingsConfigDict(
        env_prefix="DOCLING_", env_nested_delimiter="_", env_nested_max_split=1
    )

    perf: BatchConcurrencySettings = BatchConcurrencySettings()
    debug: DebugSettings = DebugSettings()

    cache_dir: Path = Path.home() / ".cache" / "docling"
    artifacts_path: Optional[Path] = None  # optional local path with pre-downloaded model artifacts -- TODO confirm
settings = AppSettings()
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/datamodel/base_models.py | docling/datamodel/base_models.py | from collections import defaultdict
from enum import Enum
from typing import TYPE_CHECKING, Optional, Type, Union
import numpy as np
from docling_core.types.doc import (
BoundingBox,
DocItemLabel,
NodeItem,
PictureDataType,
Size,
TableCell,
)
from docling_core.types.doc.base import PydanticSerCtxKey, round_pydantic_float
from docling_core.types.doc.page import SegmentedPdfPage, TextCell
from docling_core.types.io import DocumentStream
# DO NOT REMOVE; explicitly exposed from this location
from PIL.Image import Image
from pydantic import (
BaseModel,
ConfigDict,
Field,
FieldSerializationInfo,
computed_field,
field_serializer,
field_validator,
)
if TYPE_CHECKING:
from docling.backend.pdf_backend import PdfPageBackend
from docling.backend.abstract_backend import AbstractDocumentBackend
from docling.datamodel.pipeline_options import PipelineOptions
class BaseFormatOption(BaseModel):
"""Base class for format options used by _DocumentConversionInput."""
pipeline_options: Optional[PipelineOptions] = None
backend: Type[AbstractDocumentBackend]
model_config = ConfigDict(arbitrary_types_allowed=True)
class ConversionStatus(str, Enum):
PENDING = "pending"
STARTED = "started"
FAILURE = "failure"
SUCCESS = "success"
PARTIAL_SUCCESS = "partial_success"
SKIPPED = "skipped"
class InputFormat(str, Enum):
"""A document format supported by document backend parsers."""
DOCX = "docx"
PPTX = "pptx"
HTML = "html"
IMAGE = "image"
PDF = "pdf"
ASCIIDOC = "asciidoc"
MD = "md"
CSV = "csv"
XLSX = "xlsx"
XML_USPTO = "xml_uspto"
XML_JATS = "xml_jats"
METS_GBS = "mets_gbs"
JSON_DOCLING = "json_docling"
AUDIO = "audio"
VTT = "vtt"
class OutputFormat(str, Enum):
MARKDOWN = "md"
JSON = "json"
YAML = "yaml"
HTML = "html"
HTML_SPLIT_PAGE = "html_split_page"
TEXT = "text"
DOCTAGS = "doctags"
FormatToExtensions: dict[InputFormat, list[str]] = {
InputFormat.DOCX: ["docx", "dotx", "docm", "dotm"],
InputFormat.PPTX: ["pptx", "potx", "ppsx", "pptm", "potm", "ppsm"],
InputFormat.PDF: ["pdf"],
InputFormat.MD: ["md"],
InputFormat.HTML: ["html", "htm", "xhtml"],
InputFormat.XML_JATS: ["xml", "nxml"],
InputFormat.IMAGE: ["jpg", "jpeg", "png", "tif", "tiff", "bmp", "webp"],
InputFormat.ASCIIDOC: ["adoc", "asciidoc", "asc"],
InputFormat.CSV: ["csv"],
InputFormat.XLSX: ["xlsx", "xlsm"],
InputFormat.XML_USPTO: ["xml", "txt"],
InputFormat.METS_GBS: ["tar.gz"],
InputFormat.JSON_DOCLING: ["json"],
InputFormat.AUDIO: ["wav", "mp3", "m4a", "aac", "ogg", "flac", "mp4", "avi", "mov"],
InputFormat.VTT: ["vtt"],
}
FormatToMimeType: dict[InputFormat, list[str]] = {
InputFormat.DOCX: [
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"application/vnd.openxmlformats-officedocument.wordprocessingml.template",
],
InputFormat.PPTX: [
"application/vnd.openxmlformats-officedocument.presentationml.template",
"application/vnd.openxmlformats-officedocument.presentationml.slideshow",
"application/vnd.openxmlformats-officedocument.presentationml.presentation",
],
InputFormat.HTML: ["text/html", "application/xhtml+xml"],
InputFormat.XML_JATS: ["application/xml"],
InputFormat.IMAGE: [
"image/png",
"image/jpeg",
"image/tiff",
"image/gif",
"image/bmp",
"image/webp",
],
InputFormat.PDF: ["application/pdf"],
InputFormat.ASCIIDOC: ["text/asciidoc"],
InputFormat.MD: ["text/markdown", "text/x-markdown"],
InputFormat.CSV: ["text/csv"],
InputFormat.XLSX: [
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
],
InputFormat.XML_USPTO: ["application/xml", "text/plain"],
InputFormat.METS_GBS: ["application/mets+xml"],
InputFormat.JSON_DOCLING: ["application/json"],
InputFormat.AUDIO: [
"audio/x-wav",
"audio/mpeg",
"audio/wav",
"audio/mp3",
"audio/mp4",
"audio/m4a",
"audio/aac",
"audio/ogg",
"audio/flac",
"audio/x-flac",
"video/mp4",
"video/avi",
"video/x-msvideo",
"video/quicktime",
],
InputFormat.VTT: ["text/vtt"],
}
# Reverse lookup table: MIME type -> list of input formats accepting it.
# Built in a single pass over FormatToMimeType; the previous dict
# comprehension re-scanned the whole table once per MIME type (quadratic).
# Iteration order is preserved: formats appear in declaration order, and
# duplicated MIME types (e.g. "application/xml") keep their first-seen
# key position.
MimeTypeToFormat: dict[str, list[InputFormat]] = {}
for _fmt, _mimes in FormatToMimeType.items():
    for _mime in _mimes:
        MimeTypeToFormat.setdefault(_mime, []).append(_fmt)
del _fmt, _mimes, _mime
class DocInputType(str, Enum):
PATH = "path"
STREAM = "stream"
class DoclingComponentType(str, Enum):
DOCUMENT_BACKEND = "document_backend"
MODEL = "model"
DOC_ASSEMBLER = "doc_assembler"
USER_INPUT = "user_input"
PIPELINE = "pipeline"
class VlmStopReason(str, Enum):
    """Reason why a VLM generation run stopped."""

    LENGTH = "length"  # max tokens reached
    STOP_SEQUENCE = "stop_sequence"  # Custom stopping criteria met
    END_OF_SEQUENCE = "end_of_sequence"  # Model generated end-of-text token
    UNSPECIFIED = "unspecified"  # Default "none" value
class ErrorItem(BaseModel):
component_type: DoclingComponentType
module_name: str
error_message: str
class Cluster(BaseModel):
    """A labeled layout region of a page and the text cells inside it."""

    id: int
    label: DocItemLabel
    bbox: BoundingBox
    confidence: float = 1.0  # prediction confidence, 1.0 by default
    cells: list[TextCell] = []
    children: list["Cluster"] = []  # Add child cluster support

    @field_serializer("confidence")
    def _serialize(self, value: float, info: FieldSerializationInfo) -> float:
        # Round confidence on serialization to the precision configured in
        # the serialization context (if any).
        return round_pydantic_float(value, info.context, PydanticSerCtxKey.CONFID_PREC)
class BasePageElement(BaseModel):
label: DocItemLabel
id: int
page_no: int
cluster: Cluster
text: Optional[str] = None
class LayoutPrediction(BaseModel):
clusters: list[Cluster] = []
class VlmPredictionToken(BaseModel):
text: str = ""
token: int = -1
logprob: float = -1
class VlmPrediction(BaseModel):
text: str = ""
generated_tokens: list[VlmPredictionToken] = []
generation_time: float = -1
num_tokens: Optional[int] = None
stop_reason: VlmStopReason = VlmStopReason.UNSPECIFIED
input_prompt: Optional[str] = None
class ContainerElement(
BasePageElement
): # Used for Form and Key-Value-Regions, only for typing.
pass
class Table(BasePageElement):
otsl_seq: list[str]
num_rows: int = 0
num_cols: int = 0
table_cells: list[TableCell]
class TableStructurePrediction(BaseModel):
table_map: dict[int, Table] = {}
class TextElement(BasePageElement):
text: str
class FigureElement(BasePageElement):
annotations: list[PictureDataType] = []
provenance: Optional[str] = None
predicted_class: Optional[str] = None
confidence: Optional[float] = None
@field_serializer("confidence")
def _serialize(
self, value: Optional[float], info: FieldSerializationInfo
) -> Optional[float]:
return (
round_pydantic_float(value, info.context, PydanticSerCtxKey.CONFID_PREC)
if value is not None
else None
)
class FigureClassificationPrediction(BaseModel):
figure_count: int = 0
figure_map: dict[int, FigureElement] = {}
class EquationPrediction(BaseModel):
equation_count: int = 0
equation_map: dict[int, TextElement] = {}
class PagePredictions(BaseModel):
layout: Optional[LayoutPrediction] = None
tablestructure: Optional[TableStructurePrediction] = None
figures_classification: Optional[FigureClassificationPrediction] = None
equations_prediction: Optional[EquationPrediction] = None
vlm_response: Optional[VlmPrediction] = None
PageElement = Union[TextElement, Table, FigureElement, ContainerElement]
class AssembledUnit(BaseModel):
elements: list[PageElement] = []
body: list[PageElement] = []
headers: list[PageElement] = []
class ItemAndImageEnrichmentElement(BaseModel):
model_config = ConfigDict(arbitrary_types_allowed=True)
item: NodeItem
image: Image
class Page(BaseModel):
    """A single page of an input document, plus its per-page predictions.

    Holds a reference to the internal PDF backend while the page is being
    processed, and a cache of rendered page images keyed by scale; both are
    cleared during assembling by default.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    page_no: int
    # page_hash: Optional[str] = None
    size: Optional[Size] = None
    parsed_page: Optional[SegmentedPdfPage] = None
    predictions: PagePredictions = PagePredictions()
    assembled: Optional[AssembledUnit] = None

    _backend: Optional["PdfPageBackend"] = (
        None  # Internal PDF backend. By default it is cleared during assembling.
    )
    _default_image_scale: float = 1.0  # Default image scale for external usage.
    _image_cache: dict[
        float, Image
    ] = {}  # Cache of images in different scales. By default it is cleared during assembling.

    @property
    def cells(self) -> list[TextCell]:
        """Return text cells as a read-only view of parsed_page.textline_cells."""
        if self.parsed_page is not None:
            return self.parsed_page.textline_cells
        else:
            return []

    def get_image(
        self,
        scale: float = 1.0,
        max_size: Optional[int] = None,
        cropbox: Optional[BoundingBox] = None,
    ) -> Optional[Image]:
        """Render (or fetch from cache) the page image.

        Args:
            scale: Rendering scale factor relative to the page size.
            max_size: If given, cap the scale so the larger page dimension
                does not exceed this value.
            cropbox: If given, return only this region of the page.

        Returns:
            The rendered image, or None if the backend is already released
            and no cached image exists at the requested scale.
        """
        if self._backend is None:
            # Backend released: only cached full-page renders can be served.
            return self._image_cache.get(scale, None)

        if max_size:
            assert self.size is not None
            scale = min(scale, max_size / max(self.size.as_tuple()))

        if scale not in self._image_cache:
            if cropbox is None:
                self._image_cache[scale] = self._backend.get_page_image(scale=scale)
            else:
                # Cropped renders are not cached: return them directly.
                return self._backend.get_page_image(scale=scale, cropbox=cropbox)

        if cropbox is None:
            return self._image_cache[scale]
        else:
            # Crop the cached full-page image; the cropbox is converted to a
            # top-left origin and scaled to match the cached render.
            page_im = self._image_cache[scale]
            assert self.size is not None
            return page_im.crop(
                cropbox.to_top_left_origin(page_height=self.size.height)
                .scaled(scale=scale)
                .as_tuple()
            )

    @property
    def image(self) -> Optional[Image]:
        """The page image rendered at the default scale."""
        return self.get_image(scale=self._default_image_scale)
## OpenAI API Request / Response Models ##
class OpenAiChatMessage(BaseModel):
role: str
content: str
class OpenAiResponseChoice(BaseModel):
index: int
message: OpenAiChatMessage
finish_reason: Optional[str]
class OpenAiResponseUsage(BaseModel):
prompt_tokens: int
completion_tokens: int
total_tokens: int
class OpenAiApiResponse(BaseModel):
model_config = ConfigDict(
protected_namespaces=(),
)
id: str
model: Optional[str] = None # returned by openai
choices: list[OpenAiResponseChoice]
created: int
usage: OpenAiResponseUsage
# Create a type alias for score values
ScoreValue = float
class QualityGrade(str, Enum):
POOR = "poor"
FAIR = "fair"
GOOD = "good"
EXCELLENT = "excellent"
UNSPECIFIED = "unspecified"
class PageConfidenceScores(BaseModel):
    """Per-page confidence scores for the different conversion stages.

    Scores default to NaN ("not computed"); NaN entries are ignored by the
    aggregations below via the np.nan* functions.
    """

    parse_score: ScoreValue = np.nan
    layout_score: ScoreValue = np.nan
    table_score: ScoreValue = np.nan
    ocr_score: ScoreValue = np.nan

    # Accept null/None or string "NaN" values on input and coerce to np.nan
    @field_validator(
        "parse_score", "layout_score", "table_score", "ocr_score", mode="before"
    )
    @classmethod
    def _coerce_none_or_nan_str(cls, v):
        if v is None:
            return np.nan
        if isinstance(v, str) and v.strip().lower() in {"nan", "null", "none", ""}:
            return np.nan
        return v

    def _score_to_grade(self, score: ScoreValue) -> QualityGrade:
        """Map a numeric score to a quality grade.

        NaN fails every comparison below, so an uncomputed score falls
        through to UNSPECIFIED.
        """
        if score < 0.5:
            return QualityGrade.POOR
        elif score < 0.8:
            return QualityGrade.FAIR
        elif score < 0.9:
            return QualityGrade.GOOD
        elif score >= 0.9:
            return QualityGrade.EXCELLENT

        return QualityGrade.UNSPECIFIED

    @computed_field  # type: ignore
    @property
    def mean_grade(self) -> QualityGrade:
        # Grade derived from the mean of the available (non-NaN) scores.
        return self._score_to_grade(self.mean_score)

    @computed_field  # type: ignore
    @property
    def low_grade(self) -> QualityGrade:
        # Grade derived from the pessimistic (low-quantile) score.
        return self._score_to_grade(self.low_score)

    @computed_field  # type: ignore
    @property
    def mean_score(self) -> ScoreValue:
        # Mean over the available (non-NaN) stage scores.
        return ScoreValue(
            np.nanmean(
                [
                    self.ocr_score,
                    self.table_score,
                    self.layout_score,
                    self.parse_score,
                ]
            )
        )

    @computed_field  # type: ignore
    @property
    def low_score(self) -> ScoreValue:
        # 5th percentile of the available stage scores: a pessimistic
        # summary dominated by the worst-performing stage.
        return ScoreValue(
            np.nanquantile(
                [
                    self.ocr_score,
                    self.table_score,
                    self.layout_score,
                    self.parse_score,
                ],
                q=0.05,
            )
        )
class ConfidenceReport(PageConfidenceScores):
    """Document-level confidence scores aggregated over all pages."""

    # Per-page scores; defaultdict lets callers index missing pages.
    pages: dict[int, PageConfidenceScores] = Field(
        default_factory=lambda: defaultdict(PageConfidenceScores)
    )

    @computed_field  # type: ignore
    @property
    def mean_score(self) -> ScoreValue:
        # Mean of the per-page mean scores.
        return ScoreValue(
            np.nanmean(
                [c.mean_score for c in self.pages.values()],
            )
        )

    @computed_field  # type: ignore
    @property
    def low_score(self) -> ScoreValue:
        # Mean of the per-page low scores (note: a mean, not a
        # document-level quantile).
        return ScoreValue(
            np.nanmean(
                [c.low_score for c in self.pages.values()],
            )
        )
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/datamodel/__init__.py | docling/datamodel/__init__.py | python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false | |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/datamodel/layout_model_specs.py | docling/datamodel/layout_model_specs.py | import logging
from enum import Enum
from pathlib import Path
from typing import Optional
from pydantic import BaseModel
from docling.datamodel.accelerator_options import AcceleratorDevice
_log = logging.getLogger(__name__)
class LayoutModelConfig(BaseModel):
    """Configuration of a layout-analysis model artifact on Hugging Face."""

    name: str  # short identifier, e.g. "docling_layout_v2"
    repo_id: str  # Hugging Face repository id ("org/name")
    revision: str  # git revision (branch, tag, or commit) to fetch
    model_path: str  # sub-path of the model inside the repository -- "" appears to mean the root; TODO confirm
    supported_devices: list[AcceleratorDevice] = [
        AcceleratorDevice.CPU,
        AcceleratorDevice.CUDA,
        AcceleratorDevice.MPS,
    ]

    @property
    def model_repo_folder(self) -> str:
        """Repo id mangled into a filesystem-friendly folder name."""
        return self.repo_id.replace("/", "--")
# HuggingFace Layout Models
# Default Docling Layout Model
DOCLING_LAYOUT_V2 = LayoutModelConfig(
name="docling_layout_v2",
repo_id="docling-project/docling-layout-old",
revision="main",
model_path="",
)
DOCLING_LAYOUT_HERON = LayoutModelConfig(
name="docling_layout_heron",
repo_id="docling-project/docling-layout-heron",
revision="main",
model_path="",
)
DOCLING_LAYOUT_HERON_101 = LayoutModelConfig(
name="docling_layout_heron_101",
repo_id="docling-project/docling-layout-heron-101",
revision="main",
model_path="",
)
DOCLING_LAYOUT_EGRET_MEDIUM = LayoutModelConfig(
name="docling_layout_egret_medium",
repo_id="docling-project/docling-layout-egret-medium",
revision="main",
model_path="",
)
DOCLING_LAYOUT_EGRET_LARGE = LayoutModelConfig(
name="docling_layout_egret_large",
repo_id="docling-project/docling-layout-egret-large",
revision="main",
model_path="",
)
DOCLING_LAYOUT_EGRET_XLARGE = LayoutModelConfig(
name="docling_layout_egret_xlarge",
repo_id="docling-project/docling-layout-egret-xlarge",
revision="main",
model_path="",
)
# Example for a hypothetical alternative model
# ALTERNATIVE_LAYOUT = LayoutModelConfig(
# name="alternative_layout",
# repo_id="someorg/alternative-layout",
# revision="main",
# model_path="model_artifacts/layout_alt",
# )
class LayoutModelType(str, Enum):
DOCLING_LAYOUT_V2 = "docling_layout_v2"
DOCLING_LAYOUT_HERON = "docling_layout_heron"
DOCLING_LAYOUT_HERON_101 = "docling_layout_heron_101"
DOCLING_LAYOUT_EGRET_MEDIUM = "docling_layout_egret_medium"
DOCLING_LAYOUT_EGRET_LARGE = "docling_layout_egret_large"
DOCLING_LAYOUT_EGRET_XLARGE = "docling_layout_egret_xlarge"
# ALTERNATIVE_LAYOUT = "alternative_layout"
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/datamodel/pipeline_options_vlm_model.py | docling/datamodel/pipeline_options_vlm_model.py | from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Union
from docling_core.types.doc.page import SegmentedPage
from pydantic import AnyUrl, BaseModel, ConfigDict
from transformers import StoppingCriteria
from typing_extensions import deprecated
from docling.datamodel.accelerator_options import AcceleratorDevice
from docling.models.utils.generation_utils import GenerationStopper
if TYPE_CHECKING:
from docling_core.types.doc.page import SegmentedPage
from docling.datamodel.base_models import Page
class BaseVlmOptions(BaseModel):
kind: str
prompt: str
scale: float = 2.0
max_size: Optional[int] = None
temperature: float = 0.0
def build_prompt(
self,
page: Optional["SegmentedPage"],
*,
_internal_page: Optional["Page"] = None,
) -> str:
"""Build the prompt for VLM inference.
Args:
page: The parsed/segmented page to process.
_internal_page: Internal parameter for experimental layout-aware pipelines.
Do not rely on this in user code - subject to change.
Returns:
The formatted prompt string.
"""
return self.prompt
def decode_response(self, text: str) -> str:
return text
class ResponseFormat(str, Enum):
DOCTAGS = "doctags"
MARKDOWN = "markdown"
HTML = "html"
OTSL = "otsl"
PLAINTEXT = "plaintext"
class InferenceFramework(str, Enum):
MLX = "mlx"
TRANSFORMERS = "transformers"
VLLM = "vllm"
class TransformersModelType(str, Enum):
AUTOMODEL = "automodel"
AUTOMODEL_VISION2SEQ = "automodel-vision2seq"
AUTOMODEL_CAUSALLM = "automodel-causallm"
AUTOMODEL_IMAGETEXTTOTEXT = "automodel-imagetexttotext"
class TransformersPromptStyle(str, Enum):
CHAT = "chat"
RAW = "raw"
NONE = "none"
class InlineVlmOptions(BaseVlmOptions):
model_config = ConfigDict(arbitrary_types_allowed=True)
kind: Literal["inline_model_options"] = "inline_model_options"
repo_id: str
revision: str = "main"
trust_remote_code: bool = False
load_in_8bit: bool = True
llm_int8_threshold: float = 6.0
quantized: bool = False
inference_framework: InferenceFramework
transformers_model_type: TransformersModelType = TransformersModelType.AUTOMODEL
transformers_prompt_style: TransformersPromptStyle = TransformersPromptStyle.CHAT
response_format: ResponseFormat
torch_dtype: Optional[str] = None
supported_devices: List[AcceleratorDevice] = [
AcceleratorDevice.CPU,
AcceleratorDevice.CUDA,
AcceleratorDevice.MPS,
]
stop_strings: List[str] = []
custom_stopping_criteria: List[Union[StoppingCriteria, GenerationStopper]] = []
extra_generation_config: Dict[str, Any] = {}
extra_processor_kwargs: Dict[str, Any] = {}
use_kv_cache: bool = True
max_new_tokens: int = 4096
track_generated_tokens: bool = False
track_input_prompt: bool = False
@property
def repo_cache_folder(self) -> str:
return self.repo_id.replace("/", "--")
@deprecated("Use InlineVlmOptions instead.")
class HuggingFaceVlmOptions(InlineVlmOptions):
pass
class ApiVlmOptions(BaseVlmOptions):
    """Options for a VLM served over an OpenAI-compatible chat API."""

    model_config = ConfigDict(arbitrary_types_allowed=True)
    kind: Literal["api_model_options"] = "api_model_options"

    url: AnyUrl = AnyUrl(
        "http://localhost:11434/v1/chat/completions"
    )  # Default to ollama
    headers: Dict[str, str] = {}  # extra HTTP headers sent with each request
    params: Dict[str, Any] = {}  # extra request payload parameters (e.g. model name)
    timeout: float = 60  # request timeout, in seconds
    concurrency: int = 1  # number of parallel requests
    response_format: ResponseFormat
    stop_strings: List[str] = []
    custom_stopping_criteria: List[GenerationStopper] = []  # Union[X] simplified to X (equivalent)
    track_input_prompt: bool = False  # presumably records the prompt for inspection -- TODO confirm
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/datamodel/pipeline_options_asr_model.py | docling/datamodel/pipeline_options_asr_model.py | from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Union
from pydantic import AnyUrl, BaseModel
from typing_extensions import deprecated
from docling.datamodel.accelerator_options import AcceleratorDevice
from docling.datamodel.pipeline_options_vlm_model import (
# InferenceFramework,
TransformersModelType,
)
class BaseAsrOptions(BaseModel):
kind: str
# prompt: str
class InferenceAsrFramework(str, Enum):
MLX = "mlx"
# TRANSFORMERS = "transformers" # disabled for now
WHISPER = "whisper"
class InlineAsrOptions(BaseAsrOptions):
kind: Literal["inline_model_options"] = "inline_model_options"
repo_id: str
verbose: bool = False
timestamps: bool = True
temperature: float = 0.0
max_new_tokens: int = 256
max_time_chunk: float = 30.0
torch_dtype: Optional[str] = None
supported_devices: List[AcceleratorDevice] = [
AcceleratorDevice.CPU,
AcceleratorDevice.CUDA,
AcceleratorDevice.MPS,
]
@property
def repo_cache_folder(self) -> str:
return self.repo_id.replace("/", "--")
class InlineAsrNativeWhisperOptions(InlineAsrOptions):
inference_framework: InferenceAsrFramework = InferenceAsrFramework.WHISPER
language: str = "en"
supported_devices: List[AcceleratorDevice] = [
AcceleratorDevice.CPU,
AcceleratorDevice.CUDA,
]
word_timestamps: bool = True
class InlineAsrMlxWhisperOptions(InlineAsrOptions):
"""
MLX Whisper options for Apple Silicon optimization.
Uses mlx-whisper library for efficient inference on Apple Silicon devices.
"""
inference_framework: InferenceAsrFramework = InferenceAsrFramework.MLX
language: str = "en"
task: str = "transcribe" # "transcribe" or "translate"
supported_devices: List[AcceleratorDevice] = [
AcceleratorDevice.MPS, # MLX is optimized for Apple Silicon
]
word_timestamps: bool = True
no_speech_threshold: float = 0.6 # Threshold for detecting speech
logprob_threshold: float = -1.0 # Log probability threshold
compression_ratio_threshold: float = 2.4 # Compression ratio threshold
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/datamodel/backend_options.py | docling/datamodel/backend_options.py | from pathlib import PurePath
from typing import Annotated, Literal, Optional, Union
from pydantic import AnyUrl, BaseModel, Field, SecretStr
class BaseBackendOptions(BaseModel):
    """Common options for all declarative document backends."""

    # Both fetch switches default to False, so backends never touch external
    # resources unless the caller opts in explicitly.
    enable_remote_fetch: bool = Field(
        False, description="Enable remote resource fetching."
    )
    enable_local_fetch: bool = Field(
        False, description="Enable local resource fetching."
    )
class DeclarativeBackendOptions(BaseBackendOptions):
    """Default backend options for a declarative document backend."""

    # `kind` is the discriminator used by the BackendOptions union; excluded
    # from serialization and repr.
    kind: Literal["declarative"] = Field("declarative", exclude=True, repr=False)
class HTMLBackendOptions(BaseBackendOptions):
    """Options specific to the HTML backend.
    This class can be extended to include options specific to HTML processing.
    """

    # Discriminator for the BackendOptions union; excluded from serialization.
    kind: Literal["html"] = Field("html", exclude=True, repr=False)
    fetch_images: bool = Field(
        False,
        description=(
            "Whether the backend should access remote or local resources to parse "
            "images in an HTML document."
        ),
    )
    source_uri: Optional[Union[AnyUrl, PurePath]] = Field(
        None,
        description=(
            "The URI that originates the HTML document. If provided, the backend "
            "will use it to resolve relative paths in the HTML document."
        ),
    )
    add_title: bool = Field(
        True, description="Add the HTML title tag as furniture in the DoclingDocument."
    )
    infer_furniture: bool = Field(
        True, description="Infer all the content before the first header as furniture."
    )
class MarkdownBackendOptions(BaseBackendOptions):
    """Options specific to the Markdown backend."""

    # Discriminator for the BackendOptions union; excluded from serialization.
    kind: Literal["md"] = Field("md", exclude=True, repr=False)
    fetch_images: bool = Field(
        False,
        description=(
            "Whether the backend should access remote or local resources to parse "
            "images in the markdown document."
        ),
    )
    source_uri: Optional[Union[AnyUrl, PurePath]] = Field(
        None,
        description=(
            "The URI that originates the markdown document. If provided, the backend "
            "will use it to resolve relative paths in the markdown document."
        ),
    )
class PdfBackendOptions(BaseBackendOptions):
    """Backend options for pdf document backends."""

    # Discriminator for the BackendOptions union; excluded from serialization.
    kind: Literal["pdf"] = Field("pdf", exclude=True, repr=False)
    # Stored as SecretStr so the value is masked in repr/serialization.
    password: Optional[SecretStr] = None
class MsExcelBackendOptions(BaseBackendOptions):
    """Options specific to the MS Excel backend."""

    # Discriminator for the BackendOptions union; excluded from serialization.
    kind: Literal["xlsx"] = Field("xlsx", exclude=True, repr=False)
    treat_singleton_as_text: bool = Field(
        False,
        description=(
            "Whether to treat singleton cells (1x1 tables with empty neighboring "
            "cells) as TextItem instead of TableItem."
        ),
    )
# Discriminated union over all backend option classes: pydantic selects the
# concrete type from each member's `kind` field during validation.
BackendOptions = Annotated[
    Union[
        DeclarativeBackendOptions,
        HTMLBackendOptions,
        MarkdownBackendOptions,
        PdfBackendOptions,
        MsExcelBackendOptions,
    ],
    Field(discriminator="kind"),
]
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/datamodel/pipeline_options.py | docling/datamodel/pipeline_options.py | import logging
from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import Annotated, Any, ClassVar, Dict, List, Literal, Optional, Union
from pydantic import (
AnyUrl,
BaseModel,
ConfigDict,
Field,
)
from typing_extensions import deprecated
from docling.datamodel import asr_model_specs, vlm_model_specs
# Import the following for backwards compatibility
from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
from docling.datamodel.layout_model_specs import (
DOCLING_LAYOUT_EGRET_LARGE,
DOCLING_LAYOUT_EGRET_MEDIUM,
DOCLING_LAYOUT_EGRET_XLARGE,
DOCLING_LAYOUT_HERON,
DOCLING_LAYOUT_HERON_101,
DOCLING_LAYOUT_V2,
LayoutModelConfig,
)
from docling.datamodel.pipeline_options_asr_model import (
InlineAsrOptions,
)
from docling.datamodel.pipeline_options_vlm_model import (
ApiVlmOptions,
InferenceFramework,
InlineVlmOptions,
ResponseFormat,
)
from docling.datamodel.vlm_model_specs import (
GRANITE_VISION_OLLAMA as granite_vision_vlm_ollama_conversion_options,
GRANITE_VISION_TRANSFORMERS as granite_vision_vlm_conversion_options,
NU_EXTRACT_2B_TRANSFORMERS,
SMOLDOCLING_MLX as smoldocling_vlm_mlx_conversion_options,
SMOLDOCLING_TRANSFORMERS as smoldocling_vlm_conversion_options,
VlmModelType,
)
_log = logging.getLogger(__name__)
class BaseOptions(BaseModel):
    """Base class for options."""

    # Discriminator string of the concrete options class. Declared as a
    # ClassVar, so it is not a pydantic field and is not serialized.
    kind: ClassVar[str]
class TableFormerMode(str, Enum):
    """Modes for the TableFormer model."""

    FAST = "fast"
    ACCURATE = "accurate"  # default mode used by TableStructureOptions
class BaseTableStructureOptions(BaseOptions):
    """Base options for table structure models."""
class TableStructureOptions(BaseTableStructureOptions):
    """Options for the table structure."""

    kind: ClassVar[str] = "docling_tableformer"
    do_cell_matching: bool = (
        True
        # True: Matches predictions back to PDF cells. Can break table output if PDF cells
        # are merged across table columns.
        # False: Let table structure model define the text cells, ignore PDF cells.
    )
    mode: TableFormerMode = TableFormerMode.ACCURATE
class OcrOptions(BaseOptions):
    """Base options shared by all OCR engines."""

    lang: Annotated[
        List[str],
        Field(
            description="List of OCR languages to use. The format must match the values of the OCR engine of choice.",
            examples=[["deu", "eng"]],
        ),
    ]
    force_full_page_ocr: Annotated[
        bool,
        Field(
            description="If enabled, a full-page OCR is always applied.",
            examples=[False],
        ),
    ] = False
    bitmap_area_threshold: Annotated[
        float,
        Field(
            description="Percentage of the page area for a bitmap to be processed with OCR.",
            examples=[0.05, 0.1],
        ),
    ] = 0.05
class OcrAutoOptions(OcrOptions):
    """Options for pick OCR engine automatically."""

    kind: ClassVar[Literal["auto"]] = "auto"
    # Empty by default: the selected engine falls back to its own language defaults.
    lang: Annotated[
        List[str],
        Field(
            description="The automatic OCR engine will use the default values of the engine. Please specify the engine explicitly to change the language selection.",
        ),
    ] = []
class RapidOcrOptions(OcrOptions):
    """Options for the RapidOCR engine."""

    kind: ClassVar[Literal["rapidocr"]] = "rapidocr"
    # English and chinese are the most commonly used models and have been tested with RapidOCR.
    lang: List[str] = [
        "english",
        "chinese",
    ]
    # However, language as a parameter is not supported by rapidocr yet
    # and hence changing these options doesn't affect anything.
    # For more details on supported languages by RapidOCR visit
    # https://rapidai.github.io/RapidOCRDocs/blog/2022/09/28/%E6%94%AF%E6%8C%81%E8%AF%86%E5%88%AB%E8%AF%AD%E8%A8%80/
    # For more details on the following options visit
    # https://rapidai.github.io/RapidOCRDocs/install_usage/api/RapidOCR/
    # https://rapidai.github.io/RapidOCRDocs/main/install_usage/rapidocr/usage/#__tabbed_3_4
    backend: Literal["onnxruntime", "openvino", "paddle", "torch"] = "onnxruntime"
    text_score: float = 0.5  # same default as rapidocr
    use_det: Optional[bool] = None  # same default as rapidocr
    use_cls: Optional[bool] = None  # same default as rapidocr
    use_rec: Optional[bool] = None  # same default as rapidocr
    print_verbose: bool = False  # same default as rapidocr
    det_model_path: Optional[str] = None  # same default as rapidocr
    cls_model_path: Optional[str] = None  # same default as rapidocr
    rec_model_path: Optional[str] = None  # same default as rapidocr
    rec_keys_path: Optional[str] = None  # same default as rapidocr
    rec_font_path: Optional[str] = None  # Deprecated, please use font_path instead
    font_path: Optional[str] = None  # same default as rapidocr
    # Dictionary to overwrite or pass-through additional parameters
    rapidocr_params: Dict[str, Any] = Field(default_factory=dict)
    model_config = ConfigDict(
        extra="forbid",
    )
class EasyOcrOptions(OcrOptions):
    """Options for the EasyOCR engine."""

    kind: ClassVar[Literal["easyocr"]] = "easyocr"
    lang: List[str] = ["fr", "de", "es", "en"]
    use_gpu: Optional[bool] = None  # None: let the engine/accelerator decide — TODO confirm
    confidence_threshold: float = 0.5
    model_storage_directory: Optional[str] = None
    recog_network: Optional[str] = "standard"
    download_enabled: bool = True
    suppress_mps_warnings: bool = True
    model_config = ConfigDict(
        extra="forbid",
        protected_namespaces=(),  # allow the "model_" field-name prefix
    )
class TesseractCliOcrOptions(OcrOptions):
    """Options for the TesseractCli engine (invokes the tesseract executable)."""

    kind: ClassVar[Literal["tesseract"]] = "tesseract"
    lang: List[str] = ["fra", "deu", "spa", "eng"]
    tesseract_cmd: str = "tesseract"  # executable name or path
    path: Optional[str] = None
    psm: Optional[int] = (
        None  # Page Segmentation Mode (0-13), defaults to tesseract's default
    )
    model_config = ConfigDict(
        extra="forbid",
    )
class TesseractOcrOptions(OcrOptions):
    """Options for the Tesseract engine (via the tesserocr bindings)."""

    kind: ClassVar[Literal["tesserocr"]] = "tesserocr"
    lang: List[str] = ["fra", "deu", "spa", "eng"]
    path: Optional[str] = None
    psm: Optional[int] = (
        None  # Page Segmentation Mode (0-13), defaults to tesseract's default
    )
    model_config = ConfigDict(
        extra="forbid",
    )
class OcrMacOptions(OcrOptions):
    """Options for the Mac OCR engine."""

    kind: ClassVar[Literal["ocrmac"]] = "ocrmac"
    lang: List[str] = ["fr-FR", "de-DE", "es-ES", "en-US"]
    # Values are forwarded to ocrmac; presumably "accurate"/"fast" and the
    # Apple "vision" framework — TODO confirm against ocrmac documentation.
    recognition: str = "accurate"
    framework: str = "vision"
    model_config = ConfigDict(
        extra="forbid",
    )
class PictureDescriptionBaseOptions(BaseOptions):
    """Common options for picture-description (image captioning) models."""

    batch_size: int = 8
    scale: float = 2  # image scaling factor applied before description — TODO confirm
    picture_area_threshold: float = (
        0.05  # percentage of the area for a picture to processed with the models
    )
class PictureDescriptionApiOptions(PictureDescriptionBaseOptions):
    """Options for describing pictures through a remote chat-completions API."""

    kind: ClassVar[Literal["api"]] = "api"
    # Default points at a local OpenAI-compatible chat-completions endpoint.
    url: AnyUrl = AnyUrl("http://localhost:8000/v1/chat/completions")
    headers: Dict[str, str] = {}
    params: Dict[str, Any] = {}
    timeout: float = 20  # request timeout, presumably seconds — TODO confirm
    concurrency: int = 1
    prompt: str = "Describe this image in a few sentences."
    provenance: str = ""
class PictureDescriptionVlmOptions(PictureDescriptionBaseOptions):
    """Options for describing pictures with a local VLM loaded from a repo."""

    kind: ClassVar[Literal["vlm"]] = "vlm"
    repo_id: str  # Hugging Face model repo id
    prompt: str = "Describe this image in a few sentences."
    # Config from here https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.GenerationConfig
    generation_config: Dict[str, Any] = dict(max_new_tokens=200, do_sample=False)
    @property
    def repo_cache_folder(self) -> str:
        """Filesystem-safe cache folder name derived from repo_id ("/" -> "--")."""
        return self.repo_id.replace("/", "--")
# Ready-made picture-description presets; smolvlm_picture_description is the
# default used by ConvertPipelineOptions below.
# SmolVLM
smolvlm_picture_description = PictureDescriptionVlmOptions(
    repo_id="HuggingFaceTB/SmolVLM-256M-Instruct"
)
# GraniteVision
granite_picture_description = PictureDescriptionVlmOptions(
    repo_id="ibm-granite/granite-vision-3.3-2b",
    prompt="What is shown in this image?",
)
# Define an enum for the backend options
class PdfBackend(str, Enum):
    """Enum of valid PDF backends."""

    PYPDFIUM2 = "pypdfium2"
    DLPARSE_V1 = "dlparse_v1"
    DLPARSE_V2 = "dlparse_v2"
    DLPARSE_V4 = "dlparse_v4"
# Define an enum for the ocr engines
@deprecated(
    "Use get_ocr_factory().registered_kind to get a list of registered OCR engines."
)
class OcrEngine(str, Enum):
    """Enum of valid OCR engines (deprecated; see decorator message)."""

    AUTO = "auto"
    EASYOCR = "easyocr"
    TESSERACT_CLI = "tesseract_cli"
    TESSERACT = "tesseract"
    OCRMAC = "ocrmac"
    RAPIDOCR = "rapidocr"
class PipelineOptions(BaseOptions):
    """Base pipeline options."""

    document_timeout: Annotated[
        Optional[float],
        Field(
            description="Maximum allowed processing time for a document before timing out. If None, no timeout is enforced.",
            examples=[10.0, 20.0],
        ),
    ] = None
    accelerator_options: Annotated[
        AcceleratorOptions,
        Field(
            description="Configuration options for hardware acceleration (e.g., GPU or optimized execution settings).",
        ),
    ] = AcceleratorOptions()
    # Security-sensitive toggles below default to False.
    enable_remote_services: Annotated[
        bool,
        Field(
            description="Enable calling external APIs or cloud services during pipeline execution.",
            examples=[False],
        ),
    ] = False
    allow_external_plugins: Annotated[
        bool,
        Field(
            description="Allow loading external third-party plugins or modules. Disabled by default for safety.",
            examples=[False],
        ),
    ] = False
    artifacts_path: Annotated[
        Optional[Union[Path, str]],
        Field(
            description="Filesystem path where pipeline artifacts should be stored. If None, artifacts will be fetched. You can use the utility `docling-tools models download` to pre-fetch the model artifacts.",
            examples=["./artifacts", "/tmp/docling_outputs"],
        ),
    ] = None
class ConvertPipelineOptions(PipelineOptions):
    """Base convert pipeline options."""

    do_picture_classification: bool = False  # True: classify pictures in documents
    do_picture_description: bool = False  # True: run describe pictures in documents
    # Defaults to the SmolVLM preset defined above.
    picture_description_options: PictureDescriptionBaseOptions = (
        smolvlm_picture_description
    )
class PaginatedPipelineOptions(ConvertPipelineOptions):
    """Convert options for backends that produce page images."""

    images_scale: float = 1.0
    generate_page_images: bool = False
    generate_picture_images: bool = False
class VlmPipelineOptions(PaginatedPipelineOptions):
    """Options for the VLM conversion pipeline."""

    generate_page_images: bool = True  # VLM input requires page images
    force_backend_text: bool = (
        False  # (To be used with vlms, or other generative models)
    )
    # If True, text from backend will be used instead of generated text
    vlm_options: Union[InlineVlmOptions, ApiVlmOptions] = (
        vlm_model_specs.GRANITEDOCLING_TRANSFORMERS
    )
class BaseLayoutOptions(BaseOptions):
    """Base options for layout models."""

    keep_empty_clusters: bool = (
        False  # Whether to keep clusters that contain no text cells
    )
    skip_cell_assignment: bool = (
        False  # Skip cell-to-cluster assignment for VLM-only processing
    )
class LayoutOptions(BaseLayoutOptions):
    """Options for layout processing."""

    kind: ClassVar[str] = "docling_layout_default"
    create_orphan_clusters: bool = True  # Whether to create clusters for orphaned cells
    model_spec: LayoutModelConfig = DOCLING_LAYOUT_HERON
class AsrPipelineOptions(PipelineOptions):
    """Options for the ASR (speech transcription) pipeline."""

    # `Union[InlineAsrOptions]` was a degenerate one-member union; the plain
    # annotation is type-equivalent and clearer.
    asr_options: InlineAsrOptions = asr_model_specs.WHISPER_TINY
class VlmExtractionPipelineOptions(PipelineOptions):
    """Options for extraction pipeline."""

    # `Union[InlineVlmOptions]` was a degenerate one-member union; the plain
    # annotation is type-equivalent and clearer.
    vlm_options: InlineVlmOptions = NU_EXTRACT_2B_TRANSFORMERS
class PdfPipelineOptions(PaginatedPipelineOptions):
    """Options for the PDF pipeline."""

    do_table_structure: bool = True  # True: perform table structure extraction
    do_ocr: bool = True  # True: perform OCR, replace programmatic PDF text
    do_code_enrichment: bool = False  # True: perform code OCR
    do_formula_enrichment: bool = False  # True: perform formula OCR, return Latex code
    force_backend_text: bool = (
        False  # (To be used with vlms, or other generative models)
    )
    # If True, text from backend will be used instead of generated text
    table_structure_options: BaseTableStructureOptions = TableStructureOptions()
    ocr_options: OcrOptions = OcrAutoOptions()
    layout_options: BaseLayoutOptions = LayoutOptions()
    images_scale: float = 1.0
    generate_page_images: bool = False
    generate_picture_images: bool = False
    generate_table_images: bool = Field(
        default=False,
        deprecated=(
            "Field `generate_table_images` is deprecated. "
            "To obtain table images, set `PdfPipelineOptions.generate_page_images = True` "
            "before conversion and then use the `TableItem.get_image` function."
        ),
    )
    generate_parsed_pages: bool = False
    ### Arguments for threaded PDF pipeline with batching and backpressure control
    # Batch sizes for different stages
    ocr_batch_size: int = 4
    layout_batch_size: int = 4
    table_batch_size: int = 4
    # Timing control
    batch_polling_interval_seconds: float = 0.5
    # Backpressure and queue control
    queue_max_size: int = 100
class ProcessingPipeline(str, Enum):
    """Enum of the selectable top-level processing pipelines."""

    LEGACY = "legacy"
    STANDARD = "standard"
    VLM = "vlm"
    ASR = "asr"
class ThreadedPdfPipelineOptions(PdfPipelineOptions):
    """Pipeline options for the threaded PDF pipeline with batching and backpressure control."""
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/datamodel/asr_model_specs.py | docling/datamodel/asr_model_specs.py | import logging
from enum import Enum
from pydantic import (
AnyUrl,
)
from docling.datamodel.accelerator_options import AcceleratorDevice
from docling.datamodel.pipeline_options_asr_model import (
# AsrResponseFormat,
# ApiAsrOptions,
InferenceAsrFramework,
InlineAsrMlxWhisperOptions,
InlineAsrNativeWhisperOptions,
TransformersModelType,
)
_log = logging.getLogger(__name__)
def _has_mps() -> bool:
    """Return True when PyTorch reports a usable Apple-Silicon MPS backend."""
    try:
        import torch

        return torch.backends.mps.is_built() and torch.backends.mps.is_available()
    except ImportError:
        return False


def _has_mlx_whisper() -> bool:
    """Return True when the optional mlx-whisper package can be imported."""
    try:
        import mlx_whisper  # type: ignore  # noqa: F401

        return True
    except ImportError:
        return False


def _select_whisper_model(mlx_repo_id: str, native_repo_id: str):
    """Pick the best Whisper implementation for the current hardware.

    Returns MLX options when both an MPS device and mlx-whisper are available
    (Apple Silicon); otherwise falls back to native Whisper options. All size
    variants share the same defaults and differ only in the model repo id,
    so the per-size getters below delegate here.

    Args:
        mlx_repo_id: Hugging Face repo id of the MLX model variant.
        native_repo_id: Native whisper model size name (e.g. "tiny").

    Returns:
        InlineAsrMlxWhisperOptions or InlineAsrNativeWhisperOptions.
    """
    if _has_mps() and _has_mlx_whisper():
        return InlineAsrMlxWhisperOptions(
            repo_id=mlx_repo_id,
            inference_framework=InferenceAsrFramework.MLX,
            language="en",
            task="transcribe",
            word_timestamps=True,
            no_speech_threshold=0.6,
            logprob_threshold=-1.0,
            compression_ratio_threshold=2.4,
        )
    return InlineAsrNativeWhisperOptions(
        repo_id=native_repo_id,
        inference_framework=InferenceAsrFramework.WHISPER,
        verbose=True,
        timestamps=True,
        word_timestamps=True,
        temperature=0.0,
        max_new_tokens=256,
        max_time_chunk=30.0,
    )


def _get_whisper_tiny_model():
    """Get the best Whisper Tiny model for the current hardware."""
    return _select_whisper_model("mlx-community/whisper-tiny-mlx", "tiny")


# Create the model instance
WHISPER_TINY = _get_whisper_tiny_model()


def _get_whisper_small_model():
    """Get the best Whisper Small model for the current hardware."""
    return _select_whisper_model("mlx-community/whisper-small-mlx", "small")


# Create the model instance
WHISPER_SMALL = _get_whisper_small_model()


def _get_whisper_medium_model():
    """Get the best Whisper Medium model for the current hardware."""
    return _select_whisper_model("mlx-community/whisper-medium-mlx-8bit", "medium")


# Create the model instance
WHISPER_MEDIUM = _get_whisper_medium_model()


def _get_whisper_base_model():
    """Get the best Whisper Base model for the current hardware."""
    return _select_whisper_model("mlx-community/whisper-base-mlx", "base")


# Create the model instance
WHISPER_BASE = _get_whisper_base_model()


def _get_whisper_large_model():
    """Get the best Whisper Large model for the current hardware."""
    return _select_whisper_model("mlx-community/whisper-large-mlx-8bit", "large")


# Create the model instance
WHISPER_LARGE = _get_whisper_large_model()


def _get_whisper_turbo_model():
    """Get the best Whisper Turbo model for the current hardware."""
    return _select_whisper_model("mlx-community/whisper-turbo", "turbo")


# Create the model instance
WHISPER_TURBO = _get_whisper_turbo_model()
# Explicit model options for users who want to force a specific implementation.
# All variants below share identical defaults and differ only in the model
# repo id, so they are built through two small private factories.


def _forced_mlx_options(repo_id: str) -> InlineAsrMlxWhisperOptions:
    """Return MLX Whisper options (forces the MLX implementation)."""
    return InlineAsrMlxWhisperOptions(
        repo_id=repo_id,
        inference_framework=InferenceAsrFramework.MLX,
        language="en",
        task="transcribe",
        word_timestamps=True,
        no_speech_threshold=0.6,
        logprob_threshold=-1.0,
        compression_ratio_threshold=2.4,
    )


def _forced_native_options(repo_id: str) -> InlineAsrNativeWhisperOptions:
    """Return native Whisper options (forces the native implementation)."""
    return InlineAsrNativeWhisperOptions(
        repo_id=repo_id,
        inference_framework=InferenceAsrFramework.WHISPER,
        verbose=True,
        timestamps=True,
        word_timestamps=True,
        temperature=0.0,
        max_new_tokens=256,
        max_time_chunk=30.0,
    )


# Explicit MLX Whisper model options for users who want to force MLX usage
WHISPER_TINY_MLX = _forced_mlx_options("mlx-community/whisper-tiny-mlx")
WHISPER_SMALL_MLX = _forced_mlx_options("mlx-community/whisper-small-mlx")
WHISPER_MEDIUM_MLX = _forced_mlx_options("mlx-community/whisper-medium-mlx-8bit")
WHISPER_BASE_MLX = _forced_mlx_options("mlx-community/whisper-base-mlx")
WHISPER_LARGE_MLX = _forced_mlx_options("mlx-community/whisper-large-mlx-8bit")
WHISPER_TURBO_MLX = _forced_mlx_options("mlx-community/whisper-turbo")

# Explicit Native Whisper model options for users who want to force native usage
WHISPER_TINY_NATIVE = _forced_native_options("tiny")
WHISPER_SMALL_NATIVE = _forced_native_options("small")
WHISPER_MEDIUM_NATIVE = _forced_native_options("medium")
WHISPER_BASE_NATIVE = _forced_native_options("base")
WHISPER_LARGE_NATIVE = _forced_native_options("large")
WHISPER_TURBO_NATIVE = _forced_native_options("turbo")

# Note: The main WHISPER_* models (WHISPER_TURBO, WHISPER_BASE, etc.) automatically
# select the best implementation (MLX on Apple Silicon, Native elsewhere).
# Use the explicit _MLX or _NATIVE variants if you need to force a specific implementation.
class AsrModelType(str, Enum):
    """Enum of selectable ASR model presets."""

    # Auto-selecting models (choose best implementation for hardware)
    WHISPER_TINY = "whisper_tiny"
    WHISPER_SMALL = "whisper_small"
    WHISPER_MEDIUM = "whisper_medium"
    WHISPER_BASE = "whisper_base"
    WHISPER_LARGE = "whisper_large"
    WHISPER_TURBO = "whisper_turbo"
    # Explicit MLX models (force MLX implementation)
    WHISPER_TINY_MLX = "whisper_tiny_mlx"
    WHISPER_SMALL_MLX = "whisper_small_mlx"
    WHISPER_MEDIUM_MLX = "whisper_medium_mlx"
    WHISPER_BASE_MLX = "whisper_base_mlx"
    WHISPER_LARGE_MLX = "whisper_large_mlx"
    WHISPER_TURBO_MLX = "whisper_turbo_mlx"
    # Explicit Native models (force native implementation)
    WHISPER_TINY_NATIVE = "whisper_tiny_native"
    WHISPER_SMALL_NATIVE = "whisper_small_native"
    WHISPER_MEDIUM_NATIVE = "whisper_medium_native"
    WHISPER_BASE_NATIVE = "whisper_base_native"
    WHISPER_LARGE_NATIVE = "whisper_large_native"
    WHISPER_TURBO_NATIVE = "whisper_turbo_native"
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/base_model.py | docling/models/base_model.py | import logging
from abc import ABC, abstractmethod
from collections.abc import Iterable
from typing import Any, Generic, Optional, Protocol, Type, Union
import numpy as np
from docling_core.types.doc import (
BoundingBox,
DocItem,
DoclingDocument,
NodeItem,
PictureItem,
)
from PIL.Image import Image
from typing_extensions import TypeVar
from docling.datamodel.base_models import (
ItemAndImageEnrichmentElement,
Page,
VlmPrediction,
)
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import BaseOptions
from docling.datamodel.pipeline_options_vlm_model import (
InlineVlmOptions,
TransformersPromptStyle,
)
from docling.datamodel.settings import settings
class BaseModelWithOptions(Protocol):
    """Structural interface for models constructed from a BaseOptions instance."""

    @classmethod
    def get_options_type(cls) -> Type[BaseOptions]: ...
    def __init__(self, *, options: BaseOptions, **kwargs): ...
class BasePageModel(ABC):
    """Abstract base for models that are called on batches of pages."""

    @abstractmethod
    def __call__(
        self, conv_res: ConversionResult, page_batch: Iterable[Page]
    ) -> Iterable[Page]:
        """Process *page_batch* within *conv_res* and yield the (updated) pages."""
        pass
class BaseVlmModel(ABC):
    """Base class for Vision-Language Models that adds image processing capability."""

    @abstractmethod
    def process_images(
        self,
        image_batch: Iterable[Union[Image, np.ndarray]],
        prompt: Union[str, list[str]],
    ) -> Iterable[VlmPrediction]:
        """Process raw images without page metadata.
        Args:
            image_batch: Iterable of PIL Images or numpy arrays
            prompt: Either:
                - str: Single prompt used for all images
                - list[str]: List of prompts (one per image, must match image count)
        Raises:
            ValueError: If prompt list length doesn't match image count.
        """
class BaseVlmPageModel(BasePageModel, BaseVlmModel):
    """Base implementation for VLM models that inherit from BasePageModel.
    Subclasses implement __call__ (abstract here) to extract images from pages,
    process them using process_images, and attach results back to pages.
    """

    # Type annotations for attributes that subclasses must initialize
    vlm_options: InlineVlmOptions
    processor: Any
    def _build_prompt_safe(self, page: Page) -> str:
        """Build prompt with backward compatibility for user overrides.
        Tries to call build_prompt with _internal_page parameter (for layout-aware
        pipelines). Falls back to basic call if user override doesn't accept it.
        Args:
            page: The full Page object with layout predictions and parsed_page.
        Returns:
            The formatted prompt string.
        """
        # NOTE(review): this also swallows TypeErrors raised *inside* an
        # override that does accept _internal_page — confirm acceptable.
        try:
            return self.vlm_options.build_prompt(page.parsed_page, _internal_page=page)
        except TypeError:
            # User override doesn't accept _internal_page - fall back to basic call
            return self.vlm_options.build_prompt(page.parsed_page)
    @abstractmethod
    def __call__(
        self, conv_res: ConversionResult, page_batch: Iterable[Page]
    ) -> Iterable[Page]:
        """Extract images from pages, process them, and attach results back."""
    def formulate_prompt(self, user_prompt: str) -> str:
        """Formulate a prompt for the VLM.

        Dispatches on the configured prompt style; the Phi-4 repo id is
        special-cased before the CHAT style check. Raises RuntimeError for
        unknown styles.
        """
        _log = logging.getLogger(__name__)
        if self.vlm_options.transformers_prompt_style == TransformersPromptStyle.RAW:
            # RAW: pass the user prompt through unchanged.
            return user_prompt
        elif self.vlm_options.transformers_prompt_style == TransformersPromptStyle.NONE:
            return ""
        elif self.vlm_options.repo_id == "microsoft/Phi-4-multimodal-instruct":
            _log.debug("Using specialized prompt for Phi-4")
            # Note: This might need adjustment for VLLM vs transformers
            user_prompt_prefix = "<|user|>"
            assistant_prompt = "<|assistant|>"
            prompt_suffix = "<|end|>"
            prompt = f"{user_prompt_prefix}<|image_1|>{user_prompt}{prompt_suffix}{assistant_prompt}"
            _log.debug(f"prompt for {self.vlm_options.repo_id}: {prompt}")
            return prompt
        elif self.vlm_options.transformers_prompt_style == TransformersPromptStyle.CHAT:
            # CHAT: build a chat-template message with an image placeholder.
            messages = [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": "This is a page from a document.",
                        },
                        {"type": "image"},
                        {"type": "text", "text": user_prompt},
                    ],
                }
            ]
            prompt = self.processor.apply_chat_template(
                messages, add_generation_prompt=True
            )
            return prompt
        raise RuntimeError(
            f"Unknown prompt style `{self.vlm_options.transformers_prompt_style}`. Valid values are {', '.join(s.value for s in TransformersPromptStyle)}."
        )
# Element type produced by prepare_element; defaults to NodeItem.
EnrichElementT = TypeVar("EnrichElementT", default=NodeItem)
class GenericEnrichmentModel(ABC, Generic[EnrichElementT]):
    """Abstract base for document-enrichment models, generic in the prepared element type."""

    # Number of elements fed to __call__ per batch; taken from global settings.
    elements_batch_size: int = settings.perf.elements_batch_size
    @abstractmethod
    def is_processable(self, doc: DoclingDocument, element: NodeItem) -> bool:
        """Return True when this model should process *element*."""
        pass
    @abstractmethod
    def prepare_element(
        self, conv_res: ConversionResult, element: NodeItem
    ) -> Optional[EnrichElementT]:
        """Turn *element* into the model's input form, or None to skip it."""
        pass
    @abstractmethod
    def __call__(
        self, doc: DoclingDocument, element_batch: Iterable[EnrichElementT]
    ) -> Iterable[NodeItem]:
        """Enrich a batch of prepared elements and yield the affected items."""
        pass
class BaseEnrichmentModel(GenericEnrichmentModel[NodeItem]):
    """Enrichment model whose prepared input is the element itself."""

    def prepare_element(
        self, conv_res: ConversionResult, element: NodeItem
    ) -> Optional[NodeItem]:
        """Return *element* unchanged if it is processable, else None."""
        if not self.is_processable(doc=conv_res.document, element=element):
            return None
        return element
class BaseItemAndImageEnrichmentModel(
    GenericEnrichmentModel[ItemAndImageEnrichmentElement]
):
    """Enrichment model that consumes an item together with an image crop."""

    # Scale applied when rendering the page crop; set by subclasses.
    images_scale: float
    # Fraction of the element's width/height added as margin on each side
    # of the crop, to give the model some visual context.
    expansion_factor: float = 0.0

    def prepare_element(
        self, conv_res: ConversionResult, element: NodeItem
    ) -> Optional[ItemAndImageEnrichmentElement]:
        """Pair *element* with a cropped page image (or its embedded image).

        Returns None when the element is not processable, or when a
        non-picture element has no provenance to locate it on a page.
        """
        if not self.is_processable(doc=conv_res.document, element=element):
            return None

        assert isinstance(element, DocItem)

        # Allow the case of documents without page images but embedded images (e.g. Word and HTML docs)
        if isinstance(element, PictureItem):
            embedded_im = element.get_image(conv_res.document)
            if embedded_im is not None:
                return ItemAndImageEnrichmentElement(item=element, image=embedded_im)
        elif len(element.prov) == 0:
            return None

        # Crop the image from the page
        element_prov = element.prov[0]

        bbox = element_prov.bbox
        width = bbox.r - bbox.l
        height = bbox.t - bbox.b

        # TODO: move to a utility in the BoundingBox class
        expanded_bbox = BoundingBox(
            l=bbox.l - width * self.expansion_factor,
            t=bbox.t + height * self.expansion_factor,
            r=bbox.r + width * self.expansion_factor,
            b=bbox.b - height * self.expansion_factor,
            coord_origin=bbox.coord_origin,
        )

        # NOTE(review): index arithmetic assumes prov page numbers are offset
        # by one relative to Page.page_no — confirm against the page model.
        page_ix = element_prov.page_no - conv_res.pages[0].page_no - 1
        cropped_image = conv_res.pages[page_ix].get_image(
            scale=self.images_scale, cropbox=expanded_bbox
        )

        # Allow for images being embedded without the page backend or page images
        if cropped_image is None and isinstance(element, PictureItem):
            embedded_im = element.get_image(conv_res.document)
            if embedded_im is not None:
                return ItemAndImageEnrichmentElement(item=element, image=embedded_im)
            else:
                return None

        # Return the proper cropped image
        return ItemAndImageEnrichmentElement(item=element, image=cropped_image)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/table_structure_model.py | docling/models/table_structure_model.py | import copy
import warnings
from collections.abc import Iterable, Sequence
from pathlib import Path
from typing import Optional
import numpy
from docling_core.types.doc import BoundingBox, DocItemLabel, TableCell
from docling_core.types.doc.page import (
BoundingRectangle,
TextCellUnit,
)
from PIL import ImageDraw
from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
from docling.datamodel.base_models import Page, Table, TableStructurePrediction
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import (
TableFormerMode,
TableStructureOptions,
)
from docling.datamodel.settings import settings
from docling.models.base_table_model import BaseTableStructureModel
from docling.models.utils.hf_model_download import download_hf_model
from docling.utils.accelerator_utils import decide_device
from docling.utils.profiling import TimeRecorder
class TableStructureModel(BaseTableStructureModel):
    """TableFormer-based table structure recognition model."""

    # Folder name used when model weights live under a shared artifacts dir.
    _model_repo_folder = "docling-project--docling-models"
    _model_path = "model_artifacts/tableformer"

    def __init__(
        self,
        enabled: bool,
        artifacts_path: Optional[Path],
        options: TableStructureOptions,
        accelerator_options: AcceleratorOptions,
    ):
        """Load the TableFormer predictor (only when *enabled*).

        Args:
            enabled: Whether table structure prediction is active.
            artifacts_path: Optional directory holding the model weights;
                downloaded from Hugging Face when None.
            options: Table structure options (mode, cell matching).
            accelerator_options: Device and threading configuration.
        """
        self.options = options
        self.do_cell_matching = self.options.do_cell_matching
        self.mode = self.options.mode

        self.enabled = enabled
        if self.enabled:
            if artifacts_path is None:
                artifacts_path = self.download_models() / self._model_path
            else:
                # will become the default in the future
                if (artifacts_path / self._model_repo_folder).exists():
                    artifacts_path = (
                        artifacts_path / self._model_repo_folder / self._model_path
                    )
                elif (artifacts_path / self._model_path).exists():
                    warnings.warn(
                        "The usage of artifacts_path containing directly "
                        f"{self._model_path} is deprecated. Please point "
                        "the artifacts_path to the parent containing "
                        f"the {self._model_repo_folder} folder.",
                        DeprecationWarning,
                        stacklevel=3,
                    )
                    artifacts_path = artifacts_path / self._model_path

            # Each mode ships its own weights sub-folder.
            if self.mode == TableFormerMode.ACCURATE:
                artifacts_path = artifacts_path / "accurate"
            else:
                artifacts_path = artifacts_path / "fast"

            # Third Party
            # Imported lazily so the dependency is only needed when enabled.
            import docling_ibm_models.tableformer.common as c
            from docling_ibm_models.tableformer.data_management.tf_predictor import (
                TFPredictor,
            )

            device = decide_device(accelerator_options.device)
            # Disable MPS here, until we know why it makes things slower.
            if device == AcceleratorDevice.MPS.value:
                device = AcceleratorDevice.CPU.value

            self.tm_config = c.read_config(f"{artifacts_path}/tm_config.json")
            self.tm_config["model"]["save_dir"] = artifacts_path
            self.tm_model_type = self.tm_config["model"]["type"]

            self.tf_predictor = TFPredictor(
                self.tm_config, device, accelerator_options.num_threads
            )
            self.scale = 2.0  # Scale up table input images to 144 dpi
@classmethod
def get_options_type(cls) -> type[TableStructureOptions]:
    """Return the options class consumed by this model."""
    return TableStructureOptions
@staticmethod
def download_models(
    local_dir: Optional[Path] = None, force: bool = False, progress: bool = False
) -> Path:
    """Download the docling-models snapshot from Hugging Face.

    Args:
        local_dir: Target directory; a default cache location is used when None.
        force: Re-download even if the files are already present.
        progress: Show a download progress indicator.

    Returns:
        The local directory the snapshot was written to.
    """
    return download_hf_model(
        repo_id="docling-project/docling-models",
        revision="v2.3.0",
        local_dir=local_dir,
        force=force,
        progress=progress,
    )
def draw_table_and_cells(
    self,
    conv_res: ConversionResult,
    page: Page,
    tbl_list: Iterable[Table],
    show: bool = False,
):
    """Render a debug image of predicted tables and their cells.

    Table bounding boxes are drawn in red, the input page cells in green,
    and the predicted table cells in blue (thicker outline for column
    headers, labelled with their row/column offsets). The image is either
    shown interactively or written to the debug output folder.
    """
    assert page._backend is not None
    assert page.size is not None

    image = (
        page._backend.get_page_image()
    )  # make new image to avoid drawing on the saved ones
    sx = image.width / page.size.width
    sy = image.height / page.size.height

    def scaled(box):
        # Map page coordinates to image pixel coordinates.
        x0, y0, x1, y1 = box
        return (x0 * sx, y0 * sy), (x1 * sx, y1 * sy)

    draw = ImageDraw.Draw(image)

    for tbl in tbl_list:
        top_left, bottom_right = scaled(tbl.cluster.bbox.as_tuple())
        draw.rectangle([top_left, bottom_right], outline="red")

        for cell in tbl.cluster.cells:
            c0, c1 = scaled(cell.rect.to_bounding_box().as_tuple())
            draw.rectangle([c0, c1], outline="green")

        for tc in tbl.table_cells:
            if tc.bbox is None:
                continue
            t0, t1 = scaled(tc.bbox.as_tuple())
            width = 3 if tc.column_header else 1
            draw.rectangle([t0, t1], outline="blue", width=width)
            draw.text(
                (t0[0] + 3, t0[1] + 3),
                text=f"{tc.start_row_offset_idx}, {tc.start_col_offset_idx}",
                fill="black",
            )

    if show:
        image.show()
    else:
        out_path: Path = (
            Path(settings.debug.debug_output_path)
            / f"debug_{conv_res.input.file.stem}"
        )
        out_path.mkdir(parents=True, exist_ok=True)

        out_file = out_path / f"table_struct_page_{page.page_no:05}.png"
        image.save(str(out_file), format="png")
def predict_tables(
    self,
    conv_res: ConversionResult,
    pages: Sequence[Page],
) -> Sequence[TableStructurePrediction]:
    """Run TableFormer over the table clusters of each page.

    For every page, gathers the layout clusters labelled as tables (or
    document indexes), feeds their crops plus text-cell tokens to the
    TableFormer predictor, and stores one Table per cluster in the page's
    TableStructurePrediction.

    Returns:
        One TableStructurePrediction per input page, in order.
    """
    pages = list(pages)
    predictions: list[TableStructurePrediction] = []

    for page in pages:
        assert page._backend is not None
        if not page._backend.is_valid():
            # Keep any prediction already attached; otherwise attach an empty one.
            existing_prediction = (
                page.predictions.tablestructure or TableStructurePrediction()
            )
            page.predictions.tablestructure = existing_prediction
            predictions.append(existing_prediction)
            continue

        with TimeRecorder(conv_res, "table_structure"):
            assert page.predictions.layout is not None
            assert page.size is not None

            table_prediction = TableStructurePrediction()
            page.predictions.tablestructure = table_prediction

            # Pair each table-like cluster with its bbox scaled to model space.
            in_tables = [
                (
                    cluster,
                    [
                        round(cluster.bbox.l) * self.scale,
                        round(cluster.bbox.t) * self.scale,
                        round(cluster.bbox.r) * self.scale,
                        round(cluster.bbox.b) * self.scale,
                    ],
                )
                for cluster in page.predictions.layout.clusters
                if cluster.label
                in [DocItemLabel.TABLE, DocItemLabel.DOCUMENT_INDEX]
            ]
            if not in_tables:
                predictions.append(table_prediction)
                continue

            page_input = {
                "width": page.size.width * self.scale,
                "height": page.size.height * self.scale,
                "image": numpy.asarray(page.get_image(scale=self.scale)),
            }

            for table_cluster, tbl_box in in_tables:
                # Check if word-level cells are available from backend:
                sp = page._backend.get_segmented_page()
                if sp is not None:
                    tcells = sp.get_cells_in_bbox(
                        cell_unit=TextCellUnit.WORD,
                        bbox=table_cluster.bbox,
                    )
                    if len(tcells) == 0:
                        # In case word-level cells yield empty
                        tcells = table_cluster.cells
                else:
                    # Otherwise - we use normal (line/phrase) cells
                    tcells = table_cluster.cells

                tokens = []
                for c in tcells:
                    # Only allow non empty strings (spaces) into the cells of a table
                    if len(c.text.strip()) > 0:
                        new_cell = copy.deepcopy(c)
                        # Scale cell geometry to match the model-space page image.
                        new_cell.rect = BoundingRectangle.from_bounding_box(
                            new_cell.rect.to_bounding_box().scaled(scale=self.scale)
                        )

                        tokens.append(
                            {
                                "id": new_cell.index,
                                "text": new_cell.text,
                                "bbox": new_cell.rect.to_bounding_box().model_dump(),
                            }
                        )

                page_input["tokens"] = tokens

                tf_output = self.tf_predictor.multi_table_predict(
                    page_input, [tbl_box], do_matching=self.do_cell_matching
                )
                table_out = tf_output[0]

                table_cells = []
                for element in table_out["tf_responses"]:
                    if not self.do_cell_matching:
                        # Without cell matching, recover the cell text directly
                        # from the page backend at the predicted bbox.
                        the_bbox = BoundingBox.model_validate(
                            element["bbox"]
                        ).scaled(1 / self.scale)
                        text_piece = page._backend.get_text_in_rect(the_bbox)
                        element["bbox"]["token"] = text_piece

                    tc = TableCell.model_validate(element)
                    if tc.bbox is not None:
                        # Map the predicted bbox back to page coordinates.
                        tc.bbox = tc.bbox.scaled(1 / self.scale)
                    table_cells.append(tc)

                assert "predict_details" in table_out

                # Retrieving cols/rows, after post processing:
                num_rows = table_out["predict_details"].get("num_rows", 0)
                num_cols = table_out["predict_details"].get("num_cols", 0)
                otsl_seq = (
                    table_out["predict_details"]
                    .get("prediction", {})
                    .get("rs_seq", [])
                )

                tbl = Table(
                    otsl_seq=otsl_seq,
                    table_cells=table_cells,
                    num_rows=num_rows,
                    num_cols=num_cols,
                    id=table_cluster.id,
                    page_no=page.page_no,
                    cluster=table_cluster,
                    label=table_cluster.label,
                )

                table_prediction.table_map[table_cluster.id] = tbl

            if settings.debug.visualize_tables:
                self.draw_table_and_cells(
                    conv_res,
                    page,
                    page.predictions.tablestructure.table_map.values(),
                )

            predictions.append(table_prediction)

    return predictions
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/document_picture_classifier.py | docling/models/document_picture_classifier.py | from collections.abc import Iterable
from pathlib import Path
from typing import List, Literal, Optional, Union
import numpy as np
from docling_core.types.doc import (
DoclingDocument,
NodeItem,
PictureClassificationClass,
PictureClassificationData,
PictureItem,
)
from PIL import Image
from pydantic import BaseModel
from docling.datamodel.accelerator_options import AcceleratorOptions
from docling.datamodel.base_models import ItemAndImageEnrichmentElement
from docling.models.base_model import BaseItemAndImageEnrichmentModel
from docling.models.utils.hf_model_download import download_hf_model
from docling.utils.accelerator_utils import decide_device
class DocumentPictureClassifierOptions(BaseModel):
    """
    Options for configuring the DocumentPictureClassifier.

    Attributes
    ----------
    kind : Literal["document_picture_classifier"]
        Identifier for the type of classifier; used for options dispatch.
    """

    kind: Literal["document_picture_classifier"] = "document_picture_classifier"
class DocumentPictureClassifier(BaseItemAndImageEnrichmentModel):
    """
    A model for classifying pictures in documents.

    This class enriches document pictures with predicted classifications
    based on a predefined set of classes.

    Attributes
    ----------
    enabled : bool
        Whether the classifier is enabled for use.
    options : DocumentPictureClassifierOptions
        Configuration options for the classifier.
    document_picture_classifier : DocumentFigureClassifierPredictor
        The underlying prediction model, loaded only if the classifier is
        enabled.

    Methods
    -------
    __init__(enabled, artifacts_path, options, accelerator_options)
        Initializes the classifier with specified configurations.
    is_processable(doc, element)
        Checks if the given element can be processed by the classifier.
    __call__(doc, element_batch)
        Processes a batch of elements and adds classification annotations.
    """

    # Sub-folder name used under a shared artifacts directory.
    _model_repo_folder = "docling-project--DocumentFigureClassifier"
    # Scale at which picture crops are rendered before classification.
    images_scale = 2
def __init__(
    self,
    enabled: bool,
    artifacts_path: Optional[Path],
    options: DocumentPictureClassifierOptions,
    accelerator_options: AcceleratorOptions,
):
    """
    Initializes the DocumentPictureClassifier.

    Parameters
    ----------
    enabled : bool
        Indicates whether the classifier is enabled.
    artifacts_path : Optional[Path]
        Path to the directory containing model artifacts; downloaded from
        Hugging Face when None.
    options : DocumentPictureClassifierOptions
        Configuration options for the classifier.
    accelerator_options : AcceleratorOptions
        Options for configuring the device and parallelism.
    """
    self.enabled = enabled
    self.options = options

    if self.enabled:
        device = decide_device(accelerator_options.device)

        # Imported lazily so the dependency is only needed when enabled.
        from docling_ibm_models.document_figure_classifier_model.document_figure_classifier_predictor import (
            DocumentFigureClassifierPredictor,
        )

        if artifacts_path is None:
            artifacts_path = self.download_models()
        else:
            artifacts_path = artifacts_path / self._model_repo_folder

        self.document_picture_classifier = DocumentFigureClassifierPredictor(
            artifacts_path=str(artifacts_path),
            device=device,
            num_threads=accelerator_options.num_threads,
        )
@staticmethod
def download_models(
    local_dir: Optional[Path] = None, force: bool = False, progress: bool = False
) -> Path:
    """Fetch the DocumentFigureClassifier weights from Hugging Face.

    Returns the local directory the snapshot was written to.
    """
    download_args = {
        "repo_id": "docling-project/DocumentFigureClassifier",
        "revision": "v1.0.1",
        "local_dir": local_dir,
        "force": force,
        "progress": progress,
    }
    return download_hf_model(**download_args)
def is_processable(self, doc: DoclingDocument, element: NodeItem) -> bool:
    """
    Determines if the given element can be processed by the classifier.

    Only PictureItem elements are classified, and only when the model is
    enabled.

    Parameters
    ----------
    doc : DoclingDocument
        The document containing the element.
    element : NodeItem
        The element to be checked.

    Returns
    -------
    bool
        True if the element is a PictureItem and processing is enabled.
    """
    if not self.enabled:
        return False
    return isinstance(element, PictureItem)
def __call__(
    self,
    doc: DoclingDocument,
    element_batch: Iterable[ItemAndImageEnrichmentElement],
) -> Iterable[NodeItem]:
    """
    Classify a batch of pictures and attach the predictions.

    When disabled, items are yielded unchanged. Otherwise each picture
    receives a PictureClassificationData annotation holding the predicted
    classes and their confidences.

    Parameters
    ----------
    doc : DoclingDocument
        The document containing the elements to be processed.
    element_batch : Iterable[ItemAndImageEnrichmentElement]
        A batch of pictures to classify.

    Returns
    -------
    Iterable[NodeItem]
        The processed picture items.
    """
    if not self.enabled:
        for prepared in element_batch:
            yield prepared.item
        return

    pictures: List[PictureItem] = []
    picture_images: List[Union[Image.Image, np.ndarray]] = []
    for prepared in element_batch:
        assert isinstance(prepared.item, PictureItem)
        pictures.append(prepared.item)
        picture_images.append(prepared.image)

    outputs = self.document_picture_classifier.predict(picture_images)

    for picture, output in zip(pictures, outputs):
        predicted = [
            PictureClassificationClass(
                class_name=pred[0],
                confidence=pred[1],
            )
            for pred in output
        ]
        picture.annotations.append(
            PictureClassificationData(
                provenance="DocumentPictureClassifier",
                predicted_classes=predicted,
            )
        )
        yield picture
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
docling-project/docling | https://github.com/docling-project/docling/blob/7a4593f41f14ffce3871b11a92fdd56d391ffcd0/docling/models/picture_description_api_model.py | docling/models/picture_description_api_model.py | from collections.abc import Iterable
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Optional, Type, Union
from PIL import Image
from docling.datamodel.accelerator_options import AcceleratorOptions
from docling.datamodel.pipeline_options import (
PictureDescriptionApiOptions,
PictureDescriptionBaseOptions,
)
from docling.exceptions import OperationNotAllowed
from docling.models.picture_description_base_model import PictureDescriptionBaseModel
from docling.utils.api_image_request import api_image_request
class PictureDescriptionApiModel(PictureDescriptionBaseModel):
    """Picture description model backed by a remote HTTP API.

    Requires explicit opt-in via ``enable_remote_services`` because image
    data is sent to an external endpoint.
    """

    # elements_batch_size = 4

    @classmethod
    def get_options_type(cls) -> Type[PictureDescriptionBaseOptions]:
        """Return the options class consumed by this model."""
        return PictureDescriptionApiOptions

    def __init__(
        self,
        enabled: bool,
        enable_remote_services: bool,
        artifacts_path: Optional[Union[Path, str]],
        options: PictureDescriptionApiOptions,
        accelerator_options: AcceleratorOptions,
    ):
        """Initialize the API-backed description model.

        Raises:
            OperationNotAllowed: When *enabled* is True but remote services
                have not been explicitly allowed.
        """
        super().__init__(
            enabled=enabled,
            enable_remote_services=enable_remote_services,
            artifacts_path=artifacts_path,
            options=options,
            accelerator_options=accelerator_options,
        )
        self.options: PictureDescriptionApiOptions
        self.concurrency = self.options.concurrency

        if self.enabled:
            if not enable_remote_services:
                raise OperationNotAllowed(
                    "Connections to remote services is only allowed when set explicitly. "
                    "pipeline_options.enable_remote_services=True."
                )

    def _annotate_images(self, images: Iterable[Image.Image]) -> Iterable[str]:
        """Describe each image by calling the configured endpoint.

        Requests are issued concurrently (up to self.concurrency workers);
        results are yielded in input order.
        """
        # Note: technically we could make a batch request here,
        # but not all APIs will allow for it. For example, vllm won't allow more than 1.
        def _api_request(image):
            page_tags, _, _ = api_image_request(
                image=image,
                prompt=self.options.prompt,
                url=self.options.url,
                timeout=self.options.timeout,
                headers=self.options.headers,
                **self.options.params,
            )
            return page_tags

        with ThreadPoolExecutor(max_workers=self.concurrency) as executor:
            yield from executor.map(_api_request, images)
| python | MIT | 7a4593f41f14ffce3871b11a92fdd56d391ffcd0 | 2026-01-04T14:39:31.148558Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.