Dataset columns:
  sample_id: string (21 to 196 chars)
  text: string (105 to 936k chars)
  metadata: dict
  category: string (6 classes)
django/django:tests/resolve_url/views.py
from django.http import HttpResponse
from django.views import View


def some_view(request):
    return HttpResponse("ok")


def params_view(request, slug):
    return HttpResponse(f"Params: {slug}")


class SomeView(View):
    def get(self, request):
        return HttpResponse("ok")


class ParamsView(View):
    def get(self, request, pk):
        return HttpResponse(f"Params: {pk}")


some_cbv = SomeView.as_view()
params_cbv = ParamsView.as_view()
{ "repo_id": "django/django", "file_path": "tests/resolve_url/views.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 14, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
django/django:tests/urlpatterns/lazy_path_urls.py
from django.urls import include, path
from django.utils.translation import gettext_lazy as _

from . import views

urlpatterns = [
    path(_("included_urls/"), include("urlpatterns.included_urls")),
    path(_("lazy/<slug:slug>/"), views.empty_view, name="lazy"),
]
{ "repo_id": "django/django", "file_path": "tests/urlpatterns/lazy_path_urls.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 7, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
django/django:django/db/models/functions/uuid.py
from django.db import NotSupportedError
from django.db.models.expressions import Func
from django.db.models.fields import UUIDField


class UUID4(Func):
    function = "UUIDV4"
    arity = 0
    output_field = UUIDField()

    def as_sql(self, compiler, connection, **extra_context):
        if connection.features.supports_uuid4_function:
            return super().as_sql(compiler, connection, **extra_context)
        raise NotSupportedError("UUID4 is not supported on this database backend.")

    def as_postgresql(self, compiler, connection, **extra_context):
        if connection.features.is_postgresql_18:
            return self.as_sql(compiler, connection, **extra_context)
        return self.as_sql(
            compiler, connection, function="GEN_RANDOM_UUID", **extra_context
        )

    def as_mysql(self, compiler, connection, **extra_context):
        if not connection.features.supports_uuid4_function:
            if connection.mysql_is_mariadb:
                raise NotSupportedError(
                    "UUID4 requires MariaDB version 11.7 or later."
                )
            raise NotSupportedError("UUID4 is not supported on MySQL.")
        return self.as_sql(compiler, connection, function="UUID_V4", **extra_context)

    def as_oracle(self, compiler, connection, **extra_context):
        if not connection.features.supports_uuid4_function:
            raise NotSupportedError(
                "UUID4 requires Oracle version 23ai/26ai (23.9) or later."
            )
        return self.as_sql(compiler, connection, function="UUID", **extra_context)


class UUID7(Func):
    function = "UUIDV7"
    arity = 1
    output_field = UUIDField()

    def __init__(self, shift=None, **extra):
        super().__init__(shift, **extra)

    def _parse_expressions(self, *expressions):
        if expressions[0] is None:
            expressions = expressions[1:]
        return super()._parse_expressions(*expressions)

    def as_sql(self, compiler, connection, **extra_context):
        if not connection.features.supports_uuid7_function:
            raise NotSupportedError("UUID7 is not supported on this database backend.")
        if len(self.source_expressions) == 1:
            if not connection.features.supports_uuid7_function_shift:
                msg = (
                    "The shift argument to UUID7 is not supported "
                    "on this database backend."
                )
                raise NotSupportedError(msg)
        return super().as_sql(compiler, connection, **extra_context)

    def as_postgresql(self, compiler, connection, **extra_context):
        if connection.features.supports_uuid7_function:
            return self.as_sql(compiler, connection, **extra_context)
        raise NotSupportedError("UUID7 requires PostgreSQL version 18 or later.")

    # PY314: When dropping support for 3.14, remove the entire method.
    def as_sqlite(self, compiler, connection, **extra_context):
        if connection.features.supports_uuid7_function:
            return self.as_sql(compiler, connection, **extra_context)
        raise NotSupportedError(
            "UUID7 on SQLite requires Python version 3.14 or later."
        )

    def as_mysql(self, compiler, connection, **extra_context):
        if connection.features.supports_uuid7_function:
            return self.as_sql(
                compiler, connection, function="UUID_V7", **extra_context
            )
        if connection.mysql_is_mariadb:
            raise NotSupportedError("UUID7 requires MariaDB version 11.7 or later.")
        raise NotSupportedError("UUID7 is not supported on MySQL.")
{ "repo_id": "django/django", "file_path": "django/db/models/functions/uuid.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 69, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
django/django:tests/db_functions/test_uuid.py
import uuid
from datetime import datetime, timedelta, timezone

from django.db import NotSupportedError, connection
from django.db.models.functions import UUID4, UUID7
from django.test import TestCase
from django.test.testcases import skipIfDBFeature, skipUnlessDBFeature

from .models import UUIDModel


class TestUUID(TestCase):
    @skipUnlessDBFeature("supports_uuid4_function")
    def test_uuid4(self):
        m1 = UUIDModel.objects.create()
        m2 = UUIDModel.objects.create()
        UUIDModel.objects.update(uuid=UUID4())
        m1.refresh_from_db()
        m2.refresh_from_db()
        self.assertIsInstance(m1.uuid, uuid.UUID)
        self.assertEqual(m1.uuid.version, 4)
        self.assertNotEqual(m1.uuid, m2.uuid)

    @skipUnlessDBFeature("supports_uuid7_function")
    def test_uuid7(self):
        m1 = UUIDModel.objects.create()
        m2 = UUIDModel.objects.create()
        UUIDModel.objects.update(uuid=UUID7())
        m1.refresh_from_db()
        m2.refresh_from_db()
        self.assertIsInstance(m1.uuid, uuid.UUID)
        self.assertEqual(m1.uuid.version, 7)
        self.assertNotEqual(m1.uuid, m2.uuid)

    @skipUnlessDBFeature("supports_uuid7_function_shift")
    def test_uuid7_shift(self):
        now = datetime.now(timezone.utc)
        past = datetime(2005, 11, 16, tzinfo=timezone.utc)
        shift = past - now
        m = UUIDModel.objects.create(uuid=UUID7(shift))
        self.assertTrue(str(m.uuid).startswith("0107965e-e40"), m.uuid)

    @skipUnlessDBFeature("supports_uuid7_function_shift")
    def test_uuid7_shift_duration_field(self):
        now = datetime.now(timezone.utc)
        past = datetime(2005, 11, 16, tzinfo=timezone.utc)
        shift = past - now
        m = UUIDModel.objects.create(shift=shift)
        UUIDModel.objects.update(uuid=UUID7("shift"))
        m.refresh_from_db()
        self.assertTrue(str(m.uuid).startswith("0107965e-e40"), m.uuid)

    @skipIfDBFeature("supports_uuid4_function")
    def test_uuid4_unsupported(self):
        if connection.vendor == "mysql":
            if connection.mysql_is_mariadb:
                msg = "UUID4 requires MariaDB version 11.7 or later."
            else:
                msg = "UUID4 is not supported on MySQL."
        elif connection.vendor == "oracle":
            msg = "UUID4 requires Oracle version 23ai/26ai (23.9) or later."
        else:
            msg = "UUID4 is not supported on this database backend."
        with self.assertRaisesMessage(NotSupportedError, msg):
            UUIDModel.objects.update(uuid=UUID4())

    @skipIfDBFeature("supports_uuid7_function")
    def test_uuid7_unsupported(self):
        if connection.vendor == "mysql":
            if connection.mysql_is_mariadb:
                msg = "UUID7 requires MariaDB version 11.7 or later."
            else:
                msg = "UUID7 is not supported on MySQL."
        elif connection.vendor == "postgresql":
            msg = "UUID7 requires PostgreSQL version 18 or later."
        elif connection.vendor == "sqlite":
            msg = "UUID7 on SQLite requires Python version 3.14 or later."
        else:
            msg = "UUID7 is not supported on this database backend."
        with self.assertRaisesMessage(NotSupportedError, msg):
            UUIDModel.objects.update(uuid=UUID7())

    @skipUnlessDBFeature("supports_uuid7_function")
    @skipIfDBFeature("supports_uuid7_function_shift")
    def test_uuid7_shift_unsupported(self):
        msg = "The shift argument to UUID7 is not supported on this database backend."
        with self.assertRaisesMessage(NotSupportedError, msg):
            UUIDModel.objects.update(uuid=UUID7(shift=timedelta(hours=12)))
{ "repo_id": "django/django", "file_path": "tests/db_functions/test_uuid.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 78, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
django/django:scripts/archive_eol_stable_branches.py
#! /usr/bin/env python3
import argparse
import os
import subprocess
import sys


def run(cmd, *, cwd=None, env=None, dry_run=True):
    """Run a command with optional dry-run behavior."""
    environ = os.environ.copy()
    if env:
        environ.update(env)
    if dry_run:
        print("[DRY RUN]", " ".join(cmd))
    else:
        print("[EXECUTE]", " ".join(cmd))
        try:
            result = subprocess.check_output(
                cmd, cwd=cwd, env=environ, stderr=subprocess.STDOUT
            )
        except subprocess.CalledProcessError as e:
            result = e.output
            print(" [ERROR]", result)
            raise
        else:
            print(" [RESULT]", result)
        return result.decode().strip()


def validate_env(checkout_dir):
    if not checkout_dir:
        sys.exit("Error: checkout directory not provided (--checkout-dir).")
    if not os.path.exists(checkout_dir):
        sys.exit(f"Error: checkout directory '{checkout_dir}' does not exist.")
    if not os.path.isdir(checkout_dir):
        sys.exit(f"Error: '{checkout_dir}' is not a directory.")


def get_remote_branches(checkout_dir, include_fn):
    """Return list of remote branches filtered by include_fn."""
    result = run(
        ["git", "branch", "--list", "-r"],
        cwd=checkout_dir,
        dry_run=False,
    )
    branches = [b.strip() for b in result.split("\n") if b.strip()]
    return [b for b in branches if include_fn(b)]


def get_branch_info(checkout_dir, branch):
    """Return (commit_hash, last_update_date) for a given branch."""
    commit_hash = run(["git", "rev-parse", branch], cwd=checkout_dir, dry_run=False)
    last_update = run(
        ["git", "show", branch, "--format=format:%ai", "-s"],
        cwd=checkout_dir,
        dry_run=False,
    )
    return commit_hash, last_update


def create_tag(checkout_dir, branch, commit_hash, last_update, *, dry_run=True):
    """Create a tag locally for a given branch at its last update."""
    tag_name = branch.replace("origin/", "", 1)
    msg = f'"Tagged {tag_name} for EOL stable branch removal."'
    run(
        ["git", "tag", "--sign", "--message", msg, tag_name, commit_hash],
        cwd=checkout_dir,
        env={"GIT_COMMITTER_DATE": last_update},
        dry_run=dry_run,
    )
    return tag_name


def delete_remote_and_local_branch(checkout_dir, branch, *, dry_run=True):
    """Delete a remote branch from origin and the matching local branch."""
    try:
        run(
            ["git", "branch", "-D", branch],
            cwd=checkout_dir,
            dry_run=dry_run,
        )
    except subprocess.CalledProcessError:
        print(f"[ERROR] Local branch {branch} cannot be deleted.")
    run(
        ["git", "push", "origin", "--delete", branch.replace("origin/", "", 1)],
        cwd=checkout_dir,
        dry_run=dry_run,
    )


def main():
    parser = argparse.ArgumentParser(
        description="Archive Django branches into tags and optionally delete them."
    )
    parser.add_argument(
        "--checkout-dir", required=True, help="Path to Django git checkout"
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Print commands instead of executing them",
    )
    parser.add_argument(
        "--branches", nargs="*", help="Specific remote branches to include (optional)"
    )
    args = parser.parse_args()

    validate_env(args.checkout_dir)
    dry_run = args.dry_run
    checkout_dir = args.checkout_dir

    if args.branches:
        wanted = set(f"origin/{b}" for b in args.branches)
    else:
        wanted = set()

    branches = get_remote_branches(checkout_dir, include_fn=lambda b: b in wanted)
    if not branches:
        print("No branches matched inclusion criteria.")
        return

    print("\nMatched branches:")
    print("\n".join(branches))
    print()

    branch_updates = {b: get_branch_info(checkout_dir, b) for b in branches}
    print("\nLast updates:")
    for b, (h, d) in branch_updates.items():
        print(f"{b}\t{h}\t{d}")

    if (
        input("\nDelete remote branches and create tags? [y/N]: ").strip().lower()
        == "y"
    ):
        for b, (commit_hash, last_update_date) in branch_updates.items():
            print(f"Creating tag for {b} at {commit_hash=} with {last_update_date=}")
            create_tag(checkout_dir, b, commit_hash, last_update_date, dry_run=dry_run)
            print(f"Deleting remote branch {b}")
            delete_remote_and_local_branch(checkout_dir, b, dry_run=dry_run)
        run(
            ["git", "push", "--tags"],
            cwd=checkout_dir,
            dry_run=dry_run,
        )
    print("Done.")


if __name__ == "__main__":
    main()
{ "repo_id": "django/django", "file_path": "scripts/archive_eol_stable_branches.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 127, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
django/django:scripts/do_django_release.py
#! /usr/bin/env python
"""Helper to build and publish Django artifacts.

Original author: Tim Graham.
Other authors: Mariusz Felisiak, Natalia Bidart.
"""

import hashlib
import os
import re
import subprocess
from datetime import date

PGP_KEY_ID = os.getenv("PGP_KEY_ID")
PGP_KEY_URL = os.getenv("PGP_KEY_URL")
PGP_EMAIL = os.getenv("PGP_EMAIL")
DEST_FOLDER = os.path.expanduser(os.getenv("DEST_FOLDER"))

assert (
    PGP_KEY_ID
), "Missing PGP_KEY_ID: Set this env var to your PGP key ID (used for signing)."
assert (
    PGP_KEY_URL
), "Missing PGP_KEY_URL: Set this env var to your PGP public key URL (for fetching)."
assert DEST_FOLDER and os.path.exists(
    DEST_FOLDER
), "Missing DEST_FOLDER: Set this env var to the local path to place the artifacts."

checksum_file_text = """This file contains MD5, SHA1, and SHA256 checksums for the
source-code tarball and wheel files of Django {django_version}, released
{release_date}.

To use this file, you will need a working install of PGP or other compatible
public-key encryption software. You will also need to have the Django release
manager's public key in your keyring. This key has the ID ``{pgp_key_id}`` and
can be imported from the MIT keyserver, for example, if using the open-source
GNU Privacy Guard implementation of PGP:

    gpg --keyserver pgp.mit.edu --recv-key {pgp_key_id}

or via the GitHub API:

    curl {pgp_key_url} | gpg --import -

Once the key is imported, verify this file:

    gpg --verify {checksum_file_name}

Once you have verified this file, you can use normal MD5, SHA1, or SHA256
checksumming applications to generate the checksums of the Django package and
compare them to the checksums listed below.

Release packages
================

https://www.djangoproject.com/download/{django_version}/tarball/
https://www.djangoproject.com/download/{django_version}/wheel/

MD5 checksums
=============

{md5_tarball} {tarball_name}
{md5_wheel} {wheel_name}

SHA1 checksums
==============

{sha1_tarball} {tarball_name}
{sha1_wheel} {wheel_name}

SHA256 checksums
================

{sha256_tarball} {tarball_name}
{sha256_wheel} {wheel_name}
"""


def build_artifacts():
    from build.__main__ import main as build_main

    build_main([])


def do_checksum(checksum_algo, release_file):
    with open(os.path.join(dist_path, release_file), "rb") as f:
        return checksum_algo(f.read()).hexdigest()


# Ensure the working directory is clean.
subprocess.call(["git", "clean", "-fdx"])

django_repo_path = os.path.abspath(os.path.curdir)
dist_path = os.path.join(django_repo_path, "dist")

# Build release files.
build_artifacts()

release_files = os.listdir(dist_path)
wheel_name = None
tarball_name = None
for f in release_files:
    if f.endswith(".whl"):
        wheel_name = f
    if f.endswith(".tar.gz"):
        tarball_name = f
assert wheel_name is not None
assert tarball_name is not None

django_version = wheel_name.split("-")[1]
django_major_version = ".".join(django_version.split(".")[:2])
artifacts_path = os.path.join(os.path.expanduser(DEST_FOLDER), django_version)
os.makedirs(artifacts_path, exist_ok=True)

# Chop alpha/beta/rc suffix.
match = re.search("[abrc]", django_major_version)
if match:
    django_major_version = django_major_version[: match.start()]

release_date = date.today().strftime("%B %-d, %Y")
checksum_file_name = f"Django-{django_version}.checksum.txt"
checksum_file_kwargs = dict(
    release_date=release_date,
    pgp_key_id=PGP_KEY_ID,
    django_version=django_version,
    pgp_key_url=PGP_KEY_URL,
    checksum_file_name=checksum_file_name,
    wheel_name=wheel_name,
    tarball_name=tarball_name,
)
checksums = (
    ("md5", hashlib.md5),
    ("sha1", hashlib.sha1),
    ("sha256", hashlib.sha256),
)
for checksum_name, checksum_algo in checksums:
    checksum_file_kwargs[f"{checksum_name}_tarball"] = do_checksum(
        checksum_algo, tarball_name
    )
    checksum_file_kwargs[f"{checksum_name}_wheel"] = do_checksum(
        checksum_algo, wheel_name
    )

# Create the checksum file.
checksum_file_text = checksum_file_text.format(**checksum_file_kwargs)
checksum_file_path = os.path.join(artifacts_path, checksum_file_name)
with open(checksum_file_path, "wb") as f:
    f.write(checksum_file_text.encode("ascii"))

print("\n\nDiffing release with checkout for sanity check.")
# Unzip and diff.
unzip_command = [
    "unzip",
    "-q",
    os.path.join(dist_path, wheel_name),
    "-d",
    os.path.join(dist_path, django_major_version),
]
subprocess.run(unzip_command)
diff_command = [
    "diff",
    "-qr",
    "./django/",
    os.path.join(dist_path, django_major_version, "django"),
]
subprocess.run(diff_command)
subprocess.run(
    [
        "rm",
        "-rf",
        os.path.join(dist_path, django_major_version),
    ]
)

print("\n\n=> Commands to run NOW:")
# Sign the checksum file, this may prompt for a passphrase.
pgp_email = f"-u {PGP_EMAIL} " if PGP_EMAIL else ""
print(f"gpg --clearsign {pgp_email}--digest-algo SHA256 {checksum_file_path}")
# Create, verify and push tag.
print(f'git tag --sign --message="Tag {django_version}" {django_version}')
print(f"git tag --verify {django_version}")

# Copy binaries outside the current repo tree to avoid losing them.
subprocess.run(["cp", "-r", dist_path, artifacts_path])

# Make the binaries available to the world.
print(
    "\n\n=> Do these ONLY 15 MINUTES BEFORE RELEASE TIME (consider a new terminal "
    "session with isolated venv)!"
)
# Upload the checksum file and release artifacts to the djangoproject admin.
print(
    "\n==> ACTION Add tarball, wheel, and checksum files to the Release entry at: "
    f"https://www.djangoproject.com/admin/releases/release/{django_version}"
)
print(
    f"* Tarball and wheel from {artifacts_path}\n"
    f"* Signed checksum {checksum_file_path}.asc"
)
# Test the new version and confirm the signature using Jenkins.
print("\n==> ACTION Test the release artifacts:")
print(f"VERSION={django_version} test_new_version.sh")
print("\n==> ACTION Run confirm-release job:")
print(f"VERSION={django_version} confirm_release.sh")
# Upload to PyPI.
print("\n==> ACTION Upload to PyPI, ensure your release venv is activated:")
print(f"cd {artifacts_path}")
print("pip install -U pip twine")
print("twine upload --repository django dist/*")
# Push the tags.
print("\n==> ACTION Push the tags:")
print("git push --tags")

print("\n\nDONE!!!")
{ "repo_id": "django/django", "file_path": "scripts/do_django_release.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 175, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
django/django:django/conf/locale/ht/formats.py
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "N j, Y"
TIME_FORMAT = "P"
DATETIME_FORMAT = "N j, Y, P"
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "F j"
SHORT_DATE_FORMAT = "d/m/Y"
SHORT_DATETIME_FORMAT = "d/m/Y P"
FIRST_DAY_OF_WEEK = 0

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    "%Y-%m-%d",  # '2006-10-25'
    "%m/%d/%Y",  # '10/25/2006'
    "%m/%d/%y",  # '10/25/06'
    "%b %d %Y",  # 'Oct 25 2006'
    "%b %d, %Y",  # 'Oct 25, 2006'
    "%d %b %Y",  # '25 Oct 2006'
    "%d %b, %Y",  # '25 Oct, 2006'
    "%B %d %Y",  # 'October 25 2006'
    "%B %d, %Y",  # 'October 25, 2006'
    "%d %B %Y",  # '25 October 2006'
    "%d %B, %Y",  # '25 October, 2006'
]
DATETIME_INPUT_FORMATS = [
    "%Y-%m-%d %H:%M:%S",  # '2006-10-25 14:30:59'
    "%Y-%m-%d %H:%M:%S.%f",  # '2006-10-25 14:30:59.000200'
    "%Y-%m-%d %H:%M",  # '2006-10-25 14:30'
    "%m/%d/%Y %H:%M:%S",  # '10/25/2006 14:30:59'
    "%m/%d/%Y %H:%M:%S.%f",  # '10/25/2006 14:30:59.000200'
    "%m/%d/%Y %H:%M",  # '10/25/2006 14:30'
    "%m/%d/%y %H:%M:%S",  # '10/25/06 14:30:59'
    "%m/%d/%y %H:%M:%S.%f",  # '10/25/06 14:30:59.000200'
    "%m/%d/%y %H:%M",  # '10/25/06 14:30'
]
TIME_INPUT_FORMATS = [
    "%H:%M:%S",  # '14:30:59'
    "%H:%M:%S.%f",  # '14:30:59.000200'
    "%H:%M",  # '14:30'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "\xa0"
NUMBER_GROUPING = 3
{ "repo_id": "django/django", "file_path": "django/conf/locale/ht/formats.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 46, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
django/django:django/tasks/backends/base.py
from abc import ABCMeta, abstractmethod
from inspect import iscoroutinefunction

from asgiref.sync import sync_to_async

from django.conf import settings
from django.tasks import DEFAULT_TASK_QUEUE_NAME
from django.tasks.base import (
    DEFAULT_TASK_PRIORITY,
    TASK_MAX_PRIORITY,
    TASK_MIN_PRIORITY,
    Task,
)
from django.tasks.exceptions import InvalidTask
from django.utils import timezone
from django.utils.inspect import get_func_args, is_module_level_function


class BaseTaskBackend(metaclass=ABCMeta):
    task_class = Task

    # Does the backend support Tasks to be enqueued with the run_after
    # attribute?
    supports_defer = False
    # Does the backend support coroutines to be enqueued?
    supports_async_task = False
    # Does the backend support results being retrieved (from any
    # thread/process)?
    supports_get_result = False
    # Does the backend support executing Tasks in a given
    # priority order?
    supports_priority = False

    def __init__(self, alias, params):
        self.alias = alias
        self.queues = set(params.get("QUEUES", [DEFAULT_TASK_QUEUE_NAME]))
        self.options = params.get("OPTIONS", {})

    def validate_task(self, task):
        """
        Determine whether the provided Task can be executed by the backend.
        """
        if not is_module_level_function(task.func):
            raise InvalidTask("Task function must be defined at a module level.")
        if not self.supports_async_task and iscoroutinefunction(task.func):
            raise InvalidTask("Backend does not support async Tasks.")
        task_func_args = get_func_args(task.func)
        if task.takes_context and (
            not task_func_args or task_func_args[0] != "context"
        ):
            raise InvalidTask(
                "Task takes context but does not have a first argument of 'context'."
            )
        if not self.supports_priority and task.priority != DEFAULT_TASK_PRIORITY:
            raise InvalidTask("Backend does not support setting priority of tasks.")
        if (
            task.priority < TASK_MIN_PRIORITY
            or task.priority > TASK_MAX_PRIORITY
            or int(task.priority) != task.priority
        ):
            raise InvalidTask(
                f"priority must be a whole number between {TASK_MIN_PRIORITY} and "
                f"{TASK_MAX_PRIORITY}."
            )
        if not self.supports_defer and task.run_after is not None:
            raise InvalidTask("Backend does not support run_after.")
        if (
            settings.USE_TZ
            and task.run_after is not None
            and not timezone.is_aware(task.run_after)
        ):
            raise InvalidTask("run_after must be an aware datetime.")
        if self.queues and task.queue_name not in self.queues:
            raise InvalidTask(f"Queue '{task.queue_name}' is not valid for backend.")

    @abstractmethod
    def enqueue(self, task, args, kwargs):
        """Queue up a task to be executed."""

    async def aenqueue(self, task, args, kwargs):
        """Queue up a task function (or coroutine) to be executed."""
        return await sync_to_async(self.enqueue, thread_sensitive=True)(
            task=task, args=args, kwargs=kwargs
        )

    def get_result(self, result_id):
        """
        Retrieve a task result by id.

        Raise TaskResultDoesNotExist if such result does not exist.
        """
        raise NotImplementedError(
            "This backend does not support retrieving or refreshing results."
        )

    async def aget_result(self, result_id):
        """See get_result()."""
        return await sync_to_async(self.get_result, thread_sensitive=True)(
            result_id=result_id
        )

    def check(self, **kwargs):
        return []
{ "repo_id": "django/django", "file_path": "django/tasks/backends/base.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 90, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
django/django:django/tasks/backends/dummy.py
from copy import deepcopy

from django.tasks.base import TaskResult, TaskResultStatus
from django.tasks.exceptions import TaskResultDoesNotExist
from django.tasks.signals import task_enqueued
from django.utils import timezone
from django.utils.crypto import get_random_string

from .base import BaseTaskBackend


class DummyBackend(BaseTaskBackend):
    supports_defer = True
    supports_async_task = True
    supports_priority = True

    def __init__(self, alias, params):
        super().__init__(alias, params)
        self.results = []

    def _store_result(self, result):
        object.__setattr__(result, "enqueued_at", timezone.now())
        self.results.append(result)
        task_enqueued.send(type(self), task_result=result)

    def enqueue(self, task, args, kwargs):
        self.validate_task(task)
        result = TaskResult(
            task=task,
            id=get_random_string(32),
            status=TaskResultStatus.READY,
            enqueued_at=None,
            started_at=None,
            last_attempted_at=None,
            finished_at=None,
            args=args,
            kwargs=kwargs,
            backend=self.alias,
            errors=[],
            worker_ids=[],
        )
        self._store_result(result)
        # Copy the result to prevent mutation issues.
        return deepcopy(result)

    def get_result(self, result_id):
        # Results are only scoped to the current thread, hence
        # supports_get_result is False.
        try:
            return next(result for result in self.results if result.id == result_id)
        except StopIteration:
            raise TaskResultDoesNotExist(result_id) from None

    async def aget_result(self, result_id):
        try:
            return next(result for result in self.results if result.id == result_id)
        except StopIteration:
            raise TaskResultDoesNotExist(result_id) from None

    def clear(self):
        self.results.clear()
{ "repo_id": "django/django", "file_path": "django/tasks/backends/dummy.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 51, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
django/django:django/tasks/backends/immediate.py
import logging
from traceback import format_exception

from django.tasks.base import TaskContext, TaskError, TaskResult, TaskResultStatus
from django.tasks.signals import task_enqueued, task_finished, task_started
from django.utils import timezone
from django.utils.crypto import get_random_string
from django.utils.json import normalize_json

from .base import BaseTaskBackend

logger = logging.getLogger(__name__)


class ImmediateBackend(BaseTaskBackend):
    supports_async_task = True
    supports_priority = True

    def __init__(self, alias, params):
        super().__init__(alias, params)
        self.worker_id = get_random_string(32)

    def _execute_task(self, task_result):
        """
        Execute the Task for the given TaskResult, mutating it with the outcome.
        """
        object.__setattr__(task_result, "enqueued_at", timezone.now())
        task_enqueued.send(type(self), task_result=task_result)
        task = task_result.task

        task_start_time = timezone.now()
        object.__setattr__(task_result, "status", TaskResultStatus.RUNNING)
        object.__setattr__(task_result, "started_at", task_start_time)
        object.__setattr__(task_result, "last_attempted_at", task_start_time)
        task_result.worker_ids.append(self.worker_id)
        task_started.send(sender=type(self), task_result=task_result)

        try:
            if task.takes_context:
                raw_return_value = task.call(
                    TaskContext(task_result=task_result),
                    *task_result.args,
                    **task_result.kwargs,
                )
            else:
                raw_return_value = task.call(*task_result.args, **task_result.kwargs)
            object.__setattr__(
                task_result,
                "_return_value",
                normalize_json(raw_return_value),
            )
        except KeyboardInterrupt:
            # If the user tried to terminate, let them.
            raise
        except BaseException as e:
            object.__setattr__(task_result, "finished_at", timezone.now())
            exception_type = type(e)
            task_result.errors.append(
                TaskError(
                    exception_class_path=(
                        f"{exception_type.__module__}.{exception_type.__qualname__}"
                    ),
                    traceback="".join(format_exception(e)),
                )
            )
            object.__setattr__(task_result, "status", TaskResultStatus.FAILED)
            task_finished.send(type(self), task_result=task_result)
        else:
            object.__setattr__(task_result, "finished_at", timezone.now())
            object.__setattr__(task_result, "status", TaskResultStatus.SUCCESSFUL)
            task_finished.send(type(self), task_result=task_result)

    def enqueue(self, task, args, kwargs):
        self.validate_task(task)
        task_result = TaskResult(
            task=task,
            id=get_random_string(32),
            status=TaskResultStatus.READY,
            enqueued_at=None,
            started_at=None,
            last_attempted_at=None,
            finished_at=None,
            args=args,
            kwargs=kwargs,
            backend=self.alias,
            errors=[],
            worker_ids=[],
        )
        self._execute_task(task_result)
        return task_result
{ "repo_id": "django/django", "file_path": "django/tasks/backends/immediate.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 81, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
django/django:django/tasks/base.py
from collections.abc import Callable
from dataclasses import dataclass, field, replace
from datetime import datetime
from inspect import isclass, iscoroutinefunction
from typing import Any

from asgiref.sync import async_to_sync, sync_to_async

from django.db.models.enums import TextChoices
from django.utils.json import normalize_json
from django.utils.module_loading import import_string
from django.utils.translation import pgettext_lazy

from .exceptions import TaskResultMismatch

DEFAULT_TASK_BACKEND_ALIAS = "default"
DEFAULT_TASK_PRIORITY = 0
DEFAULT_TASK_QUEUE_NAME = "default"
TASK_MAX_PRIORITY = 100
TASK_MIN_PRIORITY = -100
TASK_REFRESH_ATTRS = {
    "errors",
    "_return_value",
    "finished_at",
    "started_at",
    "last_attempted_at",
    "status",
    "enqueued_at",
    "worker_ids",
}


class TaskResultStatus(TextChoices):
    # The Task has just been enqueued, or is ready to be executed again.
    READY = ("READY", pgettext_lazy("Task", "Ready"))
    # The Task is currently running.
    RUNNING = ("RUNNING", pgettext_lazy("Task", "Running"))
    # The Task raised an exception during execution, or was unable to start.
    FAILED = ("FAILED", pgettext_lazy("Task", "Failed"))
    # The Task has finished running successfully.
    SUCCESSFUL = ("SUCCESSFUL", pgettext_lazy("Task", "Successful"))


@dataclass(frozen=True, slots=True, kw_only=True)
class Task:
    priority: int
    func: Callable[..., Any]  # The Task function.
    backend: str
    queue_name: str
    run_after: datetime | None  # The earliest this Task will run.
    # Whether the Task receives the Task context when executed.
    takes_context: bool = False

    def __post_init__(self):
        self.get_backend().validate_task(self)

    @property
    def name(self):
        return self.func.__name__

    def using(
        self,
        *,
        priority=None,
        queue_name=None,
        run_after=None,
        backend=None,
    ):
        """Create a new Task with modified defaults."""
        changes = {}
        if priority is not None:
            changes["priority"] = priority
        if queue_name is not None:
            changes["queue_name"] = queue_name
        if run_after is not None:
            changes["run_after"] = run_after
        if backend is not None:
            changes["backend"] = backend
        return replace(self, **changes)

    def enqueue(self, *args, **kwargs):
        """Queue up the Task to be executed."""
        return self.get_backend().enqueue(self, args, kwargs)

    async def aenqueue(self, *args, **kwargs):
        """Queue up the Task to be executed."""
        return await self.get_backend().aenqueue(self, args, kwargs)

    def get_result(self, result_id):
        """
        Retrieve a task result by id.

        Raise TaskResultDoesNotExist if such result does not exist, or raise
        TaskResultMismatch if the result exists but belongs to another Task.
        """
        result = self.get_backend().get_result(result_id)
        if result.task.func != self.func:
            raise TaskResultMismatch(
                f"Task does not match (received {result.task.module_path!r})"
            )
        return result

    async def aget_result(self, result_id):
        """See get_result()."""
        result = await self.get_backend().aget_result(result_id)
        if result.task.func != self.func:
            raise TaskResultMismatch(
                f"Task does not match (received {result.task.module_path!r})"
            )
        return result

    def call(self, *args, **kwargs):
        if iscoroutinefunction(self.func):
            return async_to_sync(self.func)(*args, **kwargs)
        return self.func(*args, **kwargs)

    async def acall(self, *args, **kwargs):
        if iscoroutinefunction(self.func):
            return await self.func(*args, **kwargs)
        return await sync_to_async(self.func)(*args, **kwargs)

    def get_backend(self):
        from . import task_backends

        return task_backends[self.backend]

    @property
    def module_path(self):
        return f"{self.func.__module__}.{self.func.__qualname__}"


def task(
    function=None,
    *,
    priority=DEFAULT_TASK_PRIORITY,
    queue_name=DEFAULT_TASK_QUEUE_NAME,
    backend=DEFAULT_TASK_BACKEND_ALIAS,
    takes_context=False,
):
    from . import task_backends

    def wrapper(f):
        return task_backends[backend].task_class(
            priority=priority,
            func=f,
            queue_name=queue_name,
            backend=backend,
            takes_context=takes_context,
            run_after=None,
        )

    if function:
        return wrapper(function)
    return wrapper


@dataclass(frozen=True, slots=True, kw_only=True)
class TaskError:
    exception_class_path: str
    traceback: str

    @property
    def exception_class(self):
        # Lazy resolve the exception class.
        exception_class = import_string(self.exception_class_path)
        if not isclass(exception_class) or not issubclass(
            exception_class, BaseException
        ):
            raise ValueError(
                f"{self.exception_class_path!r} does not reference a valid exception."
            )
        return exception_class


@dataclass(frozen=True, slots=True, kw_only=True)
class TaskResult:
    task: Task
    id: str  # Unique identifier for the task result.
    status: TaskResultStatus
    enqueued_at: datetime | None  # Time the task was enqueued.
    started_at: datetime | None  # Time the task was started.
    finished_at: datetime | None  # Time the task was finished.
    # Time the task was last attempted to be run.
    last_attempted_at: datetime | None
    args: list[Any]  # Arguments to pass to the task function.
    kwargs: dict[str, Any]  # Keyword arguments to pass to the task function.
    backend: str
    errors: list[TaskError]  # Errors raised when running the task.
    worker_ids: list[str]  # Workers which have processed the task.

    _return_value: Any | None = field(init=False, default=None)

    def __post_init__(self):
        object.__setattr__(self, "args", normalize_json(self.args))
        object.__setattr__(self, "kwargs", normalize_json(self.kwargs))

    @property
    def return_value(self):
        """
        The return value of the task.

        If the task didn't succeed, an exception is raised. This is to
        distinguish against the task returning None.
        """
        if self.status == TaskResultStatus.SUCCESSFUL:
            return self._return_value
        elif self.status == TaskResultStatus.FAILED:
            raise ValueError("Task failed")
        else:
            raise ValueError("Task has not finished yet")

    @property
    def is_finished(self):
        return self.status in {TaskResultStatus.FAILED, TaskResultStatus.SUCCESSFUL}

    @property
    def attempts(self):
        return len(self.worker_ids)

    def refresh(self):
        """Reload the cached task data from the task store."""
        refreshed_task = self.task.get_backend().get_result(self.id)
        for attr in TASK_REFRESH_ATTRS:
            object.__setattr__(self, attr, getattr(refreshed_task, attr))

    async def arefresh(self):
        """Reload the cached task data from the task store."""
        refreshed_task = await self.task.get_backend().aget_result(self.id)
        for attr in TASK_REFRESH_ATTRS:
            object.__setattr__(self, attr, getattr(refreshed_task, attr))


@dataclass(frozen=True, slots=True, kw_only=True)
class TaskContext:
    task_result: TaskResult

    @property
    def attempt(self):
        return self.task_result.attempts
{ "repo_id": "django/django", "file_path": "django/tasks/base.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 200, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
django/django:django/tasks/checks.py
from django.core import checks


@checks.register
def check_tasks(app_configs=None, **kwargs):
    """Checks all registered Task backends."""
    from . import task_backends

    for backend in task_backends.all():
        yield from backend.check()
{ "repo_id": "django/django", "file_path": "django/tasks/checks.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 7, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
django/django:django/tasks/exceptions.py
from django.core.exceptions import ImproperlyConfigured


class TaskException(Exception):
    """Base class for task-related exceptions. Do not raise directly."""


class InvalidTask(TaskException):
    """The provided Task is invalid."""


class InvalidTaskBackend(ImproperlyConfigured):
    """The provided Task backend is invalid."""


class TaskResultDoesNotExist(TaskException):
    """The requested TaskResult does not exist."""


class TaskResultMismatch(TaskException):
    """The requested TaskResult is invalid."""
{ "repo_id": "django/django", "file_path": "django/tasks/exceptions.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 11, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
django/django:django/tasks/signals.py
import logging
import sys

from asgiref.local import Local

from django.core.signals import setting_changed
from django.dispatch import Signal, receiver

from .base import TaskResultStatus

logger = logging.getLogger("django.tasks")

task_enqueued = Signal()
task_finished = Signal()
task_started = Signal()


@receiver(setting_changed)
def clear_tasks_handlers(*, setting, **kwargs):
    """Reset the connection handler whenever the settings change."""
    if setting == "TASKS":
        from . import task_backends

        task_backends._settings = task_backends.settings = (
            task_backends.configure_settings(None)
        )
        task_backends._connections = Local()


@receiver(task_enqueued)
def log_task_enqueued(sender, task_result, **kwargs):
    logger.debug(
        "Task id=%s path=%s enqueued backend=%s",
        task_result.id,
        task_result.task.module_path,
        task_result.backend,
    )


@receiver(task_started)
def log_task_started(sender, task_result, **kwargs):
    logger.info(
        "Task id=%s path=%s state=%s",
        task_result.id,
        task_result.task.module_path,
        task_result.status,
    )


@receiver(task_finished)
def log_task_finished(sender, task_result, **kwargs):
    # Signal is sent inside exception handlers, so exc_info() is available.
    exc_info = sys.exc_info()
    logger.log(
        (
            logging.ERROR
            if task_result.status == TaskResultStatus.FAILED
            else logging.INFO
        ),
        "Task id=%s path=%s state=%s",
        task_result.id,
        task_result.task.module_path,
        task_result.status,
        exc_info=exc_info if exc_info[0] else None,
    )
{ "repo_id": "django/django", "file_path": "django/tasks/signals.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 51, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
django/django:django/utils/json.py
from collections.abc import Mapping, Sequence


def normalize_json(obj):
    """Recursively normalize an object into JSON-compatible types."""
    match obj:
        case Mapping():
            return {normalize_json(k): normalize_json(v) for k, v in obj.items()}
        case bytes():
            try:
                return obj.decode("utf-8")
            except UnicodeDecodeError:
                raise ValueError(f"Unsupported value: {type(obj)}")
        case str() | int() | float() | bool() | None:
            return obj
        case Sequence():
            # str and bytes were already handled.
            return [normalize_json(v) for v in obj]
        case _:
            # Other types can't be serialized to JSON.
            raise TypeError(f"Unsupported type: {type(obj)}")
{ "repo_id": "django/django", "file_path": "django/utils/json.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 17, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
django/django:tests/tasks/tasks.py
import time

from django.tasks import TaskContext, task


@task()
def noop_task(*args, **kwargs):
    return None


@task
def noop_task_from_bare_decorator(*args, **kwargs):
    return None


@task()
async def noop_task_async(*args, **kwargs):
    return None


@task()
def calculate_meaning_of_life():
    return 42


@task()
def failing_task_value_error():
    raise ValueError("This Task failed due to ValueError")


@task()
def failing_task_system_exit():
    raise SystemExit("This Task failed due to SystemExit")


@task()
def failing_task_keyboard_interrupt():
    raise KeyboardInterrupt("This Task failed due to KeyboardInterrupt")


@task()
def complex_exception():
    raise ValueError(ValueError("This task failed"))


@task()
def complex_return_value():
    # Return something which isn't JSON serializable nor picklable.
    return lambda: True


@task()
def exit_task():
    exit(1)


@task()
def hang():
    """Do nothing for 5 minutes."""
    time.sleep(300)


@task()
def sleep_for(seconds):
    time.sleep(seconds)


@task(takes_context=True)
def get_task_id(context):
    return context.task_result.id


@task(takes_context=True)
def test_context(context, attempt):
    assert isinstance(context, TaskContext)
    assert context.attempt == attempt
{ "repo_id": "django/django", "file_path": "tests/tasks/tasks.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 49, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
django/django:tests/tasks/test_custom_backend.py
import logging
from unittest import mock

from django.tasks import default_task_backend, task_backends
from django.tasks.backends.base import BaseTaskBackend
from django.tasks.exceptions import InvalidTask
from django.test import SimpleTestCase, override_settings

from . import tasks as test_tasks


class CustomBackend(BaseTaskBackend):
    def __init__(self, alias, params):
        super().__init__(alias, params)
        self.prefix = self.options.get("prefix", "")

    def enqueue(self, *args, **kwargs):
        logger = logging.getLogger(__name__)
        logger.info(f"{self.prefix}Task enqueued.")


class CustomBackendNoEnqueue(BaseTaskBackend):
    pass


@override_settings(
    TASKS={
        "default": {
            "BACKEND": f"{CustomBackend.__module__}.{CustomBackend.__qualname__}",
            "OPTIONS": {"prefix": "PREFIX: "},
        },
        "no_enqueue": {
            "BACKEND": f"{CustomBackendNoEnqueue.__module__}."
            f"{CustomBackendNoEnqueue.__qualname__}",
        },
    }
)
class CustomBackendTestCase(SimpleTestCase):
    def test_using_correct_backend(self):
        self.assertEqual(default_task_backend, task_backends["default"])
        self.assertIsInstance(task_backends["default"], CustomBackend)
        self.assertEqual(default_task_backend.alias, "default")
        self.assertEqual(default_task_backend.options, {"prefix": "PREFIX: "})

    @mock.patch.multiple(CustomBackend, supports_async_task=False)
    def test_enqueue_async_task_on_non_async_backend(self):
        with self.assertRaisesMessage(
            InvalidTask, "Backend does not support async Tasks."
        ):
            default_task_backend.validate_task(test_tasks.noop_task_async)

    def test_backend_does_not_support_priority(self):
        with self.assertRaisesMessage(
            InvalidTask, "Backend does not support setting priority of tasks."
        ):
            test_tasks.noop_task.using(priority=10)

    def test_options(self):
        with self.assertLogs(__name__, level="INFO") as captured_logs:
            test_tasks.noop_task.enqueue()

        self.assertEqual(len(captured_logs.output), 1)
        self.assertIn("PREFIX: Task enqueued", captured_logs.output[0])

    def test_no_enqueue(self):
        with self.assertRaisesMessage(
            TypeError,
            "Can't instantiate abstract class CustomBackendNoEnqueue "
            "without an implementation for abstract method 'enqueue'",
        ):
            test_tasks.noop_task.using(backend="no_enqueue")
{ "repo_id": "django/django", "file_path": "tests/tasks/test_custom_backend.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 57, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
django/django:tests/tasks/test_dummy_backend.py
from typing import cast
from unittest import mock

from django.db import transaction
from django.tasks import TaskResultStatus, default_task_backend, task_backends
from django.tasks.backends.dummy import DummyBackend
from django.tasks.base import Task
from django.tasks.exceptions import InvalidTask, TaskResultDoesNotExist
from django.test import SimpleTestCase, TransactionTestCase, override_settings

from . import tasks as test_tasks


@override_settings(
    TASKS={
        "default": {
            "BACKEND": "django.tasks.backends.dummy.DummyBackend",
            "QUEUES": [],
        }
    }
)
class DummyBackendTestCase(SimpleTestCase):
    def setUp(self):
        default_task_backend.clear()

    def test_using_correct_backend(self):
        self.assertEqual(default_task_backend, task_backends["default"])
        self.assertIsInstance(task_backends["default"], DummyBackend)
        self.assertEqual(default_task_backend.alias, "default")
        self.assertEqual(default_task_backend.options, {})

    def test_enqueue_task(self):
        for task in [test_tasks.noop_task, test_tasks.noop_task_async]:
            with self.subTest(task):
                result = cast(Task, task).enqueue(1, two=3)
                self.assertEqual(result.status, TaskResultStatus.READY)
                self.assertIs(result.is_finished, False)
                self.assertIsNone(result.started_at)
                self.assertIsNone(result.last_attempted_at)
                self.assertIsNone(result.finished_at)
                with self.assertRaisesMessage(ValueError, "Task has not finished yet"):
                    result.return_value
                self.assertEqual(result.task, task)
                self.assertEqual(result.args, [1])
                self.assertEqual(result.kwargs, {"two": 3})
                self.assertEqual(result.attempts, 0)
                self.assertIn(result, default_task_backend.results)

    async def test_enqueue_task_async(self):
        for task in [test_tasks.noop_task, test_tasks.noop_task_async]:
            with self.subTest(task):
                result = await cast(Task, task).aenqueue()
                self.assertEqual(result.status, TaskResultStatus.READY)
                self.assertIs(result.is_finished, False)
                self.assertIsNone(result.started_at)
                self.assertIsNone(result.last_attempted_at)
                self.assertIsNone(result.finished_at)
                with self.assertRaisesMessage(ValueError, "Task has not finished yet"):
                    result.return_value
                self.assertEqual(result.task, task)
                self.assertEqual(result.args, [])
                self.assertEqual(result.kwargs, {})
                self.assertEqual(result.attempts, 0)
                self.assertIn(result, default_task_backend.results)

    def test_get_result(self):
        result = default_task_backend.enqueue(test_tasks.noop_task, (), {})
        new_result = default_task_backend.get_result(result.id)
        self.assertEqual(result, new_result)

    async def test_get_result_async(self):
        result = await default_task_backend.aenqueue(test_tasks.noop_task, (), {})
        new_result = await default_task_backend.aget_result(result.id)
        self.assertEqual(result, new_result)

    def test_refresh_result(self):
        result = default_task_backend.enqueue(
            test_tasks.calculate_meaning_of_life, (), {}
        )
        enqueued_result = default_task_backend.results[0]
        object.__setattr__(enqueued_result, "status", TaskResultStatus.SUCCESSFUL)
        self.assertEqual(result.status, TaskResultStatus.READY)
        result.refresh()
        self.assertEqual(result.status, TaskResultStatus.SUCCESSFUL)

    async def test_refresh_result_async(self):
        result = await default_task_backend.aenqueue(
            test_tasks.calculate_meaning_of_life, (), {}
        )
        enqueued_result = default_task_backend.results[0]
        object.__setattr__(enqueued_result, "status", TaskResultStatus.SUCCESSFUL)
        self.assertEqual(result.status, TaskResultStatus.READY)
        await result.arefresh()
        self.assertEqual(result.status, TaskResultStatus.SUCCESSFUL)

    async def test_get_missing_result(self):
        with self.assertRaises(TaskResultDoesNotExist):
            default_task_backend.get_result("123")
        with self.assertRaises(TaskResultDoesNotExist):
            await default_task_backend.aget_result("123")

    def test_enqueue_logs(self):
        with self.assertLogs("django.tasks", level="DEBUG") as captured_logs:
            result = test_tasks.noop_task.enqueue()

        self.assertEqual(len(captured_logs.output), 1)
        self.assertIn("enqueued", captured_logs.output[0])
        self.assertIn(result.id, captured_logs.output[0])

    def test_errors(self):
        result = test_tasks.noop_task.enqueue()
        self.assertEqual(result.errors, [])

    def test_validate_disallowed_async_task(self):
        with mock.patch.multiple(default_task_backend, supports_async_task=False):
            with self.assertRaisesMessage(
                InvalidTask, "Backend does not support async Tasks."
            ):
                default_task_backend.validate_task(test_tasks.noop_task_async)

    def test_check(self):
        errors = list(default_task_backend.check())
        self.assertEqual(len(errors), 0, errors)

    def test_takes_context(self):
        result = test_tasks.get_task_id.enqueue()
        self.assertEqual(result.status, TaskResultStatus.READY)

    def test_clear(self):
        result = test_tasks.noop_task.enqueue()
        default_task_backend.get_result(result.id)
        default_task_backend.clear()
        with self.assertRaisesMessage(TaskResultDoesNotExist, result.id):
            default_task_backend.get_result(result.id)

    def test_validate_on_enqueue(self):
        task_with_custom_queue_name = test_tasks.noop_task.using(
            queue_name="unknown_queue"
        )
        with override_settings(
            TASKS={
                "default": {
                    "BACKEND": "django.tasks.backends.dummy.DummyBackend",
                    "QUEUES": ["queue-1"],
                }
            }
        ):
            with self.assertRaisesMessage(
                InvalidTask, "Queue 'unknown_queue' is not valid for backend"
            ):
                task_with_custom_queue_name.enqueue()

    async def test_validate_on_aenqueue(self):
        task_with_custom_queue_name = test_tasks.noop_task.using(
            queue_name="unknown_queue"
        )
        with override_settings(
            TASKS={
                "default": {
                    "BACKEND": "django.tasks.backends.dummy.DummyBackend",
                    "QUEUES": ["queue-1"],
                }
            }
        ):
            with self.assertRaisesMessage(
                InvalidTask, "Queue 'unknown_queue' is not valid for backend"
            ):
                await task_with_custom_queue_name.aenqueue()


class DummyBackendTransactionTestCase(TransactionTestCase):
    available_apps = []

    @override_settings(
        TASKS={
            "default": {
                "BACKEND": "django.tasks.backends.dummy.DummyBackend",
            }
        }
    )
    def test_doesnt_wait_until_transaction_commit_by_default(self):
        with transaction.atomic():
            result = test_tasks.noop_task.enqueue()
            self.assertIsNotNone(result.enqueued_at)
            self.assertEqual(len(default_task_backend.results), 1)

        self.assertEqual(len(default_task_backend.results), 1)
{ "repo_id": "django/django", "file_path": "tests/tasks/test_dummy_backend.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 162, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
django/django:tests/tasks/test_immediate_backend.py
from django.db import transaction
from django.tasks import TaskResultStatus, default_task_backend, task_backends
from django.tasks.backends.immediate import ImmediateBackend
from django.tasks.exceptions import InvalidTask
from django.test import SimpleTestCase, TransactionTestCase, override_settings
from django.utils import timezone

from . import tasks as test_tasks


@override_settings(
    TASKS={
        "default": {
            "BACKEND": "django.tasks.backends.immediate.ImmediateBackend",
            "QUEUES": [],
        }
    }
)
class ImmediateBackendTestCase(SimpleTestCase):
    def test_using_correct_backend(self):
        self.assertEqual(default_task_backend, task_backends["default"])
        self.assertIsInstance(task_backends["default"], ImmediateBackend)
        self.assertEqual(default_task_backend.alias, "default")
        self.assertEqual(default_task_backend.options, {})

    def test_enqueue_task(self):
        for task in [test_tasks.noop_task, test_tasks.noop_task_async]:
            with self.subTest(task):
                result = task.enqueue(1, two=3)
                self.assertEqual(result.status, TaskResultStatus.SUCCESSFUL)
                self.assertIs(result.is_finished, True)
                self.assertIsNotNone(result.started_at)
                self.assertIsNotNone(result.last_attempted_at)
                self.assertIsNotNone(result.finished_at)
                self.assertGreaterEqual(result.started_at, result.enqueued_at)
                self.assertGreaterEqual(result.finished_at, result.started_at)
                self.assertIsNone(result.return_value)
                self.assertEqual(result.task, task)
                self.assertEqual(result.args, [1])
                self.assertEqual(result.kwargs, {"two": 3})
                self.assertEqual(result.attempts, 1)

    async def test_enqueue_task_async(self):
        for task in [test_tasks.noop_task, test_tasks.noop_task_async]:
            with self.subTest(task):
                result = await task.aenqueue()
                self.assertEqual(result.status, TaskResultStatus.SUCCESSFUL)
                self.assertIs(result.is_finished, True)
                self.assertIsNotNone(result.started_at)
                self.assertIsNotNone(result.last_attempted_at)
                self.assertIsNotNone(result.finished_at)
                self.assertGreaterEqual(result.started_at, result.enqueued_at)
                self.assertGreaterEqual(result.finished_at, result.started_at)
                self.assertIsNone(result.return_value)
                self.assertEqual(result.task, task)
                self.assertEqual(result.args, [])
                self.assertEqual(result.kwargs, {})
                self.assertEqual(result.attempts, 1)

    def test_catches_exception(self):
        test_data = [
            (
                test_tasks.failing_task_value_error,  # Task function.
                ValueError,  # Expected exception.
                "This Task failed due to ValueError",  # Expected message.
            ),
            (
                test_tasks.failing_task_system_exit,
                SystemExit,
                "This Task failed due to SystemExit",
            ),
        ]
        for task, exception, message in test_data:
            with (
                self.subTest(task),
                self.assertLogs("django.tasks", level="ERROR") as captured_logs,
            ):
                result = task.enqueue()

                self.assertEqual(len(captured_logs.output), 1)
                self.assertIn(message, captured_logs.output[0])
                self.assertEqual(result.status, TaskResultStatus.FAILED)
                with self.assertRaisesMessage(ValueError, "Task failed"):
                    result.return_value
                self.assertIs(result.is_finished, True)
                self.assertIsNotNone(result.started_at)
                self.assertIsNotNone(result.last_attempted_at)
                self.assertIsNotNone(result.finished_at)
                self.assertGreaterEqual(result.started_at, result.enqueued_at)
                self.assertGreaterEqual(result.finished_at, result.started_at)
                self.assertEqual(result.errors[0].exception_class, exception)
                traceback = result.errors[0].traceback
                self.assertIs(
                    traceback
                    and traceback.endswith(f"{exception.__name__}: {message}\n"),
                    True,
                    traceback,
                )
                self.assertEqual(result.task, task)
                self.assertEqual(result.args, [])
                self.assertEqual(result.kwargs, {})

    def test_throws_keyboard_interrupt(self):
        with self.assertRaises(KeyboardInterrupt):
            with self.assertNoLogs("django.tasks", level="ERROR"):
                default_task_backend.enqueue(
                    test_tasks.failing_task_keyboard_interrupt, [], {}
                )

    def test_complex_exception(self):
        with self.assertLogs("django.tasks", level="ERROR"):
            result = test_tasks.complex_exception.enqueue()

        self.assertEqual(result.status, TaskResultStatus.FAILED)
        with self.assertRaisesMessage(ValueError, "Task failed"):
            result.return_value
        self.assertIsNotNone(result.started_at)
        self.assertIsNotNone(result.last_attempted_at)
        self.assertIsNotNone(result.finished_at)
        self.assertGreaterEqual(result.started_at, result.enqueued_at)
        self.assertGreaterEqual(result.finished_at, result.started_at)
        self.assertIsNone(result._return_value)
        self.assertEqual(result.errors[0].exception_class, ValueError)
        self.assertIn(
            'ValueError(ValueError("This task failed"))', result.errors[0].traceback
        )
        self.assertEqual(result.task, test_tasks.complex_exception)
        self.assertEqual(result.args, [])
        self.assertEqual(result.kwargs, {})

    def test_complex_return_value(self):
        with self.assertLogs("django.tasks", level="ERROR"):
            result = test_tasks.complex_return_value.enqueue()

        self.assertEqual(result.status, TaskResultStatus.FAILED)
        self.assertIsNotNone(result.started_at)
        self.assertIsNotNone(result.last_attempted_at)
        self.assertIsNotNone(result.finished_at)
        self.assertGreaterEqual(result.started_at, result.enqueued_at)
        self.assertGreaterEqual(result.finished_at, result.started_at)
        self.assertIsNone(result._return_value)
        self.assertEqual(result.errors[0].exception_class, TypeError)
        self.assertIn("Unsupported type", result.errors[0].traceback)

    def test_result(self):
        result = default_task_backend.enqueue(
            test_tasks.calculate_meaning_of_life, [], {}
        )
        self.assertEqual(result.status, TaskResultStatus.SUCCESSFUL)
        self.assertEqual(result.return_value, 42)

    async def test_result_async(self):
        result = await default_task_backend.aenqueue(
            test_tasks.calculate_meaning_of_life, [], {}
        )
        self.assertEqual(result.status, TaskResultStatus.SUCCESSFUL)
        self.assertEqual(result.return_value, 42)

    async def test_cannot_get_result(self):
        with self.assertRaisesMessage(
            NotImplementedError,
            "This backend does not support retrieving or refreshing results.",
        ):
            default_task_backend.get_result("123")

        with self.assertRaisesMessage(
            NotImplementedError,
            "This backend does not support retrieving or refreshing results.",
        ):
            await default_task_backend.aget_result(123)

    async def test_cannot_refresh_result(self):
        result = await default_task_backend.aenqueue(
            test_tasks.calculate_meaning_of_life, (), {}
        )
        with self.assertRaisesMessage(
            NotImplementedError,
            "This backend does not support retrieving or refreshing results.",
        ):
            await result.arefresh()

        with self.assertRaisesMessage(
            NotImplementedError,
            "This backend does not support retrieving or refreshing results.",
        ):
            result.refresh()

    def test_cannot_pass_run_after(self):
        with self.assertRaisesMessage(
            InvalidTask,
            "Backend does not support run_after.",
        ):
            default_task_backend.validate_task(
                test_tasks.failing_task_value_error.using(run_after=timezone.now())
            )

    def test_enqueue_logs(self):
        with self.assertLogs("django.tasks", level="DEBUG") as captured_logs:
            result = test_tasks.noop_task.enqueue()

        self.assertEqual(len(captured_logs.output), 3)
        self.assertIn("enqueued", captured_logs.output[0])
        self.assertIn(result.id, captured_logs.output[0])
        self.assertIn("state=RUNNING", captured_logs.output[1])
        self.assertIn(result.id, captured_logs.output[1])
        self.assertIn("state=SUCCESSFUL", captured_logs.output[2])
        self.assertIn(result.id, captured_logs.output[2])

    def test_failed_logs(self):
        with self.assertLogs("django.tasks", level="DEBUG") as captured_logs:
            result = test_tasks.failing_task_value_error.enqueue()

        self.assertEqual(len(captured_logs.output), 3)
        self.assertIn("state=RUNNING", captured_logs.output[1])
        self.assertIn(result.id, captured_logs.output[1])
        self.assertIn("state=FAILED", captured_logs.output[2])
        self.assertIn(result.id, captured_logs.output[2])

    def test_successful_task_no_none_in_logs(self):
        with self.assertLogs("django.tasks", level="DEBUG") as captured_logs:
            result = test_tasks.noop_task.enqueue()

        self.assertEqual(result.status, TaskResultStatus.SUCCESSFUL)
        for log_output in captured_logs.output:
            self.assertNotIn("None", log_output)

    def test_takes_context(self):
        result = test_tasks.get_task_id.enqueue()
        self.assertEqual(result.return_value, result.id)

    def test_context(self):
        result = test_tasks.test_context.enqueue(1)
        self.assertEqual(result.status, TaskResultStatus.SUCCESSFUL)

    def test_validate_on_enqueue(self):
        task_with_custom_queue_name = test_tasks.noop_task.using(
            queue_name="unknown_queue"
        )
        with override_settings(
            TASKS={
                "default": {
                    "BACKEND": "django.tasks.backends.immediate.ImmediateBackend",
                    "QUEUES": ["queue-1"],
                }
            }
        ):
            with self.assertRaisesMessage(
                InvalidTask, "Queue 'unknown_queue' is not valid for backend"
            ):
                task_with_custom_queue_name.enqueue()

    async def test_validate_on_aenqueue(self):
        task_with_custom_queue_name = test_tasks.noop_task.using(
            queue_name="unknown_queue"
        )
        with override_settings(
            TASKS={
                "default": {
                    "BACKEND": "django.tasks.backends.immediate.ImmediateBackend",
                    "QUEUES": ["queue-1"],
                }
            }
        ):
            with self.assertRaisesMessage(
                InvalidTask, "Queue 'unknown_queue' is not valid for backend"
            ):
                await task_with_custom_queue_name.aenqueue()


class ImmediateBackendTransactionTestCase(TransactionTestCase):
    available_apps = []

    @override_settings(
        TASKS={
            "default": {
                "BACKEND": "django.tasks.backends.immediate.ImmediateBackend",
            }
        }
    )
    def test_doesnt_wait_until_transaction_commit_by_default(self):
        with transaction.atomic():
            result = test_tasks.noop_task.enqueue()
            self.assertIsNotNone(result.enqueued_at)
            self.assertEqual(result.status, TaskResultStatus.SUCCESSFUL)

        self.assertEqual(result.status, TaskResultStatus.SUCCESSFUL)
{ "repo_id": "django/django", "file_path": "tests/tasks/test_immediate_backend.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 253, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
django/django:tests/tasks/test_tasks.py
import dataclasses from datetime import datetime from django.tasks import ( DEFAULT_TASK_QUEUE_NAME, TaskResultStatus, default_task_backend, task, task_backends, ) from django.tasks.backends.dummy import DummyBackend from django.tasks.backends.immediate import ImmediateBackend from django.tasks.base import TASK_MAX_PRIORITY, TASK_MIN_PRIORITY, Task from django.tasks.exceptions import ( InvalidTask, InvalidTaskBackend, TaskResultDoesNotExist, TaskResultMismatch, ) from django.test import SimpleTestCase, override_settings from django.utils import timezone from django.utils.module_loading import import_string from . import tasks as test_tasks @override_settings( TASKS={ "default": { "BACKEND": "django.tasks.backends.dummy.DummyBackend", "QUEUES": ["default", "queue_1"], }, "immediate": { "BACKEND": "django.tasks.backends.immediate.ImmediateBackend", "QUEUES": [], }, "missing": {"BACKEND": "does.not.exist"}, }, USE_TZ=True, ) class TaskTestCase(SimpleTestCase): def setUp(self): default_task_backend.clear() def test_using_correct_backend(self): self.assertEqual(default_task_backend, task_backends["default"]) self.assertIsInstance(task_backends["default"], DummyBackend) def test_task_decorator(self): self.assertIsInstance(test_tasks.noop_task, Task) self.assertIsInstance(test_tasks.noop_task_async, Task) self.assertIsInstance(test_tasks.noop_task_from_bare_decorator, Task) def test_enqueue_task(self): result = test_tasks.noop_task.enqueue() self.assertEqual(result.status, TaskResultStatus.READY) self.assertEqual(result.task, test_tasks.noop_task) self.assertEqual(result.args, []) self.assertEqual(result.kwargs, {}) self.assertEqual(default_task_backend.results, [result]) async def test_enqueue_task_async(self): result = await test_tasks.noop_task.aenqueue() self.assertEqual(result.status, TaskResultStatus.READY) self.assertEqual(result.task, test_tasks.noop_task) self.assertEqual(result.args, []) self.assertEqual(result.kwargs, {}) self.assertEqual(default_task_backend.results, [result]) def test_enqueue_with_invalid_argument(self): with self.assertRaisesMessage(TypeError, "Unsupported type"): test_tasks.noop_task.enqueue(datetime.now()) async def test_aenqueue_with_invalid_argument(self): with self.assertRaisesMessage(TypeError, "Unsupported type"): await test_tasks.noop_task.aenqueue(datetime.now()) def test_using_priority(self): self.assertEqual(test_tasks.noop_task.priority, 0) self.assertEqual(test_tasks.noop_task.using(priority=1).priority, 1) self.assertEqual(test_tasks.noop_task.priority, 0) def test_using_queue_name(self): self.assertEqual(test_tasks.noop_task.queue_name, DEFAULT_TASK_QUEUE_NAME) self.assertEqual( test_tasks.noop_task.using(queue_name="queue_1").queue_name, "queue_1" ) self.assertEqual(test_tasks.noop_task.queue_name, DEFAULT_TASK_QUEUE_NAME) def test_using_run_after(self): now = timezone.now() self.assertIsNone(test_tasks.noop_task.run_after) self.assertEqual(test_tasks.noop_task.using(run_after=now).run_after, now) self.assertIsNone(test_tasks.noop_task.run_after) def test_using_unknown_backend(self): self.assertEqual(test_tasks.noop_task.backend, "default") with self.assertRaisesMessage( InvalidTaskBackend, "The connection 'unknown' doesn't exist." 
): test_tasks.noop_task.using(backend="unknown") def test_using_missing_backend(self): self.assertEqual(test_tasks.noop_task.backend, "default") with self.assertRaisesMessage( InvalidTaskBackend, "Could not find backend 'does.not.exist': No module named 'does'", ): test_tasks.noop_task.using(backend="missing") def test_using_creates_new_instance(self): new_task = test_tasks.noop_task.using() self.assertEqual(new_task, test_tasks.noop_task) self.assertIsNot(new_task, test_tasks.noop_task) def test_chained_using(self): now = timezone.now() run_after_task = test_tasks.noop_task.using(run_after=now) self.assertEqual(run_after_task.run_after, now) priority_task = run_after_task.using(priority=10) self.assertEqual(priority_task.priority, 10) self.assertEqual(priority_task.run_after, now) self.assertEqual(run_after_task.priority, 0) async def test_refresh_result(self): result = await test_tasks.noop_task.aenqueue() original_result = dataclasses.asdict(result) result.refresh() self.assertEqual(dataclasses.asdict(result), original_result) await result.arefresh() self.assertEqual(dataclasses.asdict(result), original_result) def test_naive_datetime(self): with self.assertRaisesMessage( InvalidTask, "run_after must be an aware datetime." ): test_tasks.noop_task.using(run_after=datetime.now()) def test_invalid_priority(self): with self.assertRaisesMessage( InvalidTask, f"priority must be a whole number between {TASK_MIN_PRIORITY} and " f"{TASK_MAX_PRIORITY}.", ): test_tasks.noop_task.using(priority=-101) with self.assertRaisesMessage( InvalidTask, f"priority must be a whole number between {TASK_MIN_PRIORITY} and " f"{TASK_MAX_PRIORITY}.", ): test_tasks.noop_task.using(priority=101) with self.assertRaisesMessage( InvalidTask, f"priority must be a whole number between {TASK_MIN_PRIORITY} and " f"{TASK_MAX_PRIORITY}.", ): test_tasks.noop_task.using(priority=3.1) test_tasks.noop_task.using(priority=100) test_tasks.noop_task.using(priority=-100) test_tasks.noop_task.using(priority=0) def test_unknown_queue_name(self): with self.assertRaisesMessage( InvalidTask, "Queue 'queue-2' is not valid for backend." ): test_tasks.noop_task.using(queue_name="queue-2") # Validation is bypassed when the backend QUEUES is an empty list. 
self.assertEqual( test_tasks.noop_task.using( queue_name="queue-2", backend="immediate" ).queue_name, "queue-2", ) def test_call_task(self): self.assertEqual(test_tasks.calculate_meaning_of_life.call(), 42) async def test_call_task_async(self): self.assertEqual(await test_tasks.calculate_meaning_of_life.acall(), 42) async def test_call_async_task(self): self.assertIsNone(await test_tasks.noop_task_async.acall()) def test_call_async_task_sync(self): self.assertIsNone(test_tasks.noop_task_async.call()) def test_get_result(self): result = default_task_backend.enqueue(test_tasks.noop_task, (), {}) new_result = test_tasks.noop_task.get_result(result.id) self.assertEqual(result, new_result) async def test_get_result_async(self): result = await default_task_backend.aenqueue(test_tasks.noop_task, (), {}) new_result = await test_tasks.noop_task.aget_result(result.id) self.assertEqual(result, new_result) async def test_get_missing_result(self): with self.assertRaises(TaskResultDoesNotExist): test_tasks.noop_task.get_result("123") with self.assertRaises(TaskResultDoesNotExist): await test_tasks.noop_task.aget_result("123") def test_get_incorrect_result(self): result = default_task_backend.enqueue(test_tasks.noop_task_async, (), {}) with self.assertRaisesMessage(TaskResultMismatch, "Task does not match"): test_tasks.noop_task.get_result(result.id) async def test_get_incorrect_result_async(self): result = await default_task_backend.aenqueue(test_tasks.noop_task_async, (), {}) with self.assertRaisesMessage(TaskResultMismatch, "Task does not match"): await test_tasks.noop_task.aget_result(result.id) def test_invalid_function(self): for invalid_function in [any, self.test_invalid_function]: with self.subTest(invalid_function): with self.assertRaisesMessage( InvalidTask, "Task function must be defined at a module level.", ): task()(invalid_function) def test_get_backend(self): self.assertEqual(test_tasks.noop_task.backend, "default") self.assertIsInstance(test_tasks.noop_task.get_backend(), DummyBackend) immediate_task = test_tasks.noop_task.using(backend="immediate") self.assertEqual(immediate_task.backend, "immediate") self.assertIsInstance(immediate_task.get_backend(), ImmediateBackend) def test_name(self): self.assertEqual(test_tasks.noop_task.name, "noop_task") self.assertEqual(test_tasks.noop_task_async.name, "noop_task_async") def test_module_path(self): self.assertEqual(test_tasks.noop_task.module_path, "tasks.tasks.noop_task") self.assertEqual( test_tasks.noop_task_async.module_path, "tasks.tasks.noop_task_async" ) self.assertIs( import_string(test_tasks.noop_task.module_path), test_tasks.noop_task ) self.assertIs( import_string(test_tasks.noop_task_async.module_path), test_tasks.noop_task_async, ) @override_settings(TASKS={}) def test_no_backends(self): with self.assertRaises(InvalidTaskBackend): test_tasks.noop_task.enqueue() def test_task_error_invalid_exception(self): with self.assertLogs("django.tasks"): immediate_task = test_tasks.failing_task_value_error.using( backend="immediate" ).enqueue() self.assertEqual(len(immediate_task.errors), 1) object.__setattr__( immediate_task.errors[0], "exception_class_path", "subprocess.run" ) with self.assertRaisesMessage( ValueError, "'subprocess.run' does not reference a valid exception." 
): immediate_task.errors[0].exception_class def test_task_error_unknown_module(self): with self.assertLogs("django.tasks"): immediate_task = test_tasks.failing_task_value_error.using( backend="immediate" ).enqueue() self.assertEqual(len(immediate_task.errors), 1) object.__setattr__( immediate_task.errors[0], "exception_class_path", "does.not.exist" ) with self.assertRaises(ImportError): immediate_task.errors[0].exception_class def test_takes_context_without_taking_context(self): with self.assertRaisesMessage( InvalidTask, "Task takes context but does not have a first argument of 'context'.", ): task(takes_context=True)(test_tasks.calculate_meaning_of_life.func)
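A hedged sketch of the Task.using() semantics the tests above pin down: using() returns a new Task with overridden options and never mutates the original, so overrides chain. The task below is hypothetical; run_after must be an aware datetime (as timezone.now() is under USE_TZ=True).

from django.tasks import task
from django.utils import timezone


@task()
def send_report():
    ...


def demo():
    urgent = send_report.using(priority=100)
    scheduled = urgent.using(run_after=timezone.now())
    assert send_report.priority == 0  # Original Task is unchanged.
    assert urgent.priority == 100
    assert scheduled.priority == 100 and scheduled.run_after is not None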
{ "repo_id": "django/django", "file_path": "tests/tasks/test_tasks.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 244, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
django/django:tests/utils_tests/test_json.py
import json
from collections import UserList, defaultdict
from datetime import datetime
from decimal import Decimal

from django.test import SimpleTestCase
from django.utils.json import normalize_json


class JSONNormalizeTestCase(SimpleTestCase):
    def test_converts_json_types(self):
        for test_case, expected in [
            (None, "null"),
            (True, "true"),
            (False, "false"),
            (2, "2"),
            (3.0, "3.0"),
            (1e23 + 1, "1e+23"),
            ("1", '"1"'),
            (b"hello", '"hello"'),
            ([], "[]"),
            (UserList([1, 2]), "[1, 2]"),
            ({}, "{}"),
            ({1: "a"}, '{"1": "a"}'),
            ({"foo": (1, 2, 3)}, '{"foo": [1, 2, 3]}'),
            (defaultdict(list), "{}"),
            (float("nan"), "NaN"),
            (float("inf"), "Infinity"),
            (float("-inf"), "-Infinity"),
        ]:
            with self.subTest(test_case):
                normalized = normalize_json(test_case)
                # Ensure that the normalized result is serializable.
                self.assertEqual(json.dumps(normalized), expected)

    def test_bytes_decode_error(self):
        with self.assertRaisesMessage(ValueError, "Unsupported value"):
            normalize_json(b"\xff")

    def test_encode_error(self):
        for test_case in [self, any, object(), datetime.now(), set(), Decimal("3.42")]:
            with (
                self.subTest(test_case),
                self.assertRaisesMessage(TypeError, "Unsupported type"),
            ):
                normalize_json(test_case)
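A minimal sketch of the normalize_json() contract the tests above verify: supported inputs are coerced to JSON-compatible Python types, so json.dumps() on the result cannot fail.

import json

from django.utils.json import normalize_json

payload = {"ids": (1, 2, 3), "label": b"report", "counts": {1: "a"}}
# Tuples become lists, bytes decode to str, non-string keys are stringified.
assert json.dumps(normalize_json(payload)) == (
    '{"ids": [1, 2, 3], "label": "report", "counts": {"1": "a"}}'
)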
{ "repo_id": "django/django", "file_path": "tests/utils_tests/test_json.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 41, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
django/django:django/views/decorators/csp.py
from functools import wraps
from inspect import iscoroutinefunction


def _make_csp_decorator(config_attr_name, config_attr_value):
    """General CSP override decorator factory."""
    if not isinstance(config_attr_value, dict):
        raise TypeError("CSP config should be a mapping.")

    def decorator(view_func):
        @wraps(view_func)
        async def _wrapped_async_view(request, *args, **kwargs):
            response = await view_func(request, *args, **kwargs)
            setattr(response, config_attr_name, config_attr_value)
            return response

        @wraps(view_func)
        def _wrapped_sync_view(request, *args, **kwargs):
            response = view_func(request, *args, **kwargs)
            setattr(response, config_attr_name, config_attr_value)
            return response

        if iscoroutinefunction(view_func):
            return _wrapped_async_view
        return _wrapped_sync_view

    return decorator


def csp_override(config):
    """Override the Content-Security-Policy header for a view."""
    return _make_csp_decorator("_csp_config", config)


def csp_report_only_override(config):
    """Override the Content-Security-Policy-Report-Only header for a view."""
    return _make_csp_decorator("_csp_ro_config", config)
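An illustrative application of the decorators above (the view name and policy values are hypothetical): the override is stored on the response object, to be picked up later when the CSP header is built.

from django.http import HttpResponse
from django.utils.csp import CSP
from django.views.decorators.csp import csp_override


@csp_override({"default-src": [CSP.SELF], "img-src": [CSP.SELF, "data:"]})
def gallery(request):
    # The decorator attaches the config as response._csp_config; the CSP
    # machinery then emits the corresponding header for this view only.
    return HttpResponse("ok")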
{ "repo_id": "django/django", "file_path": "django/views/decorators/csp.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 27, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
django/django:tests/decorators/test_csp.py
from inspect import iscoroutinefunction from itertools import product from django.http import HttpRequest, HttpResponse from django.test import SimpleTestCase from django.utils.csp import CSP from django.views.decorators.csp import csp_override, csp_report_only_override basic_config = { "default-src": [CSP.SELF], } class CSPOverrideDecoratorTest(SimpleTestCase): def test_wrapped_sync_function_is_not_coroutine_function(self): def sync_view(request): return HttpResponse() wrapped_view = csp_override({})(sync_view) self.assertIs(iscoroutinefunction(wrapped_view), False) def test_wrapped_async_function_is_coroutine_function(self): async def async_view(request): return HttpResponse() wrapped_view = csp_override({})(async_view) self.assertIs(iscoroutinefunction(wrapped_view), True) def test_decorator_requires_mapping(self): for config, decorator in product( [None, 0, False, [], [1, 2, 3], 42, {4, 5}], (csp_override, csp_report_only_override), ): with ( self.subTest(config=config, decorator=decorator), self.assertRaisesMessage(TypeError, "CSP config should be a mapping"), ): decorator(config) def test_csp_override(self): @csp_override(basic_config) def sync_view(request): return HttpResponse("OK") response = sync_view(HttpRequest()) self.assertEqual(response._csp_config, basic_config) self.assertIs(hasattr(response, "_csp_ro_config"), False) async def test_csp_override_async_view(self): @csp_override(basic_config) async def async_view(request): return HttpResponse("OK") response = await async_view(HttpRequest()) self.assertEqual(response._csp_config, basic_config) self.assertIs(hasattr(response, "_csp_ro_config"), False) def test_csp_report_only_override(self): @csp_report_only_override(basic_config) def sync_view(request): return HttpResponse("OK") response = sync_view(HttpRequest()) self.assertEqual(response._csp_ro_config, basic_config) self.assertIs(hasattr(response, "_csp_config"), False) async def test_csp_report_only_override_async_view(self): @csp_report_only_override(basic_config) async def async_view(request): return HttpResponse("OK") response = await async_view(HttpRequest()) self.assertEqual(response._csp_ro_config, basic_config) self.assertIs(hasattr(response, "_csp_config"), False) def test_csp_override_both(self): @csp_override(basic_config) @csp_report_only_override(basic_config) def sync_view(request): return HttpResponse("OK") response = sync_view(HttpRequest()) self.assertEqual(response._csp_config, basic_config) self.assertEqual(response._csp_ro_config, basic_config) async def test_csp_override_both_async_view(self): @csp_override(basic_config) @csp_report_only_override(basic_config) async def async_view(request): return HttpResponse("OK") response = await async_view(HttpRequest()) self.assertEqual(response._csp_config, basic_config) self.assertEqual(response._csp_ro_config, basic_config)
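The overrides tested above only attach attributes to the response; a middleware has to read them to emit the actual header. A sketch of the project wiring, with the caveat that the setting and middleware names below are assumptions about the surrounding CSP feature rather than something shown in this file:

from django.utils.csp import CSP

MIDDLEWARE = [
    # ...
    "django.middleware.csp.ContentSecurityPolicyMiddleware",  # assumed name
]
# Assumed site-wide policy setting that per-view overrides take precedence over.
SECURE_CSP = {"default-src": [CSP.SELF]}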
{ "repo_id": "django/django", "file_path": "tests/decorators/test_csp.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 74, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
django/django:docs/lint.py
import re
import sys
from collections import Counter
from os.path import abspath, dirname, splitext
from unittest import mock

from sphinxlint.checkers import (
    _ROLE_BODY,
    _is_long_interpreted_text,
    _is_very_long_string_literal,
    _starts_with_anonymous_hyperlink,
    _starts_with_directive_or_hyperlink,
)
from sphinxlint.checkers import checker as sphinxlint_checker
from sphinxlint.rst import SIMPLENAME
from sphinxlint.sphinxlint import check_text
from sphinxlint.utils import PER_FILE_CACHES, hide_non_rst_blocks, paragraphs


def django_check_file(filename, checkers, options=None):
    try:
        for checker in checkers:
            # Django docs use ".txt" for docs file extension.
            if ".rst" in checker.suffixes:
                checker.suffixes = (".txt",)
        ext = splitext(filename)[1]
        if not any(ext in checker.suffixes for checker in checkers):
            return Counter()
        try:
            with open(filename, encoding="utf-8") as f:
                text = f.read()
        except OSError as err:
            return [f"{filename}: cannot open: {err}"]
        except UnicodeDecodeError as err:
            return [f"{filename}: cannot decode as UTF-8: {err}"]
        return check_text(filename, text, checkers, options)
    finally:
        for memoized_function in PER_FILE_CACHES:
            memoized_function.cache_clear()


_TOCTREE_DIRECTIVE_RE = re.compile(r"^ *.. toctree::")
_PARSED_LITERAL_DIRECTIVE_RE = re.compile(r"^ *.. parsed-literal::")
_IS_METHOD_RE = re.compile(r"^ *([\w.]+)\([\w ,*]*\)\s*$")

# https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html
# Use two trailing underscores when embedding the URL. Technically, a single
# underscore works as well, but that would create a named reference instead of
# an anonymous one. Named references typically do not have a benefit when the
# URL is embedded. Moreover, they have the disadvantage that you must make sure
# that you do not use the same “Link text” for another link in your document.
_HYPERLINK_DANGLING_RE = re.compile(r"^\s*<https?://[^>]+>`__?[\.,;]?$")


@sphinxlint_checker(".rst", enabled=False, rst_only=True)
def check_line_too_long_django(file, lines, options=None):
    """A modified version of Sphinx-lint's line-too-long check.

    Original:
    https://github.com/sphinx-contrib/sphinx-lint/blob/main/sphinxlint/checkers.py
    """

    def is_multiline_block_to_exclude(line):
        return _TOCTREE_DIRECTIVE_RE.match(line) or _PARSED_LITERAL_DIRECTIVE_RE.match(
            line
        )

    # Ignore additional blocks from line length checks.
    with mock.patch(
        "sphinxlint.utils.is_multiline_non_rst_block", is_multiline_block_to_exclude
    ):
        lines = hide_non_rst_blocks(lines)

    table_rows = []
    for lno, line in enumerate(lines):
        # Beware, in `line` we have the trailing newline.
        if len(line) - 1 > options.max_line_length:
            # Sphinxlint default exceptions.
            if line.lstrip()[0] in "+|":
                continue  # ignore wide tables
            if _is_long_interpreted_text(line):
                continue  # ignore long interpreted text
            if _starts_with_directive_or_hyperlink(line):
                continue  # ignore directives and hyperlink targets
            if _starts_with_anonymous_hyperlink(line):
                continue  # ignore anonymous hyperlink targets
            if _is_very_long_string_literal(line):
                continue  # ignore a very long literal string
            # Additional exceptions
            try:
                # Ignore headings
                if len(set(lines[lno + 1].strip())) == 1 and len(line) == len(
                    lines[lno + 1]
                ):
                    continue
            except IndexError:  # End of file
                pass
            if len(set(line.strip())) == 1 and len(line) == len(lines[lno - 1]):
                continue  # Ignore heading underline
            if lno in table_rows:
                continue  # Ignore lines in tables
            if len(set(line.strip())) == 2 and " " in line:
                # Ignore simple tables
                borders = [lno_ for lno_, line_ in enumerate(lines) if line == line_]
                table_rows.extend([n for n in range(min(borders), max(borders))])
                continue
            if _HYPERLINK_DANGLING_RE.match(line):
                continue  # Ignore dangling long links inside a ``_ ref.
            if match := _IS_METHOD_RE.match(line):
                # Ignore second definition of function signature.
                previous_line = lines[lno - 1]
                if previous_line.startswith(".. method:: ") and (
                    previous_line.find(match[1]) != -1
                ):
                    continue
            yield lno + 1, f"Line too long ({len(line) - 1}/{options.max_line_length})"


_PYTHON_DOMAIN = re.compile(f":py:{SIMPLENAME}:`{_ROLE_BODY}`")


@sphinxlint_checker(".rst", enabled=False, rst_only=True)
def check_python_domain_in_roles(file, lines, options=None):
    """
    :py: indicates the Python language domain. This means code written in
    Python, not Python built-ins in particular.

    Bad:  :py:class:`email.message.EmailMessage`
    Good: :class:`email.message.EmailMessage`
    """
    for lno, line in enumerate(lines, start=1):
        role = _PYTHON_DOMAIN.search(line)
        if role:
            yield lno, f":py domain is the default and can be omitted {role.group(0)!r}"


_DOC_CAPTURE_TARGET_RE = re.compile(r":doc:`(?:[^<`]+<)?([^>`]+)>?`")


@sphinxlint_checker(".rst", rst_only=True)
def check_absolute_targets_doc_role(file, lines, options=None):
    for paragraph_lno, paragraph in paragraphs(lines):
        for error in _DOC_CAPTURE_TARGET_RE.finditer(paragraph):
            target = error.group(1)
            # Skip absolute or intersphinx refs like "python:using/windows".
            if target.startswith("/") or ":" in target.split("/", 1)[0]:
                continue
            # Relative target, report as a violation.
            error_offset = paragraph[: error.start()].count("\n")
            yield (paragraph_lno + error_offset, target)


import sphinxlint  # noqa: E402

sphinxlint.check_file = django_check_file

from sphinxlint.cli import main  # noqa: E402

if __name__ == "__main__":
    directory = dirname(abspath(__file__))
    params = sys.argv[1:] if len(sys.argv) > 1 else []
    print(f"Running sphinxlint for: {directory} {params=}")
    sys.exit(
        main(
            [
                directory,
                "--jobs",
                "0",
                "--ignore",
                "_build",
                "--ignore",
                "_theme",
                "--ignore",
                "_ext",
                "--enable",
                "all",
                "--disable",
                "line-too-long",  # Disable sphinx-lint version
                "--max-line-length",
                "79",
                *params,
            ]
        )
    )
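A hedged sketch of the checker plugin shape this file relies on: a function registered with @sphinxlint_checker that scans lines and yields (line_number, message) pairs. The rule below is hypothetical and only illustrates the interface.

from sphinxlint.checkers import checker as sphinxlint_checker


@sphinxlint_checker(".rst", rst_only=True)
def check_no_tabs(file, lines, options=None):
    # Yield one (lno, message) pair per offending line, as the checkers above do.
    for lno, line in enumerate(lines, start=1):
        if "\t" in line:
            yield lno, "tab character found; use spaces"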
{ "repo_id": "django/django", "file_path": "docs/lint.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 159, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
django/django:tests/template_tests/syntax_tests/test_partials.py
from django.template import (
    Context,
    TemplateDoesNotExist,
    TemplateSyntaxError,
    VariableDoesNotExist,
)
from django.template.base import Token, TokenType
from django.test import SimpleTestCase
from django.views.debug import ExceptionReporter

from ..utils import setup

partial_templates = {
    "partial_base.html": (
        "<main>{% block main %}Default main content.{% endblock main %}</main>"
    ),
    "partial_included.html": (
        "INCLUDED TEMPLATE START\n"
        "{% partialdef included-partial %}\n"
        "THIS IS CONTENT FROM THE INCLUDED PARTIAL\n"
        "{% endpartialdef %}\n\n"
        "Now using the partial: {% partial included-partial %}\n"
        "INCLUDED TEMPLATE END\n"
    ),
}

valid_partialdef_names = (
    "dot.in.name",
    "'space in name'",
    "exclamation!",
    "@at",
    "slash/something",
    "inline",
    "inline-inline",
    "INLINE",
    "with+plus",
    "with&amp",
    "with%percent",
    "with,comma",
    "with:colon",
    "with;semicolon",
    "[brackets]",
    "(parens)",
    "{curly}",
)


def gen_partial_template(name, *args, **kwargs):
    if args or kwargs:
        extra = " ".join((*args, *(f"{k}={v}" for k, v in kwargs.items()))) + " "
    else:
        extra = ""
    return (
        f"{{% partialdef {name} {extra}%}}TEST with {name}!{{% endpartialdef %}}"
        f"{{% partial {name} %}}"
    )


class PartialTagTests(SimpleTestCase):
    libraries = {"bad_tag": "template_tests.templatetags.bad_tag"}

    @setup({name: gen_partial_template(name) for name in valid_partialdef_names})
    def test_valid_partialdef_names(self):
        for template_name in valid_partialdef_names:
            with self.subTest(template_name=template_name):
                output = self.engine.render_to_string(template_name)
                self.assertEqual(output, f"TEST with {template_name}!")

    @setup(
        {
            "basic": (
                "{% partialdef testing-name %}"
                "HERE IS THE TEST CONTENT"
                "{% endpartialdef %}"
                "{% partial testing-name %}"
            ),
            "basic_inline": (
                "{% partialdef testing-name inline %}"
                "HERE IS THE TEST CONTENT"
                "{% endpartialdef %}"
            ),
            "inline_inline": (
                "{% partialdef inline inline %}"
                "HERE IS THE TEST CONTENT"
                "{% endpartialdef %}"
            ),
            "with_newlines": (
                "{% partialdef testing-name %}\n"
                "HERE IS THE TEST CONTENT\n"
                "{% endpartialdef testing-name %}\n"
                "{% partial testing-name %}"
            ),
        }
    )
    def test_basic_usage(self):
        for template_name in (
            "basic",
            "basic_inline",
            "inline_inline",
            "with_newlines",
        ):
            with self.subTest(template_name=template_name):
                output = self.engine.render_to_string(template_name)
                self.assertEqual(output.strip(), "HERE IS THE TEST CONTENT")

    @setup(
        {
            "inline_partial_with_context": (
                "BEFORE\n"
                "{% partialdef testing-name inline %}"
                "HERE IS THE TEST CONTENT"
                "{% endpartialdef %}\n"
                "AFTER"
            )
        }
    )
    def test_partial_inline_only_with_before_and_after_content(self):
        output = self.engine.render_to_string("inline_partial_with_context")
        self.assertEqual(output.strip(), "BEFORE\nHERE IS THE TEST CONTENT\nAFTER")

    @setup(
        {
            "inline_partial_explicit_end": (
                "{% partialdef testing-name inline %}"
                "HERE IS THE TEST CONTENT"
                "{% endpartialdef testing-name %}\n"
                "{% partial testing-name %}"
            )
        }
    )
    def test_partial_inline_and_used_once(self):
        output = self.engine.render_to_string("inline_partial_explicit_end")
        self.assertEqual(output, "HERE IS THE TEST CONTENT\nHERE IS THE TEST CONTENT")

    @setup(
        {
            "inline_partial_with_usage": (
                "BEFORE\n"
                "{% partialdef content_snippet inline %}"
                "HERE IS THE TEST CONTENT"
                "{% endpartialdef %}\n"
                "AFTER\n"
                "{% partial content_snippet %}"
            )
        }
    )
    def test_partial_inline_and_used_once_with_before_and_after_content(self):
        output = self.engine.render_to_string("inline_partial_with_usage")
        self.assertEqual(
            output.strip(),
            "BEFORE\nHERE IS THE TEST
CONTENT", ) @setup( { "partial_used_before_definition": ( "TEMPLATE START\n" "{% partial testing-name %}\n" "MIDDLE CONTENT\n" "{% partialdef testing-name %}\n" "THIS IS THE PARTIAL CONTENT\n" "{% endpartialdef %}\n" "TEMPLATE END" ), } ) def test_partial_used_before_definition(self): output = self.engine.render_to_string("partial_used_before_definition") expected = ( "TEMPLATE START\n\nTHIS IS THE PARTIAL CONTENT\n\n" "MIDDLE CONTENT\n\nTEMPLATE END" ) self.assertEqual(output, expected) @setup( { "partial_with_extends": ( "{% extends 'partial_base.html' %}" "{% partialdef testing-name %}Inside Content{% endpartialdef %}" "{% block main %}" "Main content with {% partial testing-name %}" "{% endblock %}" ), }, partial_templates, ) def test_partial_defined_outside_main_block(self): output = self.engine.render_to_string("partial_with_extends") self.assertIn("<main>Main content with Inside Content</main>", output) @setup( { "partial_with_extends_and_block_super": ( "{% extends 'partial_base.html' %}" "{% partialdef testing-name %}Inside Content{% endpartialdef %}" "{% block main %}{{ block.super }} " "Main content with {% partial testing-name %}" "{% endblock %}" ), }, partial_templates, ) def test_partial_used_with_block_super(self): output = self.engine.render_to_string("partial_with_extends_and_block_super") self.assertIn( "<main>Default main content. Main content with Inside Content</main>", output, ) @setup( { "partial_with_include": ( "MAIN TEMPLATE START\n" "{% include 'partial_included.html' %}\n" "MAIN TEMPLATE END" ) }, partial_templates, ) def test_partial_in_included_template(self): output = self.engine.render_to_string("partial_with_include") expected = ( "MAIN TEMPLATE START\nINCLUDED TEMPLATE START\n\n\n" "Now using the partial: \n" "THIS IS CONTENT FROM THE INCLUDED PARTIAL\n\n" "INCLUDED TEMPLATE END\n\nMAIN TEMPLATE END" ) self.assertEqual(output, expected) @setup( { "partial_as_include_in_other_template": ( "MAIN TEMPLATE START\n" "{% include 'partial_included.html#included-partial' %}\n" "MAIN TEMPLATE END" ) }, partial_templates, ) def test_partial_as_include_in_template(self): output = self.engine.render_to_string("partial_as_include_in_other_template") expected = ( "MAIN TEMPLATE START\n\n" "THIS IS CONTENT FROM THE INCLUDED PARTIAL\n\n" "MAIN TEMPLATE END" ) self.assertEqual(output, expected) @setup( { "nested_simple": ( "{% extends 'base.html' %}" "{% block content %}" "This is my main page." "{% partialdef outer inline %}" " It hosts a couple of partials.\n" " {% partialdef inner inline %}" " And an inner one." 
" {% endpartialdef inner %}" "{% endpartialdef outer %}" "{% endblock content %}" ), "use_outer": "{% include 'nested_simple#outer' %}", "use_inner": "{% include 'nested_simple#inner' %}", } ) def test_nested_partials(self): with self.subTest(template_name="use_outer"): output = self.engine.render_to_string("use_outer") self.assertEqual( [line.strip() for line in output.split("\n")], ["It hosts a couple of partials.", "And an inner one."], ) with self.subTest(template_name="use_inner"): output = self.engine.render_to_string("use_inner") self.assertEqual(output.strip(), "And an inner one.") @setup( { "partial_undefined_name": "{% partial undefined %}", "partial_missing_name": "{% partial %}", "partial_closing_tag": ( "{% partialdef testing-name %}TEST{% endpartialdef %}" "{% partial testing-name %}{% endpartial %}" ), "partialdef_missing_name": "{% partialdef %}{% endpartialdef %}", "partialdef_missing_close_tag": "{% partialdef name %}TEST", "partialdef_opening_closing_name_mismatch": ( "{% partialdef testing-name %}TEST{% endpartialdef invalid %}" ), "partialdef_invalid_name": gen_partial_template("with\nnewline"), "partialdef_extra_params": ( "{% partialdef testing-name inline extra %}TEST{% endpartialdef %}" ), "partialdef_duplicated_names": ( "{% partialdef testing-name %}TEST{% endpartialdef %}" "{% partialdef testing-name %}TEST{% endpartialdef %}" "{% partial testing-name %}" ), "partialdef_duplicated_nested_names": ( "{% partialdef testing-name %}" "TEST" "{% partialdef testing-name %}TEST{% endpartialdef %}" "{% endpartialdef %}" "{% partial testing-name %}" ), }, ) def test_basic_parse_errors(self): for template_name, error_msg in ( ( "partial_undefined_name", "Partial 'undefined' is not defined in the current template.", ), ("partial_missing_name", "'partial' tag requires a single argument"), ("partial_closing_tag", "Invalid block tag on line 1: 'endpartial'"), ("partialdef_missing_name", "'partialdef' tag requires a name"), ("partialdef_missing_close_tag", "Unclosed tag on line 1: 'partialdef'"), ( "partialdef_opening_closing_name_mismatch", "expected 'endpartialdef' or 'endpartialdef testing-name'.", ), ("partialdef_invalid_name", "Invalid block tag on line 3: 'endpartialdef'"), ("partialdef_extra_params", "'partialdef' tag takes at most 2 arguments"), ( "partialdef_duplicated_names", "Partial 'testing-name' is already defined in the " "'partialdef_duplicated_names' template.", ), ( "partialdef_duplicated_nested_names", "Partial 'testing-name' is already defined in the " "'partialdef_duplicated_nested_names' template.", ), ): with ( self.subTest(template_name=template_name), self.assertRaisesMessage(TemplateSyntaxError, error_msg), ): self.engine.render_to_string(template_name) @setup( { "with_params": ( "{% partialdef testing-name inline=true %}TEST{% endpartialdef %}" ), "uppercase": "{% partialdef testing-name INLINE %}TEST{% endpartialdef %}", } ) def test_partialdef_invalid_inline(self): error_msg = "The 'inline' argument does not have any parameters" for template_name in ("with_params", "uppercase"): with ( self.subTest(template_name=template_name), self.assertRaisesMessage(TemplateSyntaxError, error_msg), ): self.engine.render_to_string(template_name) @setup( { "partial_broken_unclosed": ( "<div>Before partial</div>" "{% partialdef unclosed_partial %}" "<p>This partial has no closing tag</p>" "<div>After partial content</div>" ) } ) def test_broken_partial_unclosed_exception_info(self): with self.assertRaises(TemplateSyntaxError) as cm: 
self.engine.get_template("partial_broken_unclosed") self.assertIn("endpartialdef", str(cm.exception)) self.assertIn("Unclosed tag", str(cm.exception)) reporter = ExceptionReporter(None, cm.exception.__class__, cm.exception, None) traceback_data = reporter.get_traceback_data() exception_value = str(traceback_data.get("exception_value", "")) self.assertIn("Unclosed tag", exception_value) @setup( { "partial_with_variable_error": ( "<h1>Title</h1>\n" "{% partialdef testing-name %}\n" "<p>{{ nonexistent|default:alsonotthere }}</p>\n" "{% endpartialdef %}\n" "<h2>Sub Title</h2>\n" "{% partial testing-name %}\n" ), } ) def test_partial_runtime_exception_has_debug_info(self): template = self.engine.get_template("partial_with_variable_error") context = Context({}) if hasattr(self.engine, "string_if_invalid") and self.engine.string_if_invalid: output = template.render(context) # The variable should be replaced with INVALID self.assertIn("INVALID", output) else: with self.assertRaises(VariableDoesNotExist) as cm: template.render(context) if self.engine.debug: exc_info = cm.exception.template_debug self.assertEqual( exc_info["during"], "{{ nonexistent|default:alsonotthere }}" ) self.assertEqual(exc_info["line"], 3) self.assertEqual(exc_info["name"], "partial_with_variable_error") self.assertIn("Failed lookup", exc_info["message"]) @setup( { "partial_exception_info_test": ( "<h1>Title</h1>\n" "{% partialdef testing-name %}\n" "<p>Content</p>\n" "{% endpartialdef %}\n" ), } ) def test_partial_template_get_exception_info_delegation(self): if self.engine.debug: template = self.engine.get_template("partial_exception_info_test") partial_template = template.extra_data["partials"]["testing-name"] test_exc = Exception("Test exception") token = Token( token_type=TokenType.VAR, contents="test", position=(0, 4), ) exc_info = partial_template.get_exception_info(test_exc, token) self.assertIn("message", exc_info) self.assertIn("line", exc_info) self.assertIn("name", exc_info) self.assertEqual(exc_info["name"], "partial_exception_info_test") self.assertEqual(exc_info["message"], "Test exception") @setup( { "partial_with_undefined_reference": ( "<h1>Header</h1>\n" "{% partial undefined %}\n" "<p>After undefined partial</p>\n" ), } ) def test_undefined_partial_exception_info(self): template = self.engine.get_template("partial_with_undefined_reference") with self.assertRaises(TemplateSyntaxError) as cm: template.render(Context()) self.assertIn("undefined", str(cm.exception)) self.assertIn("is not defined", str(cm.exception)) if self.engine.debug: exc_debug = cm.exception.template_debug self.assertEqual(exc_debug["during"], "{% partial undefined %}") self.assertEqual(exc_debug["line"], 2) self.assertEqual(exc_debug["name"], "partial_with_undefined_reference") self.assertIn("undefined", exc_debug["message"]) @setup( { "existing_template": ( "<h1>Header</h1><p>This template has no partials defined</p>" ), } ) def test_undefined_partial_exception_info_template_does_not_exist(self): with self.assertRaises(TemplateDoesNotExist) as cm: self.engine.get_template("existing_template#undefined") self.assertIn("undefined", str(cm.exception)) @setup( { "partial_with_syntax_error": ( "<h1>Title</h1>\n" "{% partialdef syntax_error_partial %}\n" " {% if user %}\n" " <p>User: {{ user.name }}</p>\n" " {% endif\n" " <p>Missing closing tag above</p>\n" "{% endpartialdef %}\n" "{% partial syntax_error_partial %}\n" ), } ) def test_partial_with_syntax_error_exception_info(self): with self.assertRaises(TemplateSyntaxError) as cm: 
self.engine.get_template("partial_with_syntax_error") self.assertIn("endif", str(cm.exception).lower()) if self.engine.debug: exc_debug = cm.exception.template_debug self.assertIn("endpartialdef", exc_debug["during"]) self.assertEqual(exc_debug["name"], "partial_with_syntax_error") self.assertIn("endif", exc_debug["message"].lower()) @setup( { "partial_with_runtime_error": ( "<h1>Title</h1>\n" "{% load bad_tag %}\n" "{% partialdef runtime_error_partial %}\n" " <p>This will raise an error:</p>\n" " {% badsimpletag %}\n" "{% endpartialdef %}\n" "{% partial runtime_error_partial %}\n" ), } ) def test_partial_runtime_error_exception_info(self): template = self.engine.get_template("partial_with_runtime_error") context = Context() with self.assertRaises(RuntimeError) as cm: template.render(context) if self.engine.debug: exc_debug = cm.exception.template_debug self.assertIn("badsimpletag", exc_debug["during"]) self.assertEqual(exc_debug["line"], 5) # Line 5 is where badsimpletag is self.assertEqual(exc_debug["name"], "partial_with_runtime_error") self.assertIn("bad simpletag", exc_debug["message"]) @setup( { "nested_partial_with_undefined_var": ( "<h1>Title</h1>\n" "{% partialdef outer_partial %}\n" ' <div class="outer">\n' " {% partialdef inner_partial %}\n" " <p>{{ undefined_var }}</p>\n" " {% endpartialdef %}\n" " {% partial inner_partial %}\n" " </div>\n" "{% endpartialdef %}\n" "{% partial outer_partial %}\n" ), } ) def test_nested_partial_error_exception_info(self): template = self.engine.get_template("nested_partial_with_undefined_var") context = Context() output = template.render(context) # When string_if_invalid is set, it will show INVALID # When not set, undefined variables just render as empty string if hasattr(self.engine, "string_if_invalid") and self.engine.string_if_invalid: self.assertIn("INVALID", output) else: self.assertIn("<p>", output) self.assertIn("</p>", output) @setup( { "parent.html": ( "<!DOCTYPE html>\n" "<html>\n" "<head>{% block title %}Default Title{% endblock %}</head>\n" "<body>\n" " {% block content %}{% endblock %}\n" "</body>\n" "</html>\n" ), "child.html": ( "{% extends 'parent.html' %}\n" "{% block content %}\n" " {% partialdef content_partial %}\n" " <p>{{ missing_variable|undefined_filter }}</p>\n" " {% endpartialdef %}\n" " {% partial content_partial %}\n" "{% endblock %}\n" ), } ) def test_partial_in_extended_template_error(self): with self.assertRaises(TemplateSyntaxError) as cm: self.engine.get_template("child.html") self.assertIn("undefined_filter", str(cm.exception)) if self.engine.debug: exc_debug = cm.exception.template_debug self.assertIn("undefined_filter", exc_debug["during"]) self.assertEqual(exc_debug["name"], "child.html") self.assertIn("undefined_filter", exc_debug["message"]) @setup( { "partial_broken_nesting": ( "<div>Before partial</div>\n" "{% partialdef outer %}\n" "{% partialdef inner %}...{% endpartialdef outer %}\n" "{% endpartialdef inner %}\n" "<div>After partial content</div>" ) } ) def test_broken_partial_nesting(self): with self.assertRaises(TemplateSyntaxError) as cm: self.engine.get_template("partial_broken_nesting") self.assertIn("endpartialdef", str(cm.exception)) self.assertIn("Invalid block tag", str(cm.exception)) self.assertIn("'endpartialdef inner'", str(cm.exception)) reporter = ExceptionReporter(None, cm.exception.__class__, cm.exception, None) traceback_data = reporter.get_traceback_data() exception_value = str(traceback_data.get("exception_value", "")) self.assertIn("Invalid block tag", exception_value) 
self.assertIn("'endpartialdef inner'", str(cm.exception)) @setup( { "partial_broken_nesting_mixed": ( "<div>Before partial</div>\n" "{% partialdef outer %}\n" "{% partialdef inner %}...{% endpartialdef %}\n" "{% endpartialdef inner %}\n" "<div>After partial content</div>" ) } ) def test_broken_partial_nesting_mixed(self): with self.assertRaises(TemplateSyntaxError) as cm: self.engine.get_template("partial_broken_nesting_mixed") self.assertIn("endpartialdef", str(cm.exception)) self.assertIn("Invalid block tag", str(cm.exception)) self.assertIn("'endpartialdef outer'", str(cm.exception)) reporter = ExceptionReporter(None, cm.exception.__class__, cm.exception, None) traceback_data = reporter.get_traceback_data() exception_value = str(traceback_data.get("exception_value", "")) self.assertIn("Invalid block tag", exception_value) self.assertIn("'endpartialdef outer'", str(cm.exception))
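A condensed sketch of the behaviour this suite verifies, assuming a configured default template engine: a {% partialdef %} block renders nothing where it is defined (unless marked inline) and is re-rendered wherever {% partial %} names it.

from django.template import Context, Template

source = (
    "{% partialdef greeting %}Hello, {{ name }}!{% endpartialdef %}"
    "{% partial greeting %} {% partial greeting %}"
)
rendered = Template(source).render(Context({"name": "Ada"}))
assert rendered == "Hello, Ada! Hello, Ada!"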
{ "repo_id": "django/django", "file_path": "tests/template_tests/syntax_tests/test_partials.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 592, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
django/django:tests/template_tests/test_partials.py
import os from unittest import mock from django.http import HttpResponse from django.template import ( Context, NodeList, Origin, PartialTemplate, Template, TemplateDoesNotExist, TemplateSyntaxError, engines, ) from django.template.backends.django import DjangoTemplates from django.template.loader import render_to_string from django.test import TestCase, override_settings from django.urls import path, reverse from .utils import setup engine = engines["django"] class PartialTagsTests(TestCase): def test_invalid_template_name_raises_template_does_not_exist(self): for template_name in [123, None, "", "#", "#name"]: with ( self.subTest(template_name=template_name), self.assertRaisesMessage(TemplateDoesNotExist, str(template_name)), ): engine.get_template(template_name) def test_full_template_from_loader(self): template = engine.get_template("partial_examples.html") rendered = template.render({}) # Check the partial was rendered twice self.assertEqual(2, rendered.count("TEST-PARTIAL-CONTENT")) self.assertEqual(1, rendered.count("INLINE-CONTENT")) def test_chained_exception_forwarded(self): with self.assertRaises(TemplateDoesNotExist) as ctx: engine.get_template("not_there.html#not-a-partial") exception = ctx.exception self.assertGreater(len(exception.tried), 0) origin, _ = exception.tried[0] self.assertEqual(origin.template_name, "not_there.html") def test_partials_use_cached_loader_when_configured(self): template_dir = os.path.join(os.path.dirname(__file__), "templates") backend = DjangoTemplates( { "NAME": "django", "DIRS": [template_dir], "APP_DIRS": False, "OPTIONS": { "loaders": [ ( "django.template.loaders.cached.Loader", ["django.template.loaders.filesystem.Loader"], ), ], }, } ) cached_loader = backend.engine.template_loaders[0] filesystem_loader = cached_loader.loaders[0] with mock.patch.object( filesystem_loader, "get_contents", wraps=filesystem_loader.get_contents ) as mock_get_contents: full_template = backend.get_template("partial_examples.html") self.assertIn("TEST-PARTIAL-CONTENT", full_template.render({})) partial_template = backend.get_template( "partial_examples.html#test-partial" ) self.assertEqual( "TEST-PARTIAL-CONTENT", partial_template.render({}).strip() ) mock_get_contents.assert_called_once() def test_context_available_in_response_for_partial_template(self): def sample_view(request): return HttpResponse( render_to_string("partial_examples.html#test-partial", {"foo": "bar"}) ) class PartialUrls: urlpatterns = [path("sample/", sample_view, name="sample-view")] with override_settings(ROOT_URLCONF=PartialUrls): response = self.client.get(reverse("sample-view")) self.assertContains(response, "TEST-PARTIAL-CONTENT") self.assertEqual(response.context.get("foo"), "bar") def test_response_with_multiple_parts(self): context = {} template_partials = ["partial_child.html", "partial_child.html#extra-content"] response_whole_content_at_once = HttpResponse( "".join( render_to_string(template_name, context) for template_name in template_partials ) ) response_with_multiple_writes = HttpResponse() for template_name in template_partials: response_with_multiple_writes.write( render_to_string(template_name, context) ) response_with_generator = HttpResponse( render_to_string(template_name, context) for template_name in template_partials ) for label, response in [ ("response_whole_content_at_once", response_whole_content_at_once), ("response_with_multiple_writes", response_with_multiple_writes), ("response_with_generator", response_with_generator), ]: with self.subTest(response=label): 
self.assertIn(b"Main Content", response.content) self.assertIn(b"Extra Content", response.content) def test_partial_engine_assignment_with_real_template(self): template_with_partial = engine.get_template( "partial_examples.html#test-partial" ) self.assertEqual(template_with_partial.template.engine, engine.engine) rendered_content = template_with_partial.render({}) self.assertEqual("TEST-PARTIAL-CONTENT", rendered_content.strip()) def test_template_source_warning(self): partial = engine.get_template("partial_examples.html#test-partial") with self.assertWarnsMessage( RuntimeWarning, "PartialTemplate.source is only available when template " "debugging is enabled.", ) as ctx: self.assertEqual(partial.template.source, "") self.assertEqual(ctx.filename, __file__) class RobustPartialHandlingTests(TestCase): def override_get_template(self, **kwargs): class TemplateWithCustomAttrs: def __init__(self, **kwargs): for k, v in kwargs.items(): setattr(self, k, v) def render(self, context): return "rendered content" template = TemplateWithCustomAttrs(**kwargs) origin = self.id() return mock.patch.object( engine.engine, "find_template", return_value=(template, origin), ) def test_template_without_extra_data_attribute(self): partial_name = "some_partial_name" with ( self.override_get_template(), self.assertRaisesMessage(TemplateDoesNotExist, partial_name), ): engine.get_template(f"some_template.html#{partial_name}") def test_template_extract_extra_data_robust(self): partial_name = "some_partial_name" for extra_data in ( None, 0, [], {}, {"wrong-key": {}}, {"partials": None}, {"partials": {}}, {"partials": []}, {"partials": 0}, ): with ( self.subTest(extra_data=extra_data), self.override_get_template(extra_data=extra_data), self.assertRaisesMessage(TemplateDoesNotExist, partial_name), ): engine.get_template(f"template.html#{partial_name}") def test_nested_partials_rendering_with_context(self): template_source = """ {% partialdef outer inline %} Hello {{ name }}! {% partialdef inner inline %} Your age is {{ age }}. {% endpartialdef inner %} Nice to meet you. 
{% endpartialdef outer %} """ template = Template(template_source, origin=Origin(name="template.html")) context = Context({"name": "Alice", "age": 25}) rendered = template.render(context) self.assertIn("Hello Alice!", rendered) self.assertIn("Your age is 25.", rendered) self.assertIn("Nice to meet you.", rendered) class FindPartialSourceTests(TestCase): @setup( { "partial_source_success_template": ( "{% partialdef test-partial %}\n" "TEST-PARTIAL-CONTENT\n" "{% endpartialdef %}\n" ), }, debug_only=True, ) def test_find_partial_source_success(self): template = self.engine.get_template("partial_source_success_template") partial_proxy = template.extra_data["partials"]["test-partial"] expected = """{% partialdef test-partial %} TEST-PARTIAL-CONTENT {% endpartialdef %}""" self.assertEqual(partial_proxy.source.strip(), expected.strip()) @setup( { "partial_source_with_inline_template": ( "{% partialdef inline-partial inline %}\n" "INLINE-CONTENT\n" "{% endpartialdef %}\n" ), }, debug_only=True, ) def test_find_partial_source_with_inline(self): template = self.engine.get_template("partial_source_with_inline_template") partial_proxy = template.extra_data["partials"]["inline-partial"] expected = """{% partialdef inline-partial inline %} INLINE-CONTENT {% endpartialdef %}""" self.assertEqual(partial_proxy.source.strip(), expected.strip()) def test_find_partial_source_fallback_cases(self): cases = {"None offsets": (None, None), "Out of bounds offsets": (10, 20)} for name, (source_start, source_end) in cases.items(): with self.subTest(name): partial = PartialTemplate( NodeList(), Origin("test"), "test", source_start=source_start, source_end=source_end, ) result = partial.find_partial_source("nonexistent-partial") self.assertEqual(result, "") @setup( { "empty_partial_template": ("{% partialdef empty %}{% endpartialdef %}"), }, debug_only=True, ) def test_find_partial_source_empty_partial(self): template = self.engine.get_template("empty_partial_template") partial_proxy = template.extra_data["partials"]["empty"] result = partial_proxy.find_partial_source(template.source) self.assertEqual(result, "{% partialdef empty %}{% endpartialdef %}") @setup( { "consecutive_partials_template": ( "{% partialdef empty %}{% endpartialdef %}" "{% partialdef other %}...{% endpartialdef %}" ), }, debug_only=True, ) def test_find_partial_source_multiple_consecutive_partials(self): template = self.engine.get_template("consecutive_partials_template") empty_proxy = template.extra_data["partials"]["empty"] other_proxy = template.extra_data["partials"]["other"] empty_result = empty_proxy.find_partial_source(template.source) self.assertEqual(empty_result, "{% partialdef empty %}{% endpartialdef %}") other_result = other_proxy.find_partial_source(template.source) self.assertEqual(other_result, "{% partialdef other %}...{% endpartialdef %}") def test_partials_with_duplicate_names(self): test_cases = [ ( "nested", """ {% partialdef duplicate %}{% partialdef duplicate %} CONTENT {% endpartialdef %}{% endpartialdef %} """, ), ( "conditional", """ {% if ... 
%} {% partialdef duplicate %} CONTENT {% endpartialdef %} {% else %} {% partialdef duplicate %} OTHER-CONTENT {% endpartialdef %} {% endif %} """, ), ] for test_name, template_source in test_cases: with self.subTest(test_name=test_name): with self.assertRaisesMessage( TemplateSyntaxError, "Partial 'duplicate' is already defined in the " "'template.html' template.", ): Template(template_source, origin=Origin(name="template.html")) @setup( { "named_end_tag_template": ( "{% partialdef thing %}CONTENT{% endpartialdef thing %}" ), }, debug_only=True, ) def test_find_partial_source_supports_named_end_tag(self): template = self.engine.get_template("named_end_tag_template") partial_proxy = template.extra_data["partials"]["thing"] result = partial_proxy.find_partial_source(template.source) self.assertEqual( result, "{% partialdef thing %}CONTENT{% endpartialdef thing %}" ) @setup( { "nested_partials_basic_template": ( "{% partialdef outer %}" "{% partialdef inner %}...{% endpartialdef %}" "{% endpartialdef %}" ), }, debug_only=True, ) def test_find_partial_source_supports_nested_partials(self): template = self.engine.get_template("nested_partials_basic_template") empty_proxy = template.extra_data["partials"]["outer"] other_proxy = template.extra_data["partials"]["inner"] outer_result = empty_proxy.find_partial_source(template.source) self.assertEqual( outer_result, ( "{% partialdef outer %}{% partialdef inner %}" "...{% endpartialdef %}{% endpartialdef %}" ), ) inner_result = other_proxy.find_partial_source(template.source) self.assertEqual(inner_result, "{% partialdef inner %}...{% endpartialdef %}") @setup( { "nested_partials_named_end_template": ( "{% partialdef outer %}" "{% partialdef inner %}...{% endpartialdef inner %}" "{% endpartialdef outer %}" ), }, debug_only=True, ) def test_find_partial_source_supports_nested_partials_and_named_end_tags(self): template = self.engine.get_template("nested_partials_named_end_template") empty_proxy = template.extra_data["partials"]["outer"] other_proxy = template.extra_data["partials"]["inner"] outer_result = empty_proxy.find_partial_source(template.source) self.assertEqual( outer_result, ( "{% partialdef outer %}{% partialdef inner %}" "...{% endpartialdef inner %}{% endpartialdef outer %}" ), ) inner_result = other_proxy.find_partial_source(template.source) self.assertEqual( inner_result, "{% partialdef inner %}...{% endpartialdef inner %}" ) @setup( { "nested_partials_mixed_end_1_template": ( "{% partialdef outer %}" "{% partialdef inner %}...{% endpartialdef %}" "{% endpartialdef outer %}" ), }, debug_only=True, ) def test_find_partial_source_supports_nested_partials_and_mixed_end_tags_1(self): template = self.engine.get_template("nested_partials_mixed_end_1_template") empty_proxy = template.extra_data["partials"]["outer"] other_proxy = template.extra_data["partials"]["inner"] outer_result = empty_proxy.find_partial_source(template.source) self.assertEqual( outer_result, ( "{% partialdef outer %}{% partialdef inner %}" "...{% endpartialdef %}{% endpartialdef outer %}" ), ) inner_result = other_proxy.find_partial_source(template.source) self.assertEqual(inner_result, "{% partialdef inner %}...{% endpartialdef %}") @setup( { "nested_partials_mixed_end_2_template": ( "{% partialdef outer %}" "{% partialdef inner %}...{% endpartialdef inner %}" "{% endpartialdef %}" ), }, debug_only=True, ) def test_find_partial_source_supports_nested_partials_and_mixed_end_tags_2(self): template = self.engine.get_template("nested_partials_mixed_end_2_template") 
empty_proxy = template.extra_data["partials"]["outer"] other_proxy = template.extra_data["partials"]["inner"] outer_result = empty_proxy.find_partial_source(template.source) self.assertEqual( outer_result, ( "{% partialdef outer %}{% partialdef inner %}" "...{% endpartialdef inner %}{% endpartialdef %}" ), ) inner_result = other_proxy.find_partial_source(template.source) self.assertEqual( inner_result, "{% partialdef inner %}...{% endpartialdef inner %}" ) @setup( { "partial_embedded_in_verbatim": ( "{% verbatim %}\n" "{% partialdef testing-name %}\n" "<p>Should be ignored</p>" "{% endpartialdef testing-name %}\n" "{% endverbatim %}\n" "{% partialdef testing-name %}\n" "<p>Content</p>\n" "{% endpartialdef %}\n" ), }, debug_only=True, ) def test_partial_template_embedded_in_verbatim(self): template = self.engine.get_template("partial_embedded_in_verbatim") partial_template = template.extra_data["partials"]["testing-name"] self.assertEqual( partial_template.source, "{% partialdef testing-name %}\n<p>Content</p>\n{% endpartialdef %}", ) @setup( { "partial_debug_source": ( "{% partialdef testing-name %}\n" "<p>Content</p>\n" "{% endpartialdef %}\n" ), }, debug_only=True, ) def test_partial_source_uses_offsets_in_debug(self): template = self.engine.get_template("partial_debug_source") partial_template = template.extra_data["partials"]["testing-name"] self.assertEqual(partial_template._source_start, 0) self.assertEqual(partial_template._source_end, 64) expected = template.source[ partial_template._source_start : partial_template._source_end ] self.assertEqual(partial_template.source, expected) @setup( { "partial_embedded_in_named_verbatim": ( "{% verbatim block1 %}\n" "{% partialdef testing-name %}\n" "{% endverbatim block1 %}\n" "{% partialdef testing-name %}\n" "<p>Named Content</p>\n" "{% endpartialdef %}\n" ), }, debug_only=True, ) def test_partial_template_embedded_in_named_verbatim(self): template = self.engine.get_template("partial_embedded_in_named_verbatim") partial_template = template.extra_data["partials"]["testing-name"] self.assertEqual( "{% partialdef testing-name %}\n<p>Named Content</p>\n{% endpartialdef %}", partial_template.source, ) @setup( { "partial_embedded_in_comment_block": ( "{% comment %}\n" "{% partialdef testing-name %}\n" "{% endcomment %}\n" "{% partialdef testing-name %}\n" "<p>Comment Content</p>\n" "{% endpartialdef %}\n" ), }, debug_only=True, ) def test_partial_template_embedded_in_comment_block(self): template = self.engine.get_template("partial_embedded_in_comment_block") partial_template = template.extra_data["partials"]["testing-name"] self.assertEqual( partial_template.source, "{% partialdef testing-name %}\n" "<p>Comment Content</p>\n" "{% endpartialdef %}", ) @setup( { "partial_embedded_in_inline_comment": ( "{# {% partialdef testing-name %} #}\n" "{% partialdef testing-name %}\n" "<p>Inline Comment Content</p>\n" "{% endpartialdef %}\n" ), }, debug_only=True, ) def test_partial_template_embedded_in_inline_comment(self): template = self.engine.get_template("partial_embedded_in_inline_comment") partial_template = template.extra_data["partials"]["testing-name"] self.assertEqual( partial_template.source, "{% partialdef testing-name %}\n" "<p>Inline Comment Content</p>\n" "{% endpartialdef %}", ) @setup( { "partial_contains_fake_end_inside_verbatim": ( "{% partialdef testing-name %}\n" "{% verbatim %}{% endpartialdef %}{% endverbatim %}\n" "<p>Body</p>\n" "{% endpartialdef %}\n" ), }, debug_only=True, ) def 
test_partial_template_contains_fake_end_inside_verbatim(self): template = self.engine.get_template("partial_contains_fake_end_inside_verbatim") partial_template = template.extra_data["partials"]["testing-name"] self.assertEqual( partial_template.source, "{% partialdef testing-name %}\n" "{% verbatim %}{% endpartialdef %}{% endverbatim %}\n" "<p>Body</p>\n" "{% endpartialdef %}", )
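A short sketch of the fragment-loading syntax covered above: appending '#partial-name' to a template name loads just that partial through the normal template loaders, mirroring the get_template() calls in the tests.

from django.template import engines

partial = engines["django"].get_template("partial_examples.html#test-partial")
content = partial.render({})  # Renders only the named partial's nodelist.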
{ "repo_id": "django/django", "file_path": "tests/template_tests/test_partials.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 529, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
django/django:tests/deprecation/test_deprecate_posargs.py
import inspect import unittest from typing import TYPE_CHECKING from django.test import SimpleTestCase from django.utils.deprecation import RemovedAfterNextVersionWarning, deprecate_posargs from django.utils.version import PY314 if TYPE_CHECKING: type AnnotatedKwarg = int class DeprecatePosargsTests(SimpleTestCase): # Note: these tests use the generic RemovedAfterNextVersionWarning so they # don't need to be updated each release. In actual use, you must substitute # a specific RemovedInDjangoXXWarning. def assertDeprecated(self, params, name): msg = ( "Passing positional argument(s) {0} to {1}() is deprecated. Use keyword " "arguments instead." ) return self.assertWarnsMessage( RemovedAfterNextVersionWarning, msg.format(params, name) ) def test_all_keyword_only_params(self): """All positional arguments are remapped to keyword-only arguments.""" @deprecate_posargs(RemovedAfterNextVersionWarning, ["a", "b"]) def some_func(*, a=1, b=2): return a, b with ( self.subTest("Multiple affected args"), self.assertDeprecated("'a', 'b'", "some_func"), ): result = some_func(10, 20) self.assertEqual(result, (10, 20)) with ( self.subTest("One affected arg"), self.assertDeprecated("'a'", "some_func"), ): result = some_func(10) self.assertEqual(result, (10, 2)) def test_some_keyword_only_params(self): """Works when keeping some params as positional-or-keyword.""" @deprecate_posargs(RemovedAfterNextVersionWarning, ["b"]) def some_func(a, *, b=1): return a, b with self.assertDeprecated("'b'", "some_func"): result = some_func(10, 20) self.assertEqual(result, (10, 20)) def test_no_warning_when_not_needed(self): @deprecate_posargs(RemovedAfterNextVersionWarning, ["b"]) def some_func(a=0, *, b=1): return a, b with self.subTest("All arguments supplied"), self.assertNoLogs(level="WARNING"): result = some_func(10, b=20) self.assertEqual(result, (10, 20)) with self.subTest("All default arguments"), self.assertNoLogs(level="WARNING"): result = some_func() self.assertEqual(result, (0, 1)) with ( self.subTest("Partial arguments supplied"), self.assertNoLogs(level="WARNING"), ): result = some_func(10) self.assertEqual(result, (10, 1)) def test_allows_reordering_keyword_only_params(self): """Keyword-only params can be freely added and rearranged.""" # Original signature: some_func(b=2, a=1), and remappable_names # reflects the original positional argument order. @deprecate_posargs(RemovedAfterNextVersionWarning, ["b", "a"]) def some_func(*, aa_new=0, a=1, b=2): return aa_new, a, b with self.assertDeprecated("'b', 'a'", "some_func"): result = some_func(20, 10) self.assertEqual(result, (0, 10, 20)) def test_detects_duplicate_arguments(self): @deprecate_posargs(RemovedAfterNextVersionWarning, ["b", "c"]) def func(a, *, b=1, c=2): return a, b, c msg = ( "func() got both deprecated positional and keyword argument values for {0}" ) with ( self.subTest("One duplicate"), self.assertRaisesMessage(TypeError, msg.format("'b'")), ): func(0, 10, b=12) with ( self.subTest("Multiple duplicates"), self.assertRaisesMessage(TypeError, msg.format("'b', 'c'")), ): func(0, 10, 20, b=12, c=22) with ( self.subTest("No false positives for valid kwargs"), # Deprecation warning for 'b', not TypeError for duplicate 'c'. 
self.assertDeprecated("'b'", "func"), ): result = func(0, 11, c=22) self.assertEqual(result, (0, 11, 22)) def test_detects_extra_positional_arguments(self): @deprecate_posargs(RemovedAfterNextVersionWarning, ["b"]) def func(a, *, b=1): return a, b with self.assertRaisesMessage( TypeError, "func() takes at most 2 positional argument(s) (including 1 deprecated) " "but 3 were given.", ): func(10, 20, 30) def test_avoids_remapping_to_new_keyword_arguments(self): # Only 'b' is moving; 'c' was added later. @deprecate_posargs(RemovedAfterNextVersionWarning, ["b"]) def func(a, *, b=1, c=2): return a, b, c with self.assertRaisesMessage( TypeError, "func() takes at most 2 positional argument(s) (including 1 deprecated) " "but 3 were given.", ): func(10, 20, 30) def test_variable_kwargs(self): """Works with **kwargs.""" @deprecate_posargs(RemovedAfterNextVersionWarning, ["b"]) def some_func(a, *, b=1, **kwargs): return a, b, kwargs with ( self.subTest("Called with additional kwargs"), self.assertDeprecated("'b'", "some_func"), ): result = some_func(10, 20, c=30) self.assertEqual(result, (10, 20, {"c": 30})) with ( self.subTest("Called without additional kwargs"), self.assertDeprecated("'b'", "some_func"), ): result = some_func(10, 20) self.assertEqual(result, (10, 20, {})) with ( self.subTest("Called with too many positional arguments"), # Similar to test_detects_extra_positional_arguments() above, # but verifying logic is not confused by variable **kwargs. self.assertRaisesMessage( TypeError, "some_func() takes at most 2 positional argument(s) (including 1 " "deprecated) but 3 were given.", ), ): some_func(10, 20, 30) with self.subTest("No warning needed"): result = some_func(10, b=20, c=30) self.assertEqual(result, (10, 20, {"c": 30})) def test_positional_only_params(self): @deprecate_posargs(RemovedAfterNextVersionWarning, ["c"]) def some_func(a, /, b, *, c=3): return a, b, c with self.assertDeprecated("'c'", "some_func"): result = some_func(10, 20, 30) self.assertEqual(result, (10, 20, 30)) def test_class_methods(self): """ Deprecations for class methods should be bound properly and should omit the `self` or `cls` argument from the suggested replacement. """ class SomeClass: @deprecate_posargs(RemovedAfterNextVersionWarning, ["a", "b"]) def __init__(self, *, a=0, b=1): self.a = a self.b = b @deprecate_posargs(RemovedAfterNextVersionWarning, ["a", "b"]) def some_method(self, *, a, b=1): return self.a, self.b, a, b @staticmethod @deprecate_posargs(RemovedAfterNextVersionWarning, ["a", "b"]) def some_static_method(*, a, b=1): return a, b @classmethod @deprecate_posargs(RemovedAfterNextVersionWarning, ["a", "b"]) def some_class_method(cls, *, a, b=1): return cls.__name__, a, b with ( self.subTest("Constructor"), # Warning should use the class name, not `__init__()`. 
self.assertDeprecated("'a', 'b'", "SomeClass"), ): instance = SomeClass(10, 20) self.assertEqual(instance.a, 10) self.assertEqual(instance.b, 20) with ( self.subTest("Instance method"), self.assertDeprecated("'a', 'b'", "some_method"), ): result = SomeClass().some_method(10, 20) self.assertEqual(result, (0, 1, 10, 20)) with ( self.subTest("Static method on instance"), self.assertDeprecated("'a', 'b'", "some_static_method"), ): result = SomeClass().some_static_method(10, 20) self.assertEqual(result, (10, 20)) with ( self.subTest("Static method on class"), self.assertDeprecated("'a', 'b'", "some_static_method"), ): result = SomeClass.some_static_method(10, 20) self.assertEqual(result, (10, 20)) with ( self.subTest("Class method on instance"), self.assertDeprecated("'a', 'b'", "some_class_method"), ): result = SomeClass().some_class_method(10, 20) self.assertEqual(result, ("SomeClass", 10, 20)) with ( self.subTest("Class method on class"), self.assertDeprecated("'a', 'b'", "some_class_method"), ): result = SomeClass.some_class_method(10, 20) self.assertEqual(result, ("SomeClass", 10, 20)) def test_incorrect_classmethod_order(self): """Catch classmethod applied in wrong order.""" with self.assertRaisesMessage( TypeError, "Apply @classmethod before @deprecate_posargs." ): class SomeClass: @deprecate_posargs(RemovedAfterNextVersionWarning, ["a"]) @classmethod def some_class_method(cls, *, a): pass def test_incorrect_staticmethod_order(self): """Catch staticmethod applied in wrong order.""" with self.assertRaisesMessage( TypeError, "Apply @staticmethod before @deprecate_posargs." ): class SomeClass: @deprecate_posargs(RemovedAfterNextVersionWarning, ["a"]) @staticmethod def some_static_method(*, a): pass async def test_async(self): """A decorated async function is still async.""" @deprecate_posargs(RemovedAfterNextVersionWarning, ["a", "b"]) async def some_func(*, a, b=1): return a, b self.assertTrue(inspect.iscoroutinefunction(some_func.__wrapped__)) self.assertTrue(inspect.iscoroutinefunction(some_func)) with ( self.subTest("With deprecation warning"), self.assertDeprecated("'a', 'b'", "some_func"), ): result = await some_func(10, 20) self.assertEqual(result, (10, 20)) with ( self.subTest("Without deprecation warning"), self.assertNoLogs(level="WARNING"), ): result = await some_func(a=10, b=20) self.assertEqual(result, (10, 20)) def test_applied_to_lambda(self): """ Please don't try to deprecate lambda args! What does that even mean?! (But if it happens, the decorator should do something reasonable.) 
""" lambda_func = deprecate_posargs(RemovedAfterNextVersionWarning, ["b"])( lambda a, *, b=1: (a, b) ) with self.assertDeprecated("'b'", "<lambda>"): result = lambda_func(10, 20) self.assertEqual(result, (10, 20)) def test_bare_init(self): """Can't replace '__init__' with class name if not in a class.""" @deprecate_posargs(RemovedAfterNextVersionWarning, ["a"]) def __init__(*, a): pass with self.assertDeprecated("'a'", "__init__"): __init__(10) def test_warning_source_location(self): """The warning points to caller, not the decorator implementation.""" @deprecate_posargs(RemovedAfterNextVersionWarning, "a") def some_func(*, a): return a with self.assertWarns(RemovedAfterNextVersionWarning) as cm: some_func(10) self.assertEqual(cm.filename, __file__) self.assertEqual(cm.lineno, inspect.currentframe().f_lineno - 2) def test_decorator_requires_keyword_only_params(self): with self.assertRaisesMessage( TypeError, "@deprecate_posargs() requires at least one keyword-only parameter " "(after a `*` entry in the parameters list).", ): @deprecate_posargs(RemovedAfterNextVersionWarning, ["b"]) def func(a, b=1): return a, b def test_decorator_rejects_var_positional_param(self): with self.assertRaisesMessage( TypeError, "@deprecate_posargs() cannot be used with variable positional `*args`.", ): @deprecate_posargs(RemovedAfterNextVersionWarning, ["b"]) def func(*args, b=1): return args, b @unittest.skipUnless(PY314, "Deferred annotations are Python 3.14+ only") def test_decorator_rejects_var_positional_param_with_deferred_annotation(self): with self.assertRaisesMessage( TypeError, "@deprecate_posargs() cannot be used with variable positional `*args`.", ): @deprecate_posargs(RemovedAfterNextVersionWarning, ["b"]) def func(*args, b: AnnotatedKwarg = 1): return args, b def test_decorator_does_not_apply_to_class(self): with self.assertRaisesMessage( TypeError, "@deprecate_posargs cannot be applied to a class. (Apply it to the " "__init__ method.)", ): @deprecate_posargs(RemovedAfterNextVersionWarning, ["b"]) class NotThisClass: pass def test_decorator_requires_remappable_names_be_keyword_only(self): """remappable_names cannot refer to positional-or-keyword params.""" with self.assertRaisesMessage( TypeError, "@deprecate_posargs() requires all remappable_names to be keyword-only " "parameters.", ): @deprecate_posargs(RemovedAfterNextVersionWarning, ["a", "b"]) def func(a, *, b=1): return a, b def test_decorator_requires_remappable_names_exist(self): """remappable_names cannot refer to variable kwargs.""" with self.assertRaisesMessage( TypeError, "@deprecate_posargs() requires all remappable_names to be keyword-only " "parameters.", ): @deprecate_posargs(RemovedAfterNextVersionWarning, ["b", "c"]) def func(a, *, b=1, **kwargs): c = kwargs.get("c") return a, b, c def test_decorator_preserves_signature_and_metadata(self): def original(a, b=1, *, c=2): """Docstring.""" return a, b, c decorated = deprecate_posargs(RemovedAfterNextVersionWarning, ["c"])(original) self.assertEqual(original.__name__, decorated.__name__) self.assertEqual(original.__qualname__, decorated.__qualname__) self.assertEqual(original.__doc__, decorated.__doc__) self.assertEqual(inspect.signature(original), inspect.signature(decorated))
{ "repo_id": "django/django", "file_path": "tests/deprecation/test_deprecate_posargs.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 344, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
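The tests above pin down the contract of `django.utils.deprecation.deprecate_posargs`. A minimal sketch of the migration pattern they verify; as the note at the top of the test class says, real code would substitute a concrete `RemovedInDjangoXXWarning` for the generic warning used here:

from django.utils.deprecation import RemovedAfterNextVersionWarning, deprecate_posargs

# Before: def send(subject, body, html=False). To make `html` keyword-only
# without breaking existing positional callers, move it behind `*` and list
# it in remappable_names; positional use keeps working but emits the warning.
@deprecate_posargs(RemovedAfterNextVersionWarning, ["html"])
def send(subject, body, *, html=False):
    return subject, body, html

send("hi", "text", True)       # Warns: pass html=... by keyword instead.
send("hi", "text", html=True)  # No warning.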
django/django:django/middleware/csp.py
from django.conf import settings from django.utils.csp import CSP, LazyNonce, build_policy from django.utils.deprecation import MiddlewareMixin def get_nonce(request): return getattr(request, "_csp_nonce", None) class ContentSecurityPolicyMiddleware(MiddlewareMixin): def process_request(self, request): request._csp_nonce = LazyNonce() def process_response(self, request, response): nonce = get_nonce(request) sentinel = object() if (csp_config := getattr(response, "_csp_config", sentinel)) is sentinel: csp_config = settings.SECURE_CSP if (csp_ro_config := getattr(response, "_csp_ro_config", sentinel)) is sentinel: csp_ro_config = settings.SECURE_CSP_REPORT_ONLY for header, config in [ (CSP.HEADER_ENFORCE, csp_config), (CSP.HEADER_REPORT_ONLY, csp_ro_config), ]: # If headers are already set on the response, don't overwrite them. # This allows for views to set their own CSP headers as needed. # An empty config means CSP headers are not added to the response. if config and header not in response: response.headers[str(header)] = build_policy(config, nonce) return response
{ "repo_id": "django/django", "file_path": "django/middleware/csp.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 25, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
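A minimal settings sketch for enabling this middleware, based only on what the module reads (`SECURE_CSP` and `SECURE_CSP_REPORT_ONLY`); the directive values are illustrative:

# settings.py (sketch)
from django.utils.csp import CSP

MIDDLEWARE = [
    # ...
    "django.middleware.csp.ContentSecurityPolicyMiddleware",
]

SECURE_CSP = {
    "default-src": [CSP.SELF],
    "script-src": [CSP.SELF, CSP.NONCE],  # NONCE sentinel replaced per request.
}
SECURE_CSP_REPORT_ONLY = None  # No report-only header unless configured.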
django/django:django/utils/csp.py
import secrets from enum import StrEnum from django.utils.functional import SimpleLazyObject, empty class CSP(StrEnum): """ Content Security Policy constants for directive values and special tokens. These constants represent: 1. Standard quoted string values from the CSP spec (e.g., 'self', 'unsafe-inline') 2. Special placeholder tokens (NONCE) that get replaced by the middleware Using this enum instead of raw strings provides better type checking, autocompletion, and protection against common mistakes like: - Typos (e.g., 'noone' instead of 'none') - Missing quotes (e.g., ["self"] instead of ["'self'"]) - Inconsistent quote styles (e.g., ["'self'", "\"unsafe-inline\""]) Example usage in Django settings: SECURE_CSP = { "default-src": [CSP.NONE], "script-src": [CSP.SELF, CSP.NONCE], } """ # HTTP Headers. HEADER_ENFORCE = "Content-Security-Policy" HEADER_REPORT_ONLY = "Content-Security-Policy-Report-Only" # Standard CSP directive values. NONE = "'none'" REPORT_SAMPLE = "'report-sample'" SELF = "'self'" STRICT_DYNAMIC = "'strict-dynamic'" UNSAFE_EVAL = "'unsafe-eval'" UNSAFE_HASHES = "'unsafe-hashes'" UNSAFE_INLINE = "'unsafe-inline'" WASM_UNSAFE_EVAL = "'wasm-unsafe-eval'" # Special placeholder that gets replaced by the middleware. # The value itself is arbitrary and should not be mistaken for a real # nonce. NONCE = "<CSP_NONCE_SENTINEL>" class LazyNonce(SimpleLazyObject): """ Lazily generates a cryptographically secure nonce string, for use in CSP headers. The nonce is only generated when first accessed (e.g., via string interpolation or inside a template). The nonce will evaluate as `True` if it has been generated, and `False` if it has not. This is useful for third-party Django libraries that want to support CSP without requiring it. Example Django template usage with context processors enabled: <script{% if csp_nonce %} nonce="{{ csp_nonce }}"...{% endif %}> The `{% if %}` block will only render if the nonce has been evaluated elsewhere. """ def __init__(self): super().__init__(generate_nonce) def __bool__(self): return self._wrapped is not empty def generate_nonce(): return secrets.token_urlsafe(16) def build_policy(config, nonce=None): policy = [] for directive, values in config.items(): if values in (None, False): continue if values is True: rendered_value = "" else: if isinstance(values, set): # Sort values for consistency, preventing cache invalidation # between requests and ensuring reliable browser caching. values = sorted(values) elif not isinstance(values, list | tuple): values = [values] # Replace the nonce sentinel with the actual nonce values, if the # sentinel is found and a nonce is provided. Otherwise, remove it. if (has_sentinel := CSP.NONCE in values) and nonce: values = [f"'nonce-{nonce}'" if v == CSP.NONCE else v for v in values] elif has_sentinel: values = [v for v in values if v != CSP.NONCE] if not values: continue rendered_value = " ".join(values) policy.append(f"{directive} {rendered_value}".rstrip()) return "; ".join(policy)
{ "repo_id": "django/django", "file_path": "django/utils/csp.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 82, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
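To make `build_policy`'s rendering rules concrete (None/False drops a directive, True renders a bare directive, and the NONCE sentinel is substituted or stripped), a small illustration:

from django.utils.csp import CSP, build_policy

config = {
    "default-src": [CSP.SELF, CSP.NONCE],
    "script-src": None,                 # Dropped entirely.
    "upgrade-insecure-requests": True,  # Rendered with no value.
}
print(build_policy(config, nonce="abc123"))
# default-src 'self' 'nonce-abc123'; upgrade-insecure-requests
print(build_policy(config))  # Without a nonce, the sentinel is removed.
# default-src 'self'; upgrade-insecure-requests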
django/django:tests/middleware/test_csp.py
import time from utils_tests.test_csp import basic_config, basic_policy from django.contrib.staticfiles.testing import StaticLiveServerTestCase from django.test import SimpleTestCase from django.test.selenium import SeleniumTestCase from django.test.utils import modify_settings, override_settings from django.utils.csp import CSP from .views import csp_reports @override_settings( MIDDLEWARE=["django.middleware.csp.ContentSecurityPolicyMiddleware"], ROOT_URLCONF="middleware.urls", ) class CSPMiddlewareTest(SimpleTestCase): @override_settings(SECURE_CSP=None, SECURE_CSP_REPORT_ONLY=None) def test_csp_defaults_off(self): response = self.client.get("/csp-base/") self.assertNotIn(CSP.HEADER_ENFORCE, response) self.assertNotIn(CSP.HEADER_REPORT_ONLY, response) @override_settings(SECURE_CSP=basic_config, SECURE_CSP_REPORT_ONLY=None) def test_csp_basic(self): """ With SECURE_CSP set to a valid value, the middleware adds a "Content-Security-Policy" header to the response. """ response = self.client.get("/csp-base/") self.assertEqual(response[CSP.HEADER_ENFORCE], basic_policy) self.assertNotIn(CSP.HEADER_REPORT_ONLY, response) @override_settings(SECURE_CSP={"default-src": [CSP.SELF, CSP.NONCE]}) def test_csp_basic_with_nonce(self): """ Test the nonce is added to the header and matches what is in the view. """ response = self.client.get("/csp-nonce/") nonce = response.text self.assertTrue(nonce) self.assertEqual( response[CSP.HEADER_ENFORCE], f"default-src 'self' 'nonce-{nonce}'" ) @override_settings(SECURE_CSP={"default-src": [CSP.SELF, CSP.NONCE]}) def test_csp_basic_with_nonce_but_unused(self): """ Test if `request.csp_nonce` is never accessed, it is not added to the header. """ response = self.client.get("/csp-base/") nonce = response.text self.assertIsNotNone(nonce) self.assertEqual(response[CSP.HEADER_ENFORCE], basic_policy) @override_settings(SECURE_CSP=None, SECURE_CSP_REPORT_ONLY=basic_config) def test_csp_report_only_basic(self): """ With SECURE_CSP_REPORT_ONLY set to a valid value, the middleware adds a "Content-Security-Policy-Report-Only" header to the response. """ response = self.client.get("/csp-base/") self.assertEqual(response[CSP.HEADER_REPORT_ONLY], basic_policy) self.assertNotIn(CSP.HEADER_ENFORCE, response) @override_settings( SECURE_CSP=basic_config, SECURE_CSP_REPORT_ONLY=basic_config, ) def test_csp_both(self): """ If both SECURE_CSP and SECURE_CSP_REPORT_ONLY are set, the middleware adds both headers to the response. """ response = self.client.get("/csp-base/") self.assertEqual(response[CSP.HEADER_ENFORCE], basic_policy) self.assertEqual(response[CSP.HEADER_REPORT_ONLY], basic_policy) @override_settings( DEBUG=True, SECURE_CSP=basic_config, SECURE_CSP_REPORT_ONLY=basic_config, ) def test_csp_404_debug_view(self): """ Test that the CSP headers are not added to the debug view. """ response = self.client.get("/csp-404/") self.assertNotIn(CSP.HEADER_ENFORCE, response) self.assertNotIn(CSP.HEADER_REPORT_ONLY, response) @override_settings( DEBUG=True, SECURE_CSP=basic_config, SECURE_CSP_REPORT_ONLY=basic_config, ) def test_csp_500_debug_view(self): """ Test that the CSP headers are not added to the debug view. 
""" with self.assertLogs("django.request", "WARNING"): response = self.client.get("/csp-500/") self.assertNotIn(CSP.HEADER_ENFORCE, response) self.assertNotIn(CSP.HEADER_REPORT_ONLY, response) @override_settings( MIDDLEWARE=["django.middleware.csp.ContentSecurityPolicyMiddleware"], ROOT_URLCONF="middleware.urls", SECURE_CSP=basic_config, SECURE_CSP_REPORT_ONLY=basic_config, ) class CSPMiddlewareWithDecoratedViewsTest(SimpleTestCase): def test_no_decorators(self): response = self.client.get("/csp-base/") self.assertEqual(response[CSP.HEADER_ENFORCE], basic_policy) self.assertEqual(response[CSP.HEADER_REPORT_ONLY], basic_policy) def test_csp_disabled_enforced(self): """ `csp_override({})` only disables the enforced CSP header. """ response = self.client.get("/csp-disabled-enforced/") self.assertNotIn(CSP.HEADER_ENFORCE, response) self.assertEqual(response[CSP.HEADER_REPORT_ONLY], basic_policy) def test_csp_report_only_disabled(self): """ `csp_report_only_override({})` only disables the report-only header. """ response = self.client.get("/csp-disabled-report-only/") self.assertNotIn(CSP.HEADER_REPORT_ONLY, response) self.assertEqual(response[CSP.HEADER_ENFORCE], basic_policy) def test_csp_disabled_both(self): """ Using both CSP decorators with empty mappings will clear both headers. """ response = self.client.get("/csp-disabled-both/") self.assertNotIn(CSP.HEADER_ENFORCE, response) self.assertNotIn(CSP.HEADER_REPORT_ONLY, response) def test_csp_override_enforced(self): """ `csp_override` only overrides the enforced header. """ response = self.client.get("/csp-override-enforced/") self.assertEqual( response[CSP.HEADER_ENFORCE], "default-src 'self'; img-src 'self' data:" ) self.assertEqual(response[CSP.HEADER_REPORT_ONLY], basic_policy) def test_csp_report_only_override(self): """ `csp_report_only_override` only overrides the report-only header. """ response = self.client.get("/csp-override-report-only/") self.assertEqual( response[CSP.HEADER_REPORT_ONLY], "default-src 'self'; img-src 'self' data:" ) self.assertEqual(response[CSP.HEADER_ENFORCE], basic_policy) def test_csp_override_both_decorator(self): """ Using both CSP decorators overrides both CSP Django settings. """ response = self.client.get("/csp-override-both/") self.assertEqual( response[CSP.HEADER_ENFORCE], "default-src 'self'; img-src 'self' data:" ) self.assertEqual( response[CSP.HEADER_REPORT_ONLY], "default-src 'self'; img-src 'self' data:" ) @override_settings( ROOT_URLCONF="middleware.urls", SECURE_CSP_REPORT_ONLY={ "default-src": [CSP.NONE], "img-src": [CSP.SELF], "script-src": [CSP.SELF], "style-src": [CSP.SELF], "report-uri": "/csp-report/", }, ) @modify_settings( MIDDLEWARE={"append": "django.middleware.csp.ContentSecurityPolicyMiddleware"} ) class CSPSeleniumTestCase(SeleniumTestCase, StaticLiveServerTestCase): available_apps = ["middleware"] def setUp(self): self.addCleanup(csp_reports.clear) super().setUp() def test_reports_are_generated(self): url = self.live_server_url + "/csp-failure/" self.selenium.get(url) time.sleep(1) # Allow time for the CSP report to be sent. reports = sorted( (r["csp-report"]["document-uri"], r["csp-report"]["violated-directive"]) for r in csp_reports ) self.assertEqual(reports, [(url, "style-src-elem")])
{ "repo_id": "django/django", "file_path": "tests/middleware/test_csp.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 182, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
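The decorated views exercised by these tests replace the middleware's settings-derived headers per view. A sketch of that pattern, assuming the decorator lives in `django.views.decorators.csp` (the import path is inferred from these tests, which only reference the decorated URLs, not the import):

from django.http import HttpResponse
from django.utils.csp import CSP
from django.views.decorators.csp import csp_override  # Path assumed.

@csp_override({"default-src": [CSP.SELF], "img-src": [CSP.SELF, "data:"]})
def gallery(request):
    # Enforced header replaced for this view; report-only header untouched.
    return HttpResponse("ok")

@csp_override({})  # An empty mapping disables the enforced header entirely.
def legacy(request):
    return HttpResponse("ok")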
django/django:tests/utils_tests/test_csp.py
from secrets import token_urlsafe from unittest.mock import patch from django.test import SimpleTestCase from django.utils.csp import CSP, LazyNonce, build_policy, generate_nonce from django.utils.functional import empty basic_config = { "default-src": [CSP.SELF], } alt_config = { "default-src": [CSP.SELF, CSP.UNSAFE_INLINE], } basic_policy = "default-src 'self'" class CSPConstantsTests(SimpleTestCase): def test_constants(self): self.assertEqual(CSP.NONE, "'none'") self.assertEqual(CSP.REPORT_SAMPLE, "'report-sample'") self.assertEqual(CSP.SELF, "'self'") self.assertEqual(CSP.STRICT_DYNAMIC, "'strict-dynamic'") self.assertEqual(CSP.UNSAFE_EVAL, "'unsafe-eval'") self.assertEqual(CSP.UNSAFE_HASHES, "'unsafe-hashes'") self.assertEqual(CSP.UNSAFE_INLINE, "'unsafe-inline'") self.assertEqual(CSP.WASM_UNSAFE_EVAL, "'wasm-unsafe-eval'") self.assertEqual(CSP.NONCE, "<CSP_NONCE_SENTINEL>") class CSPBuildPolicyTest(SimpleTestCase): def assertPolicyEqual(self, a, b): parts_a = sorted(a.split("; ")) if a is not None else None parts_b = sorted(b.split("; ")) if b is not None else None self.assertEqual(parts_a, parts_b, f"Policies not equal: {a!r} != {b!r}") def test_config_empty(self): self.assertPolicyEqual(build_policy({}), "") def test_config_basic(self): self.assertPolicyEqual(build_policy(basic_config), basic_policy) def test_config_multiple_directives(self): policy = { "default-src": [CSP.SELF], "script-src": [CSP.NONE], } self.assertPolicyEqual( build_policy(policy), "default-src 'self'; script-src 'none'" ) def test_config_value_as_string(self): """ Test that a single value can be passed as a string. """ policy = {"default-src": CSP.SELF} self.assertPolicyEqual(build_policy(policy), "default-src 'self'") def test_config_value_as_tuple(self): """ Test that a tuple can be passed as a value. """ policy = {"default-src": (CSP.SELF, "foo.com")} self.assertPolicyEqual(build_policy(policy), "default-src 'self' foo.com") def test_config_value_as_set(self): """ Test that a set can be passed as a value. Sets are often used in Django settings to ensure uniqueness, however, sets are unordered. The middleware ensures consistency via sorting if a set is passed. """ policy = {"default-src": {CSP.SELF, "foo.com", "bar.com"}} self.assertPolicyEqual( build_policy(policy), "default-src 'self' bar.com foo.com" ) def test_config_value_none(self): """ Test that `None` removes the directive from the policy. Useful in cases where the CSP config is scripted in some way or explicitly not wanting to set a directive. """ policy = {"default-src": [CSP.SELF], "script-src": None} self.assertPolicyEqual(build_policy(policy), basic_policy) def test_config_value_boolean_true(self): policy = {"default-src": [CSP.SELF], "block-all-mixed-content": True} self.assertPolicyEqual( build_policy(policy), "default-src 'self'; block-all-mixed-content" ) def test_config_value_boolean_false(self): policy = {"default-src": [CSP.SELF], "block-all-mixed-content": False} self.assertPolicyEqual(build_policy(policy), basic_policy) def test_config_value_multiple_boolean(self): policy = { "default-src": [CSP.SELF], "block-all-mixed-content": True, "upgrade-insecure-requests": True, } self.assertPolicyEqual( build_policy(policy), "default-src 'self'; block-all-mixed-content; upgrade-insecure-requests", ) def test_config_with_nonce_arg(self): """ Test when the `CSP.NONCE` is not in the defined policy, the nonce argument has no effect. 
""" self.assertPolicyEqual(build_policy(basic_config, nonce="abc123"), basic_policy) def test_config_with_nonce(self): policy = {"default-src": [CSP.SELF, CSP.NONCE]} self.assertPolicyEqual( build_policy(policy, nonce="abc123"), "default-src 'self' 'nonce-abc123'", ) def test_config_with_multiple_nonces(self): policy = { "default-src": [CSP.SELF, CSP.NONCE], "script-src": [CSP.SELF, CSP.NONCE], } self.assertPolicyEqual( build_policy(policy, nonce="abc123"), "default-src 'self' 'nonce-abc123'; script-src 'self' 'nonce-abc123'", ) def test_config_with_empty_directive(self): policy = {"default-src": []} self.assertPolicyEqual(build_policy(policy), "") class LazyNonceTests(SimpleTestCase): def test_generates_on_usage(self): generated_tokens = [] nonce = LazyNonce() self.assertFalse(nonce) self.assertIs(nonce._wrapped, empty) def memento_token_urlsafe(size): generated_tokens.append(result := token_urlsafe(size)) return result with patch("django.utils.csp.secrets.token_urlsafe", memento_token_urlsafe): # Force usage, similar to template rendering, to generate the # nonce. val = str(nonce) self.assertTrue(nonce) self.assertEqual(nonce, val) self.assertIsInstance(nonce, str) self.assertEqual(repr(nonce), f"<LazyNonce: '{nonce}'>") self.assertEqual(len(val), 22) # Based on secrets.token_urlsafe of 16 bytes. self.assertEqual(generated_tokens, [nonce]) # Also test the wrapped value. self.assertEqual(nonce._wrapped, val) def test_returns_same_value(self): nonce = LazyNonce() first = str(nonce) second = str(nonce) self.assertEqual(first, second) def test_repr(self): nonce = LazyNonce() self.assertEqual(repr(nonce), f"<LazyNonce: {repr(generate_nonce)}>") str(nonce) # Force nonce generation. self.assertRegex(repr(nonce), r"<LazyNonce: '[^']+'>")
{ "repo_id": "django/django", "file_path": "tests/utils_tests/test_csp.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 144, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
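The laziness contract these tests pin down, in brief:

from django.utils.csp import LazyNonce

nonce = LazyNonce()
assert not nonce                    # Falsy until first use: nothing generated.
value = str(nonce)                  # First string use generates the token...
assert nonce and len(value) == 22   # ...22 chars, from token_urlsafe(16).
assert str(nonce) == value          # Later uses return the same value.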
django/django:tests/postgres_tests/test_app_installed_check.py
from django.core import checks from django.db import models from django.test import modify_settings from django.test.utils import isolate_apps from . import PostgreSQLTestCase from .fields import ( BigIntegerRangeField, DateRangeField, DateTimeRangeField, DecimalRangeField, HStoreField, IntegerRangeField, SearchVectorField, ) from .models import IntegerArrayModel, NestedIntegerArrayModel, PostgreSQLModel try: from django.contrib.postgres.constraints import ExclusionConstraint from django.contrib.postgres.fields.ranges import RangeOperators from django.contrib.postgres.indexes import GinIndex, PostgresIndex from django.contrib.postgres.search import SearchQueryField except ImportError: pass @isolate_apps("postgres_tests") class TestPostgresAppInstalledCheck(PostgreSQLTestCase): def _make_error(self, obj, klass_name): """Helper to create postgres.E005 error for specific objects.""" return checks.Error( "'django.contrib.postgres' must be in INSTALLED_APPS in order to " f"use {klass_name}.", obj=obj, id="postgres.E005", ) def assert_model_check_errors(self, model_class, expected_errors): errors = model_class.check(databases=self.databases) self.assertEqual(errors, []) with modify_settings(INSTALLED_APPS={"remove": "django.contrib.postgres"}): errors = model_class.check(databases=self.databases) self.assertEqual(errors, expected_errors) def test_indexes(self): class IndexModel(PostgreSQLModel): field = models.IntegerField() class Meta: indexes = [ PostgresIndex(fields=["id"], name="postgres_index_test"), GinIndex(fields=["field"], name="gin_index_test"), ] self.assert_model_check_errors( IndexModel, [ self._make_error(IndexModel, "PostgresIndex"), self._make_error(IndexModel, "GinIndex"), ], ) def test_exclusion_constraint(self): class ExclusionModel(PostgreSQLModel): value = models.IntegerField() class Meta: constraints = [ ExclusionConstraint( name="exclude_equal", expressions=[("value", RangeOperators.EQUAL)], ) ] self.assert_model_check_errors( ExclusionModel, [self._make_error(ExclusionModel, "ExclusionConstraint")] ) def test_array_field(self): field = IntegerArrayModel._meta.get_field("field") self.assert_model_check_errors( IntegerArrayModel, [self._make_error(field, "ArrayField")], ) def test_nested_array_field(self): """Inner ArrayField does not cause a postgres.E001 error.""" field = NestedIntegerArrayModel._meta.get_field("field") self.assert_model_check_errors( NestedIntegerArrayModel, [ self._make_error(field, "ArrayField"), ], ) def test_hstore_field(self): class HStoreFieldModel(PostgreSQLModel): field = HStoreField() field = HStoreFieldModel._meta.get_field("field") self.assert_model_check_errors( HStoreFieldModel, [ self._make_error(field, "HStoreField"), ], ) def test_range_fields(self): class RangeFieldsModel(PostgreSQLModel): int_range = IntegerRangeField() bigint_range = BigIntegerRangeField() decimal_range = DecimalRangeField() datetime_range = DateTimeRangeField() date_range = DateRangeField() expected_errors = [ self._make_error(field, field.__class__.__name__) for field in [ RangeFieldsModel._meta.get_field("int_range"), RangeFieldsModel._meta.get_field("bigint_range"), RangeFieldsModel._meta.get_field("decimal_range"), RangeFieldsModel._meta.get_field("datetime_range"), RangeFieldsModel._meta.get_field("date_range"), ] ] self.assert_model_check_errors(RangeFieldsModel, expected_errors) def test_search_vector_field(self): class SearchModel(PostgreSQLModel): search_vector = SearchVectorField() search_query = SearchQueryField() vector_field = 
SearchModel._meta.get_field("search_vector") query_field = SearchModel._meta.get_field("search_query") self.assert_model_check_errors( SearchModel, [ self._make_error(vector_field, "SearchVectorField"), self._make_error(query_field, "SearchQueryField"), ], )
{ "repo_id": "django/django", "file_path": "tests/postgres_tests/test_app_installed_check.py", "license": "BSD 3-Clause \"New\" or \"Revised\" License", "lines": 122, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
docling-project/docling:tests/test_picture_description_rgb_conversion.py
"""Test that PictureDescriptionBaseModel converts non-RGB images to RGB.""" from collections.abc import Iterable from typing import ClassVar, List, Type from docling_core.types.doc import DoclingDocument, PictureItem from PIL import Image from docling.datamodel.base_models import ItemAndImageEnrichmentElement from docling.datamodel.pipeline_options import PictureDescriptionBaseOptions from docling.models.picture_description_base_model import PictureDescriptionBaseModel class _TestOptions(PictureDescriptionBaseOptions): kind: ClassVar[str] = "test" class _RecordingPictureDescriptionModel(PictureDescriptionBaseModel): """Spy subclass that records image modes arriving at _annotate_images.""" def __init__(self) -> None: self.enabled = True self.options = _TestOptions() self.provenance = "test" self.received_modes: List[str] = [] @classmethod def get_options_type(cls) -> Type[PictureDescriptionBaseOptions]: return _TestOptions def _annotate_images(self, images: Iterable[Image.Image]) -> Iterable[str]: for image in images: self.received_modes.append(image.mode) yield "test description" def _make_element(mode: str) -> ItemAndImageEnrichmentElement: img = Image.new(mode, (100, 100)) item = PictureItem(self_ref="#/pictures/0") return ItemAndImageEnrichmentElement(item=item, image=img) def test_rgba_image_converted_to_rgb() -> None: """RGBA images must be converted to RGB before picture description.""" model = _RecordingPictureDescriptionModel() doc = DoclingDocument(name="test") list(model(doc=doc, element_batch=[_make_element("RGBA")])) assert model.received_modes == ["RGB"]
{ "repo_id": "docling-project/docling", "file_path": "tests/test_picture_description_rgb_conversion.py", "license": "MIT License", "lines": 34, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
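The test asserts that non-RGB inputs reach `_annotate_images` already converted. A sketch of the guard the base model is expected to apply (the helper name here is hypothetical; the actual hook lives inside `PictureDescriptionBaseModel`):

from PIL import Image

def ensure_rgb(image: Image.Image) -> Image.Image:
    # Convert RGBA/L/P/etc. to RGB so downstream vision models get 3 channels.
    return image if image.mode == "RGB" else image.convert("RGB")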
docling-project/docling:docling/backend/xml/xbrl_backend.py
"""Backend to parse XBRL (eXtensible Business Reporting Language) documents. XBRL is a standard XML format used for business and financial reporting. It is widely used by companies, regulators, and financial institutions worldwide for exchanging financial information. This backend leverages the Arelle library for XBRL processing. Warning: This implementation uses DoclingDocument's GraphData object to represent key-value pairs extracted from XBRL numeric facts. The design of key-value pairs (and therefore the GraphData, GraphCell, GraphLink class) may soon change in a new release of the `docling-core` library. This implementation will need to be updated accordingly when that release is available. """ import logging import re import shutil import zipfile from io import BytesIO from pathlib import Path from tempfile import TemporaryDirectory from typing import Final from docling_core.types.doc import ( DoclingDocument, DocumentOrigin, GraphCell, GraphCellLabel, GraphData, GraphLink, GraphLinkLabel, ) from typing_extensions import override from docling.backend.abstract_backend import DeclarativeDocumentBackend from docling.backend.html_backend import HTMLDocumentBackend from docling.datamodel.backend_options import HTMLBackendOptions, XBRLBackendOptions from docling.datamodel.base_models import InputFormat from docling.datamodel.document import InputDocument from docling.exceptions import OperationNotAllowed _XBRL_AVAILABLE: bool = False _XBRL_IMPORT_ERROR: ImportError | None = None try: from arelle import Cntlr # type: ignore from arelle.ModelDocument import Type # type: ignore from arelle.ModelXbrl import ModelXbrl # type: ignore _XBRL_AVAILABLE = True except ImportError as e: _XBRL_IMPORT_ERROR = e _log = logging.getLogger(__name__) _WEB_CACHE_TIMEOUT: Final[int] = 10 class XBRLDocumentBackend(DeclarativeDocumentBackend): """Backend to parse XBRL (eXtensible Business Reporting Language) documents. XBRL is a standard XML-based format for business and financial reporting. It is used globally by companies and regulators for exchanging financial information in a structured, machine-readable format. The backend parses an XBRL instance file given a taxonomy package passed as a backend option. Refer to https://www.xbrl.org for more details on XBRL. In particular, refer to https://www.xbrl.org/Specification/taxonomy-package/REC-2016-04-19/taxonomy-package-REC-2016-04-19.html for details on how to provide a taxonomy package. This backend leverages the Arelle library for XBRL processing. """ @override def __init__( self, in_doc: "InputDocument", path_or_stream: BytesIO | Path, options: XBRLBackendOptions = XBRLBackendOptions(), ) -> None: # Check if arelle is available before proceeding if not _XBRL_AVAILABLE: raise ImportError( "The 'arelle-release' package is required to process XBRL documents. " "Please install it using `pip install 'docling[xbrl]'`" ) from _XBRL_IMPORT_ERROR super().__init__(in_doc, path_or_stream) self.options: XBRLBackendOptions = options self.model_xbrl: ModelXbrl | None = None try: if ( not self.options.enable_local_fetch and not self.options.enable_remote_fetch ): raise OperationNotAllowed( "Fetching local or remote resources is only allowed when set" " explicitly. Set 'options.enable_local_fetch=True' or" " 'options.enable_remote_fetch=True'. Either one or the other" " needs to be enabled to load taxonomies." 
) with TemporaryDirectory() as tmpdir: tmp_path: Path = Path(tmpdir) zip_paths: list[str] = [] if self.options.taxonomy: taxonomy: Path = self.options.taxonomy.resolve() if not taxonomy.is_dir(): raise ValueError( "The 'taxonomy' backend option must be a directory" ) taxonomy_path = shutil.copytree( taxonomy, tmp_path, dirs_exist_ok=True ) zip_paths = [ str(item) for item in taxonomy_path.iterdir() if item.is_file() and item.suffix.lower() == ".zip" and zipfile.is_zipfile(item) ] if zip_paths: _log.debug( f"Files to be passed as taxonomy packages: {zip_paths}" ) if isinstance(path_or_stream, BytesIO): instance_path: Path = tmp_path / "instance.xml" instance_path.write_bytes(path_or_stream.getvalue()) elif isinstance(path_or_stream, Path): instance_path = Path(shutil.copy2(path_or_stream, tmp_path)) else: raise TypeError("path_or_stream must be Path or BytesIO") # cntlr = Cntlr.Cntlr(logFileName="logToPrint") cntlr = Cntlr.Cntlr() # Disable remote access for security purposes, unless explicitly set if not self.options.enable_remote_fetch: cntlr.webCache.workOffline = True cntlr.modelManager.validateDisclosureSystem = False else: # TODO: parametrize the timeout? cntlr.webCache.timeout = _WEB_CACHE_TIMEOUT # TODO: custom set cntlr.webCache.cacheDir? _log.debug( f"Web Cache for remote taxonomy is: {cntlr.webCache.cacheDir}" ) model = cntlr.modelManager.load( str(instance_path), taxonomyPackages=zip_paths ) if ( not isinstance(model, ModelXbrl) or not model or not model.modelDocument ): raise ValueError("Invalid or unreadable XBRL file") if model.modelDocument.type != Type.INSTANCE: raise ValueError("Document is not an XBRL instance") if model.errors: raise ValueError(f"XBRL loaded with errors: {model.errors}") self.model_xbrl = model self.valid = True except Exception as exc: raise RuntimeError( "Could not initialize XBRL backend for file with hash" f" {self.document_hash}." ) from exc @override def is_valid(self) -> bool: return self.valid @classmethod @override def supports_pagination(cls) -> bool: return False @override def unload(self): if self.model_xbrl: self.model_xbrl.close() @classmethod @override def supported_formats(cls) -> set[InputFormat]: return {InputFormat.XML_XBRL} @override def convert(self) -> DoclingDocument: """Convert XBRL document to DoclingDocument using Arelle library. This is a placeholder implementation that creates a basic document structure. Full XBRL parsing using Arelle library can be implemented here. 
""" _log.debug("Starting XBRL instance conversion...") if not self.is_valid() or not self.model_xbrl: raise RuntimeError("Invalid document with hash {self.document_hash}") origin = DocumentOrigin( filename=self.file.name or "file", mimetype="application/xml", binary_hash=self.document_hash, ) doc = DoclingDocument(name=self.file.stem or "file", origin=origin) doc_name = doc.name # Some metadata doc_type: str = "" doc_org: str = "" doc_period: str = "" for fact in self.model_xbrl.facts: if fact.qname.localName == "DocumentType" and fact.value: doc_type = fact.value if fact.qname.localName == "EntityRegistrantName" and fact.value: doc_org = fact.value if fact.qname.localName == "DocumentPeriodEndDate" and fact.value: doc_period = fact.value title = f"{doc_type} {doc_org} {doc_period}".strip() title = title if title else self.model_xbrl.modelDocument.basename doc.add_title(text=title) # Text blocks (as HTML) html_options = HTMLBackendOptions( enable_local_fetch=False, enable_remote_fetch=False, fetch_images=False, infer_furniture=False, add_title=False, ) _log.info("Parsing text block items and key-value items...") kv_idx: int = 0 cells: list[GraphCell] = [] links: list[GraphLink] = [] for fact in self.model_xbrl.facts: if fact.concept is None: continue if ( fact.concept.type is not None and fact.concept.type.name == "textBlockItemType" and fact.value ): content = re.sub(r"\s+", " ", fact.value).strip() stream = BytesIO(bytes(content, encoding="utf-8")) in_doc = InputDocument( path_or_stream=stream, format=InputFormat.HTML, backend=HTMLDocumentBackend, backend_options=html_options, filename="text_block.html", ) html_backend = HTMLDocumentBackend( in_doc=in_doc, path_or_stream=stream, options=html_options, ) html_doc = html_backend.convert() doc = DoclingDocument.concatenate(docs=(doc, html_doc)) if fact.concept.isNumeric: # TODO: deal with context, units, precision, types,... if not fact.localName or not fact.value: continue cells.append( GraphCell( label=GraphCellLabel.KEY, cell_id=kv_idx, text=fact.localName, orig=fact.localName, ) ) cells.append( GraphCell( label=GraphCellLabel.VALUE, cell_id=kv_idx + 1, text=fact.value, orig=fact.value, ) ) links.append( GraphLink( label=GraphLinkLabel.TO_VALUE, source_cell_id=kv_idx, target_cell_id=kv_idx + 1, ) ) kv_idx += 2 doc.name = doc_name if cells and links: graph_data: GraphData = GraphData(cells=cells, links=links) doc.add_key_values(graph=graph_data) return doc
{ "repo_id": "docling-project/docling", "file_path": "docling/backend/xml/xbrl_backend.py", "license": "MIT License", "lines": 263, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
docling-project/docling:tests/test_backend_xbrl.py
"""Test module for the XBRL backend parser. The data used in this test is in the public domain. It has been downloaded from the U.S. Securities and Exchange Commission (SEC)'s Electronic Data Gathering, Analysis, and Retrieval (EDGAR) system. """ import os from io import BytesIO from pathlib import Path import pytest from docling_core.types.doc import DoclingDocument from docling.datamodel.backend_options import XBRLBackendOptions from docling.datamodel.base_models import DocumentStream, InputFormat from docling.datamodel.document import ConversionResult from docling.document_converter import DocumentConverter, XBRLFormatOption from .test_data_gen_flag import GEN_TEST_DATA from .verify_utils import verify_document, verify_export GENERATE = GEN_TEST_DATA @pytest.fixture(scope="module") def xbrl_paths() -> list[tuple[Path, Path]]: directory = Path(os.path.dirname(__file__) + "/data/xbrl/") xml_files = sorted( [ item for item in directory.iterdir() if item.is_file() and item.suffix.lower() in {".xml", ".xbrl"} ], key=lambda p: p.name.lower(), ) taxonomy_dir = sorted( [ item for item in directory.iterdir() if item.is_dir() and str(item).endswith("-taxonomy") ], key=lambda p: p.name.lower(), ) assert len(xml_files) == len(taxonomy_dir), ( "Mismatch in XBRL instance reports and taxonomy directories" ) return zip(xml_files, taxonomy_dir) def test_e2e_xbrl_conversions(xbrl_paths, use_stream=False): for report, taxonomy in xbrl_paths: gt_path = report.parent.parent / "groundtruth" / "docling_v2" / report.name backend_options = XBRLBackendOptions(enable_local_fetch=True, taxonomy=taxonomy) # set enable_remote_fetch to download the necessary external taxonomy files in web cache # backend_options = XBRLBackendOptions(enable_local_fetch=True, enable_remote_fetch=True, taxonomy=taxonomy) converter = DocumentConverter( allowed_formats=[InputFormat.XML_XBRL], format_options={ InputFormat.XML_XBRL: XBRLFormatOption(backend_options=backend_options) }, ) if use_stream: buf = BytesIO(report.open("rb").read()) stream = DocumentStream(name=report.name, stream=buf) conv_result: ConversionResult = converter.convert(stream) else: conv_result: ConversionResult = converter.convert(report) doc: DoclingDocument = conv_result.document pred_md: str = doc.export_to_markdown() assert verify_export(pred_md, str(gt_path) + ".md"), "export to md" pred_itxt: str = doc._export_to_indented_text( max_text_len=70, explicit_tables=False ) assert verify_export(pred_itxt, str(gt_path) + ".itxt"), ( "export to indented-text" ) assert verify_document(doc, str(gt_path) + ".json", GENERATE), "export to json"
{ "repo_id": "docling-project/docling", "file_path": "tests/test_backend_xbrl.py", "license": "MIT License", "lines": 68, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
docling-project/docling:docling/datamodel/image_classification_engine_options.py
"""Engine option helpers for image-classification runtimes.""" from __future__ import annotations from typing import Any, Dict, List, Literal, Optional from pydantic import AnyUrl, Field from docling.datamodel.settings import default_compile_model from docling.models.inference_engines.image_classification.base import ( BaseImageClassificationEngineOptions, ImageClassificationEngineType, ) class OnnxRuntimeImageClassificationEngineOptions(BaseImageClassificationEngineOptions): """Runtime configuration for ONNX Runtime based image-classification models.""" engine_type: Literal[ImageClassificationEngineType.ONNXRUNTIME] = ( ImageClassificationEngineType.ONNXRUNTIME ) model_filename: str = Field( default="model.onnx", description="Filename of the ONNX export inside the model repository", ) providers: List[str] = Field( default_factory=lambda: ["CPUExecutionProvider"], description="Ordered list of ONNX Runtime execution providers to try", ) class TransformersImageClassificationEngineOptions( BaseImageClassificationEngineOptions ): """Runtime configuration for Transformers-based image-classification models.""" engine_type: Literal[ImageClassificationEngineType.TRANSFORMERS] = ( ImageClassificationEngineType.TRANSFORMERS ) torch_dtype: str | None = Field( default=None, description="PyTorch dtype for model inference (e.g., 'float32', 'float16', 'bfloat16')", ) compile_model: bool = Field( default_factory=default_compile_model, description="Whether to compile the model with torch.compile() for better performance.", ) class ApiKserveV2ImageClassificationEngineOptions(BaseImageClassificationEngineOptions): """Runtime configuration for remote KServe v2 inference.""" engine_type: Literal[ImageClassificationEngineType.API_KSERVE_V2] = ( ImageClassificationEngineType.API_KSERVE_V2 ) url: AnyUrl = Field( description=( "Base URL of the KServe v2 server (e.g., 'http://localhost:8000'). " "The full endpoint path is constructed automatically as " "/v2/models/{model_name}[/versions/{version}]/infer." ), ) model_name: Optional[str] = Field( default=None, description=( "Remote model name registered in the KServe v2 endpoint. " "If omitted, a repo_id-derived default is used." ), ) model_version: Optional[str] = Field( default=None, description="Optional model version. If omitted, the server default is used.", ) headers: Dict[str, str] = Field( default_factory=dict, description="Optional HTTP headers for authentication/routing.", ) timeout: float = Field( default=60.0, description="HTTP request timeout in seconds.", ) request_parameters: Dict[str, Any] = Field( default_factory=dict, description="Optional top-level KServe v2 infer request parameters.", )
{ "repo_id": "docling-project/docling", "file_path": "docling/datamodel/image_classification_engine_options.py", "license": "MIT License", "lines": 72, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
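Constructing the remote variant uses only the fields declared above; the values here are illustrative:

from docling.datamodel.image_classification_engine_options import (
    ApiKserveV2ImageClassificationEngineOptions,
)

options = ApiKserveV2ImageClassificationEngineOptions(
    url="http://localhost:8000",     # Base URL only; the /v2/... path is appended.
    model_name="figure-classifier",  # Omit to fall back to a repo_id-derived name.
    headers={"Authorization": "Bearer <token>"},
    timeout=30.0,
)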
docling-project/docling:docling/datamodel/picture_classification_options.py
"""Options for picture classification stages.""" from __future__ import annotations from typing import ClassVar from pydantic import BaseModel, Field from docling.datamodel import stage_model_specs from docling.datamodel.stage_model_specs import ( ImageClassificationModelSpec, ImageClassificationStagePresetMixin, ) from docling.models.inference_engines.image_classification.base import ( ImageClassificationEngineOptionsMixin, ) class DocumentPictureClassifierOptions( ImageClassificationStagePresetMixin, ImageClassificationEngineOptionsMixin, BaseModel, ): """Options for configuring the DocumentPictureClassifier stage.""" kind: ClassVar[str] = "document_picture_classifier" # TODO: default should become False in a future release, and this field # may be removed entirely once docling-core drops the deprecated # `annotations` attribute from DoclingDocument items. _keep_deprecated_annotations: bool = True model_spec: ImageClassificationModelSpec = Field( default_factory=lambda: stage_model_specs.IMAGE_CLASSIFICATION_DOCUMENT_FIGURE.model_spec.model_copy( deep=True ), description="Image-classification model specification for picture classification.", ) @property def repo_id(self) -> str: return self.model_spec.get_repo_id(self.engine_options.engine_type) @property def revision(self) -> str: return self.model_spec.get_revision(self.engine_options.engine_type) @property def repo_cache_folder(self) -> str: return self.repo_id.replace("/", "--") DocumentPictureClassifierOptions.register_preset( stage_model_specs.IMAGE_CLASSIFICATION_DOCUMENT_FIGURE )
{ "repo_id": "docling-project/docling", "file_path": "docling/datamodel/picture_classification_options.py", "license": "MIT License", "lines": 41, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
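The derived properties resolve per-engine model coordinates from the preset spec. A sketch, assuming the `ImageClassificationEngineOptionsMixin` supplies a default `engine_options` so the options class instantiates without arguments:

from docling.datamodel.picture_classification_options import (
    DocumentPictureClassifierOptions,
)

opts = DocumentPictureClassifierOptions()  # Default document-figure preset.
print(opts.repo_id)            # Repo resolved for the configured engine type.
print(opts.repo_cache_folder)  # Same id with '/' replaced by '--' for caching.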
docling-project/docling:docling/models/inference_engines/common/hf_vision_base.py
"""Shared HuggingFace helpers for vision inference engine families.""" from __future__ import annotations import logging from numbers import Integral, Real from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, Optional, Union import numpy as np from docling.datamodel.accelerator_options import AcceleratorOptions from docling.models.inference_engines.vlm._utils import resolve_model_artifacts_path from docling.models.utils.hf_model_download import HuggingFaceModelDownloadMixin if TYPE_CHECKING: from transformers.image_processing_utils import BaseImageProcessor from docling.datamodel.stage_model_specs import EngineModelConfig _log = logging.getLogger(__name__) class HfVisionModelMixin(HuggingFaceModelDownloadMixin): """Shared utility mixin for HF vision model loading and label conversion.""" def _init_hf_vision_model( self, *, model_config: Optional[EngineModelConfig], accelerator_options: AcceleratorOptions, artifacts_path: Optional[Union[Path, str]], model_family_name: str, ) -> None: if model_config is None or model_config.repo_id is None: raise ValueError( f"{type(self).__name__} requires model_config with repo_id" ) self._model_config: EngineModelConfig = model_config self._repo_id: str = model_config.repo_id self._accelerator_options = accelerator_options self._artifacts_path = ( artifacts_path if artifacts_path is None else Path(artifacts_path) ) self._model_family_name = model_family_name self._processor: Optional[BaseImageProcessor] = None self._id_to_label: Dict[int, str] = {} def _resolve_model_folder(self, repo_id: str, revision: str) -> Path: """Resolve model folder from artifacts_path or HF download.""" def download_wrapper(download_repo_id: str, download_revision: str) -> Path: _log.info( "Downloading %s model from HuggingFace: %s@%s", self._model_family_name, download_repo_id, download_revision, ) return self.download_models( repo_id=download_repo_id, revision=download_revision, local_dir=None, force=False, progress=False, ) return resolve_model_artifacts_path( repo_id=repo_id, revision=revision, artifacts_path=self._artifacts_path, download_fn=download_wrapper, ) def _load_preprocessor(self, model_folder: Path) -> BaseImageProcessor: """Load HuggingFace image processor from model folder.""" preprocessor_config = model_folder / "preprocessor_config.json" if not preprocessor_config.exists(): raise FileNotFoundError( f"Image processor config not found: {preprocessor_config}" ) try: from transformers import AutoImageProcessor _log.debug("Loading image processor from %s", model_folder) return AutoImageProcessor.from_pretrained(str(model_folder)) except Exception as exc: raise RuntimeError( f"Failed to load image processor from {model_folder}: {exc}" ) def _load_label_mapping(self, model_folder: Path) -> Dict[int, str]: """Load label mapping from HuggingFace model config.""" try: from transformers import AutoConfig config = AutoConfig.from_pretrained(str(model_folder)) return { int(label_id): label_name for label_id, label_name in config.id2label.items() } except Exception as exc: raise RuntimeError( f"Failed to load label mapping from model config at {model_folder}: {exc}" ) def get_label_mapping(self) -> Dict[int, str]: """Get the label mapping for this model.""" return self._id_to_label @staticmethod def _as_float(value: Any) -> float: if isinstance(value, Real): return float(value) if isinstance(value, np.ndarray): if value.size != 1: raise TypeError( f"Expected scalar-like ndarray with size 1, got shape={value.shape}" ) return float(value.reshape(-1)[0]) import 
torch if isinstance(value, torch.Tensor): if value.numel() != 1: raise TypeError( f"Expected scalar-like tensor with one element, got shape={tuple(value.shape)}" ) return float(value.item()) raise TypeError(f"Unsupported score value type: {type(value)!r}") @staticmethod def _as_int(value: Any) -> int: if isinstance(value, Integral): return int(value) if isinstance(value, np.ndarray): if value.size != 1: raise TypeError( f"Expected scalar-like ndarray with size 1, got shape={value.shape}" ) return int(value.reshape(-1)[0]) import torch if isinstance(value, torch.Tensor): if value.numel() != 1: raise TypeError( f"Expected scalar-like tensor with one element, got shape={tuple(value.shape)}" ) return int(value.item()) raise TypeError(f"Unsupported label value type: {type(value)!r}")
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/common/hf_vision_base.py", "license": "MIT License", "lines": 126, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
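The `_as_float`/`_as_int` helpers normalize framework-specific scalars (plain numbers, size-1 numpy arrays, one-element torch tensors) into Python floats and ints. A quick illustration of the accepted shapes, calling the static methods directly for demonstration:

import numpy as np

from docling.models.inference_engines.common.hf_vision_base import HfVisionModelMixin

HfVisionModelMixin._as_float(np.array([0.75]))  # -> 0.75 (size-1 ndarray is ok)
HfVisionModelMixin._as_int(np.int64(3))         # -> 3 (numpy ints are Integral)
HfVisionModelMixin._as_float(np.zeros((2, 2)))  # raises TypeError: not scalar-like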
docling-project/docling:docling/models/inference_engines/common/kserve_v2_http.py
"""Utilities for calling KServe v2 REST inference endpoints. Note: This is a minimal synchronous implementation. The official KServe Python SDK (https://github.com/kserve/kserve) provides an async InferenceRESTClient with similar functionality, but is currently in alpha and requires async/await support. """ from __future__ import annotations from dataclasses import dataclass from typing import Any, Dict, List, Mapping, Optional import numpy as np import requests from pydantic import BaseModel # KServe v2 protocol uses the same data type names as Triton Inference Server KSERVE_V2_NUMPY_DATATYPES: Dict[str, np.dtype[Any]] = { "BOOL": np.dtype(np.bool_), "UINT8": np.dtype(np.uint8), "UINT16": np.dtype(np.uint16), "UINT32": np.dtype(np.uint32), "UINT64": np.dtype(np.uint64), "INT8": np.dtype(np.int8), "INT16": np.dtype(np.int16), "INT32": np.dtype(np.int32), "INT64": np.dtype(np.int64), "FP16": np.dtype(np.float16), "FP32": np.dtype(np.float32), "FP64": np.dtype(np.float64), } NUMPY_KSERVE_V2_DATATYPES: Dict[np.dtype[Any], str] = { dtype: name for name, dtype in KSERVE_V2_NUMPY_DATATYPES.items() } def _encode_input_tensor(name: str, tensor: np.ndarray) -> Dict[str, Any]: kserve_dtype = NUMPY_KSERVE_V2_DATATYPES.get(tensor.dtype) if kserve_dtype is None: raise ValueError( f"Unsupported numpy dtype for KServe v2 input: {tensor.dtype!s}. " f"Supported types: {list(NUMPY_KSERVE_V2_DATATYPES.keys())}" ) return { "name": name, "shape": list(tensor.shape), "datatype": kserve_dtype, "data": tensor.reshape(-1).tolist(), } class KserveV2OutputTensor(BaseModel): """Single output tensor in KServe v2 response payload.""" name: str datatype: str shape: List[int] data: Optional[List[Any]] = None class KserveV2InferResponse(BaseModel): """KServe v2 infer response payload.""" outputs: List[KserveV2OutputTensor] class KserveV2ModelTensorSpec(BaseModel): """Tensor metadata entry returned by KServe v2 model metadata endpoint.""" name: str datatype: str shape: List[int | str] class KserveV2ModelMetadataResponse(BaseModel): """KServe v2 model metadata response payload.""" name: str versions: Optional[List[str]] = None platform: Optional[str] = None inputs: List[KserveV2ModelTensorSpec] outputs: List[KserveV2ModelTensorSpec] def _decode_output_tensor(raw_output: KserveV2OutputTensor) -> np.ndarray: np_dtype = KSERVE_V2_NUMPY_DATATYPES.get(raw_output.datatype) if np_dtype is None: raise RuntimeError( f"Unsupported KServe v2 output datatype: {raw_output.datatype}. " f"Supported types: {list(KSERVE_V2_NUMPY_DATATYPES.keys())}" ) if raw_output.data is None: raise RuntimeError( "KServe v2 binary output mode is not supported. " "Configure server/client for JSON outputs with inline data." ) shape = tuple(int(dim) for dim in raw_output.shape) array = np.asarray(raw_output.data, dtype=np_dtype) return array.reshape(shape) @dataclass(frozen=True) class KserveV2HttpClient: """Minimal client for KServe v2 JSON infer requests.""" base_url: str model_name: str model_version: Optional[str] timeout: float headers: Mapping[str, str] def _execute_http_request( self, url: str, method: str = "GET", **kwargs: Any, ) -> requests.Response: """Execute HTTP request with consistent error handling. 
Args: url: Target URL method: HTTP method (GET or POST) **kwargs: Additional arguments passed to requests Returns: HTTP response object Raises: requests.exceptions.Timeout: If request exceeds timeout requests.exceptions.ConnectionError: If cannot connect to server requests.exceptions.HTTPError: If server returns error status """ try: if method == "GET": response = requests.get( url, headers=dict(self.headers), timeout=self.timeout, **kwargs ) else: # POST response = requests.post( url, headers=dict(self.headers), timeout=self.timeout, **kwargs ) response.raise_for_status() return response except requests.exceptions.Timeout as exc: raise requests.exceptions.Timeout( f"Timeout during {method} request to {url}" ) from exc except requests.exceptions.ConnectionError as exc: raise requests.exceptions.ConnectionError( f"Failed to connect to {url}" ) from exc except requests.exceptions.HTTPError as exc: raise requests.exceptions.HTTPError( f"HTTP error {response.status_code} from {url}: {response.text}" ) from exc @property def model_metadata_url(self) -> str: """Get the model metadata endpoint URL. Expects base_url to be exactly the base URL without any path components (e.g., 'http://localhost:8000', not 'http://localhost:8000/v2'). Returns: Model metadata URL following KServe v2 protocol: - {base_url}/v2/models/{model_name}[/versions/{version}] """ base = self.base_url.rstrip("/") if self.model_version: return f"{base}/v2/models/{self.model_name}/versions/{self.model_version}" return f"{base}/v2/models/{self.model_name}" @property def infer_url(self) -> str: """Get the inference endpoint URL. Returns: Inference URL following KServe v2 protocol: - {base_url}/v2/models/{model_name}[/versions/{version}]/infer """ return f"{self.model_metadata_url}/infer" def get_model_metadata(self) -> KserveV2ModelMetadataResponse: """Fetch model metadata from KServe v2 endpoint. Returns: Validated model metadata including inputs/outputs schema Raises: requests.exceptions.Timeout: If request exceeds timeout requests.exceptions.ConnectionError: If cannot connect to server requests.exceptions.HTTPError: If server returns error status RuntimeError: If response format is invalid """ response = self._execute_http_request(self.model_metadata_url, method="GET") try: return KserveV2ModelMetadataResponse.model_validate(response.json()) except Exception as exc: raise RuntimeError( f"Invalid metadata response from {self.model_metadata_url}: {exc}" ) from exc def infer( self, *, inputs: Mapping[str, np.ndarray], output_names: list[str], request_parameters: Optional[Mapping[str, Any]] = None, ) -> Dict[str, np.ndarray]: """Execute inference request against KServe v2 endpoint. 
Args: inputs: Mapping of input tensor names to numpy arrays output_names: List of expected output tensor names request_parameters: Optional KServe v2 request-level parameters Returns: Mapping of output tensor names to numpy arrays Raises: requests.exceptions.Timeout: If request exceeds timeout requests.exceptions.ConnectionError: If cannot connect to server requests.exceptions.HTTPError: If server returns error status RuntimeError: If response format is invalid """ payload: Dict[str, Any] = { "inputs": [ _encode_input_tensor(name=input_name, tensor=tensor) for input_name, tensor in inputs.items() ] } if output_names: payload["outputs"] = [{"name": output_name} for output_name in output_names] if request_parameters: payload["parameters"] = dict(request_parameters) response = self._execute_http_request( self.infer_url, method="POST", json=payload ) try: body = KserveV2InferResponse.model_validate(response.json()) except Exception as exc: raise RuntimeError( f"Invalid inference response from {self.infer_url}: {exc}" ) from exc decoded_outputs: Dict[str, np.ndarray] = {} for output in body.outputs: decoded_outputs[output.name] = _decode_output_tensor(output) return decoded_outputs
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/common/kserve_v2_http.py", "license": "MIT License", "lines": 208, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
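A minimal usage sketch for the client above, assuming a KServe v2 server is reachable at a placeholder URL; the model, tensor, and output names are illustrative only and not part of the module.

import numpy as np

from docling.models.inference_engines.common.kserve_v2_http import KserveV2HttpClient

# Placeholder endpoint and model name; a real KServe v2 server must be running.
client = KserveV2HttpClient(
    base_url="http://localhost:8000",
    model_name="my_model",
    model_version=None,  # fall back to the server's default version
    timeout=30.0,
    headers={},
)

# Single FP32 input; tensor and output names depend on the served model.
batch = np.zeros((1, 3, 224, 224), dtype=np.float32)
outputs = client.infer(
    inputs={"pixel_values": batch},
    output_names=["logits"],
)
print({name: arr.shape for name, arr in outputs.items()})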
docling-project/docling:docling/models/inference_engines/image_classification/api_kserve_v2_engine.py
"""KServe v2 remote implementation for image-classification models.""" from __future__ import annotations import logging from pathlib import Path from typing import TYPE_CHECKING, List, Optional, Union import numpy as np from docling.datamodel.accelerator_options import AcceleratorOptions from docling.datamodel.image_classification_engine_options import ( ApiKserveV2ImageClassificationEngineOptions, ) from docling.exceptions import OperationNotAllowed from docling.models.inference_engines.common import KserveV2HttpClient from docling.models.inference_engines.image_classification.base import ( ImageClassificationEngineInput, ImageClassificationEngineOutput, ) from docling.models.inference_engines.image_classification.hf_base import ( HfImageClassificationEngineBase, ) if TYPE_CHECKING: from docling.datamodel.stage_model_specs import EngineModelConfig _log = logging.getLogger(__name__) class ApiKserveV2ImageClassificationEngine(HfImageClassificationEngineBase): """Remote image-classification engine backed by KServe v2-compatible serving.""" def __init__( self, *, enable_remote_services: bool, options: ApiKserveV2ImageClassificationEngineOptions, model_config: Optional[EngineModelConfig] = None, accelerator_options: AcceleratorOptions, artifacts_path: Optional[Union[Path, str]] = None, ): super().__init__( options=options, model_config=model_config, accelerator_options=accelerator_options, artifacts_path=artifacts_path, ) self.options: ApiKserveV2ImageClassificationEngineOptions = options self._kserve_client: Optional[KserveV2HttpClient] = None self._input_name: Optional[str] = None self._output_name: Optional[str] = None if not enable_remote_services: raise OperationNotAllowed( "Connections to remote services are only allowed when set explicitly. " "pipeline_options.enable_remote_services=True." ) def _resolve_model_name(self) -> str: if self.options.model_name: return self.options.model_name return self._repo_id.replace("/", "--") def _resolve_model_version(self) -> Optional[str]: return self.options.model_version def _resolve_tensor_names(self) -> tuple[str, str]: if self._kserve_client is None: raise RuntimeError("KServe v2 client is not initialized.") metadata = self._kserve_client.get_model_metadata() if not metadata.inputs: raise RuntimeError( f"Expected image-classification model metadata to expose at least 1 input, " f"got {len(metadata.inputs)} inputs." ) if not metadata.outputs: raise RuntimeError( f"Expected image-classification model metadata to expose at least 1 output, " f"got {len(metadata.outputs)} outputs." 
) input_name = metadata.inputs[0].name output_name = metadata.outputs[0].name return input_name, output_name def initialize(self) -> None: """Initialize preprocessor/labels and prepare remote client.""" _log.info("Initializing KServe v2 image-classification engine") revision = self._model_config.revision or "main" model_folder = self._resolve_model_folder( repo_id=self._repo_id, revision=revision ) self._processor = self._load_preprocessor(model_folder) self._id_to_label = self._load_label_mapping(model_folder) self._kserve_client = KserveV2HttpClient( base_url=str(self.options.url), model_name=self._resolve_model_name(), model_version=self._resolve_model_version(), timeout=self.options.timeout, headers=self.options.headers, ) self._input_name, self._output_name = self._resolve_tensor_names() self._initialized = True _log.info( "KServe v2 image-classification engine ready (input=%s, output=%s)", self._input_name, self._output_name, ) def predict_batch( self, input_batch: List[ImageClassificationEngineInput] ) -> List[ImageClassificationEngineOutput]: """Run inference on a batch of images against a KServe v2 endpoint.""" if not input_batch: return [] if not self._initialized: raise RuntimeError("Engine not initialized. Call initialize() first.") # Type narrowing: _initialized guarantees these are non-None assert self._processor is not None assert self._kserve_client is not None assert self._input_name is not None assert self._output_name is not None images = [item.image.convert("RGB") for item in input_batch] processed_inputs = self._processor(images=images, return_tensors="np") pixel_values = np.asarray(processed_inputs["pixel_values"], dtype=np.float32) outputs = self._kserve_client.infer( inputs={self._input_name: pixel_values}, output_names=[self._output_name], request_parameters=self.options.request_parameters, ) try: logits_batch = outputs[self._output_name] except KeyError as exc: raise RuntimeError( f"Missing expected KServe v2 output: {self._output_name}" ) from exc logits_batch = np.asarray(logits_batch, dtype=np.float32) if logits_batch.ndim != 2: raise RuntimeError( "Expected logits output shape [batch_size, num_classes], " f"got shape={logits_batch.shape}" ) probs_batch = self._softmax(logits_batch) return self._build_batch_outputs_from_probabilities( input_batch=input_batch, probs_batch=probs_batch, )
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/image_classification/api_kserve_v2_engine.py", "license": "MIT License", "lines": 132, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
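A hedged sketch of the remote-services guard on this engine, going through the factory the same way the scaffolding tests further below do; the repo_id and URL are placeholders.

import pytest

from docling.datamodel.accelerator_options import AcceleratorOptions
from docling.datamodel.image_classification_engine_options import (
    ApiKserveV2ImageClassificationEngineOptions,
)
from docling.datamodel.stage_model_specs import ImageClassificationModelSpec
from docling.exceptions import OperationNotAllowed
from docling.models.inference_engines.image_classification import (
    create_image_classification_engine,
)

options = ApiKserveV2ImageClassificationEngineOptions(url="http://localhost:8000")
spec = ImageClassificationModelSpec(name="demo", repo_id="org/demo")  # placeholder

# Without explicit opt-in, construction is refused before any network access.
with pytest.raises(OperationNotAllowed):
    create_image_classification_engine(
        options=options,
        model_spec=spec,
        enable_remote_services=False,
        accelerator_options=AcceleratorOptions(),
    )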
docling-project/docling:docling/models/inference_engines/image_classification/base.py
"""Base classes for image-classification inference engines.""" from __future__ import annotations import logging from abc import ABC, abstractmethod from enum import Enum from typing import ( TYPE_CHECKING, Any, ClassVar, Dict, List, Literal, Optional, Type, get_args, get_origin, ) from PIL.Image import Image from pydantic import BaseModel, ConfigDict, Field, field_validator from pydantic_core import PydanticUndefined if TYPE_CHECKING: from docling.datamodel.stage_model_specs import EngineModelConfig _log = logging.getLogger(__name__) class ImageClassificationEngineType(str, Enum): """Supported inference engine types for image-classification models.""" ONNXRUNTIME = "onnxruntime" TRANSFORMERS = "transformers" API_KSERVE_V2 = "api_kserve_v2" @classmethod def is_remote_variant(cls, engine_type: ImageClassificationEngineType) -> bool: """Check if an engine type is a remote API variant.""" return engine_type in {cls.API_KSERVE_V2} class BaseImageClassificationEngineOptions(BaseModel): """Base configuration shared across image-classification engines.""" model_config = ConfigDict(arbitrary_types_allowed=True) engine_type: ImageClassificationEngineType = Field( description="Type of inference engine to use", ) top_k: Optional[int] = Field( default=None, ge=1, description="Maximum number of classes to return. If None, all classes are returned.", ) _registry: ClassVar[ Dict[ImageClassificationEngineType, Type[BaseImageClassificationEngineOptions]] ] = {} @classmethod def __pydantic_init_subclass__(cls, **kwargs): super().__pydantic_init_subclass__(**kwargs) if cls is BaseImageClassificationEngineOptions: return field = cls.model_fields.get("engine_type") if not field: return engine_type = None ann = field.annotation if get_origin(ann) is Literal: values = get_args(ann) if len(values) == 1: engine_type = values[0] if engine_type is None and field.default is not PydanticUndefined: engine_type = field.default if engine_type is not None: BaseImageClassificationEngineOptions._registry[engine_type] = cls class ImageClassificationEngineOptionsMixin(BaseModel): engine_options: BaseImageClassificationEngineOptions = Field( description="Runtime configuration for the image-classification engine.", ) @field_validator("engine_options", mode="before") @classmethod def resolve_engine_options(cls, value): if isinstance(value, BaseImageClassificationEngineOptions): return value if isinstance(value, dict): engine_type = value.get("engine_type") model_cls = BaseImageClassificationEngineOptions._registry.get(engine_type) if model_cls: return model_cls.model_validate(value) return value class ImageClassificationEngineInput(BaseModel): """Generic input accepted by every image-classification engine.""" model_config = ConfigDict(arbitrary_types_allowed=True) image: Image = Field(description="PIL image to run inference on") metadata: Dict[str, Any] = Field( default_factory=dict, description="Optional metadata that is echoed back in the output", ) class ImageClassificationEngineOutput(BaseModel): """Output returned by image-classification engines.""" label_ids: List[int] = Field( default_factory=list, description="Predicted class indices sorted by confidence descending", ) scores: List[float] = Field( default_factory=list, description="Confidence scores sorted descending", ) metadata: Dict[str, Any] = Field( default_factory=dict, description="Optional metadata echoed back from the input or engine", ) class BaseImageClassificationEngine(ABC): """Abstract base-class for image-classification engines.""" def __init__( self, 
options: BaseImageClassificationEngineOptions, model_config: Optional[EngineModelConfig] = None, ) -> None: """Initialize the engine. Args: options: Engine-specific configuration options model_config: Model configuration (repo_id, revision, extra_config) """ self.options = options self.model_config = model_config self._initialized = False @abstractmethod def initialize(self) -> None: """Initialize engine resources (load models, allocate buffers, etc.).""" @abstractmethod def predict_batch( self, input_batch: List[ImageClassificationEngineInput] ) -> List[ImageClassificationEngineOutput]: """Run inference on a batch of inputs.""" @abstractmethod def get_label_mapping(self) -> Dict[int, str]: """Get the label mapping for this model. Returns: Dictionary mapping label IDs to label names """ def predict( self, input_data: ImageClassificationEngineInput ) -> ImageClassificationEngineOutput: """Helper to run inference on a single input.""" if not self._initialized: _log.debug("Initializing %s for single prediction", type(self).__name__) self.initialize() results = self.predict_batch([input_data]) return results[0] def __call__( self, input_data: ImageClassificationEngineInput | List[ImageClassificationEngineInput], ) -> ImageClassificationEngineOutput | List[ImageClassificationEngineOutput]: if not self._initialized: _log.debug("Initializing %s for call", type(self).__name__) self.initialize() if isinstance(input_data, list): return self.predict_batch(input_data) return self.predict(input_data)
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/image_classification/base.py", "license": "MIT License", "lines": 150, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
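A brief sketch of the Literal-based registry above: a stage-options model built on the mixin validates a plain dict into the matching concrete options class. The _StageOptions name is invented for this example; importing the concrete options module is what populates the registry.

from docling.datamodel.image_classification_engine_options import (
    OnnxRuntimeImageClassificationEngineOptions,
)
from docling.models.inference_engines.image_classification.base import (
    ImageClassificationEngineOptionsMixin,
)


class _StageOptions(ImageClassificationEngineOptionsMixin):
    """Hypothetical stage-options model, defined only for this sketch."""


# The "before" validator resolves the dict by its engine_type value, because
# defining the concrete subclass registered it via __pydantic_init_subclass__.
opts = _StageOptions(engine_options={"engine_type": "onnxruntime", "top_k": 3})
assert isinstance(opts.engine_options, OnnxRuntimeImageClassificationEngineOptions)
assert opts.engine_options.top_k == 3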
docling-project/docling:docling/models/inference_engines/image_classification/factory.py
"""Factory for creating image-classification engines.""" from __future__ import annotations from pathlib import Path from typing import TYPE_CHECKING, Optional, Union from docling.datamodel.accelerator_options import AcceleratorOptions from docling.models.inference_engines.image_classification.base import ( BaseImageClassificationEngine, BaseImageClassificationEngineOptions, ImageClassificationEngineType, ) if TYPE_CHECKING: from docling.datamodel.stage_model_specs import ( EngineModelConfig, ImageClassificationModelSpec, ) def create_image_classification_engine( *, options: BaseImageClassificationEngineOptions, model_spec: Optional[ImageClassificationModelSpec] = None, enable_remote_services: bool = False, accelerator_options: AcceleratorOptions, artifacts_path: Optional[Union[Path, str]] = None, ) -> BaseImageClassificationEngine: """Factory to create image-classification engines.""" model_config: Optional[EngineModelConfig] = None if model_spec is not None: model_config = model_spec.get_engine_config(options.engine_type) if options.engine_type == ImageClassificationEngineType.ONNXRUNTIME: from docling.datamodel.image_classification_engine_options import ( OnnxRuntimeImageClassificationEngineOptions, ) from docling.models.inference_engines.image_classification.onnxruntime_engine import ( OnnxRuntimeImageClassificationEngine, ) if not isinstance(options, OnnxRuntimeImageClassificationEngineOptions): raise ValueError( "Expected OnnxRuntimeImageClassificationEngineOptions, " f"got {type(options)}" ) return OnnxRuntimeImageClassificationEngine( options=options, model_config=model_config, artifacts_path=artifacts_path, accelerator_options=accelerator_options, ) if options.engine_type == ImageClassificationEngineType.TRANSFORMERS: from docling.datamodel.image_classification_engine_options import ( TransformersImageClassificationEngineOptions, ) from docling.models.inference_engines.image_classification.transformers_engine import ( TransformersImageClassificationEngine, ) if not isinstance(options, TransformersImageClassificationEngineOptions): raise ValueError( "Expected TransformersImageClassificationEngineOptions, " f"got {type(options)}" ) return TransformersImageClassificationEngine( options=options, model_config=model_config, artifacts_path=artifacts_path, accelerator_options=accelerator_options, ) if options.engine_type == ImageClassificationEngineType.API_KSERVE_V2: from docling.datamodel.image_classification_engine_options import ( ApiKserveV2ImageClassificationEngineOptions, ) from docling.models.inference_engines.image_classification.api_kserve_v2_engine import ( ApiKserveV2ImageClassificationEngine, ) if not isinstance(options, ApiKserveV2ImageClassificationEngineOptions): raise ValueError( "Expected ApiKserveV2ImageClassificationEngineOptions, " f"got {type(options)}" ) return ApiKserveV2ImageClassificationEngine( enable_remote_services=enable_remote_services, options=options, model_config=model_config, artifacts_path=artifacts_path, accelerator_options=accelerator_options, ) raise ValueError(f"Unknown engine type: {options.engine_type}")
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/image_classification/factory.py", "license": "MIT License", "lines": 83, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
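A hedged dispatch sketch for the factory above, assuming the ONNX options construct with their defaults; the repo_id is a placeholder, and initialize() is deliberately deferred since it would resolve model artifacts on disk.

from docling.datamodel.accelerator_options import AcceleratorOptions
from docling.datamodel.image_classification_engine_options import (
    OnnxRuntimeImageClassificationEngineOptions,
)
from docling.datamodel.stage_model_specs import ImageClassificationModelSpec
from docling.models.inference_engines.image_classification import (
    create_image_classification_engine,
)

spec = ImageClassificationModelSpec(name="demo", repo_id="org/demo")  # placeholder
engine = create_image_classification_engine(
    options=OnnxRuntimeImageClassificationEngineOptions(),
    model_spec=spec,
    accelerator_options=AcceleratorOptions(),
)
# engine.initialize()  # deferred: would locate model.onnx and build the session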
docling-project/docling:docling/models/inference_engines/image_classification/hf_base.py
"""Shared HuggingFace-based helpers for image-classification engines.""" from __future__ import annotations from pathlib import Path from typing import TYPE_CHECKING, Any, Iterable, Optional, Union import numpy as np from docling.datamodel.accelerator_options import AcceleratorOptions from docling.models.inference_engines.common import HfVisionModelMixin from docling.models.inference_engines.image_classification.base import ( BaseImageClassificationEngine, BaseImageClassificationEngineOptions, ImageClassificationEngineInput, ImageClassificationEngineOutput, ) if TYPE_CHECKING: from docling.datamodel.stage_model_specs import EngineModelConfig class HfImageClassificationEngineBase( HfVisionModelMixin, BaseImageClassificationEngine ): """Base class for image-classification engines that load HF artifacts/configs.""" def __init__( self, *, options: BaseImageClassificationEngineOptions, model_config: Optional[EngineModelConfig] = None, accelerator_options: AcceleratorOptions, artifacts_path: Optional[Union[Path, str]] = None, ) -> None: super().__init__(options=options, model_config=model_config) self.options: BaseImageClassificationEngineOptions = options self._init_hf_vision_model( model_config=model_config, accelerator_options=accelerator_options, artifacts_path=artifacts_path, model_family_name="image-classification", ) def _build_output( self, *, input_item: ImageClassificationEngineInput, labels: Iterable[Any], scores: Iterable[Any], ) -> ImageClassificationEngineOutput: """Build standard engine output from class-score iterables. Note: Assumes labels and scores are already sorted by descending score. """ predictions: list[tuple[int, float]] = [] for label, score in zip(labels, scores): predictions.append((self._as_int(label), self._as_float(score))) if self.options.top_k is not None: predictions = predictions[: self.options.top_k] return ImageClassificationEngineOutput( label_ids=[label for label, _ in predictions], scores=[score for _, score in predictions], metadata=input_item.metadata.copy(), ) def _build_batch_outputs_from_probabilities( self, *, input_batch: list[ImageClassificationEngineInput], probs_batch: np.ndarray, ) -> list[ImageClassificationEngineOutput]: """Build outputs from a probability matrix of shape [batch, num_classes].""" batch_outputs: list[ImageClassificationEngineOutput] = [] for idx, input_item in enumerate(input_batch): probs = probs_batch[idx] if self.options.top_k is not None: k = min(self.options.top_k, len(probs)) top_k_indices = np.argpartition(-probs, k - 1)[:k] sorted_indices = top_k_indices[np.argsort(-probs[top_k_indices])] labels = sorted_indices scores = probs[labels] else: labels = np.argsort(-probs) scores = probs[labels] batch_outputs.append( self._build_output( input_item=input_item, labels=labels, scores=scores, ) ) return batch_outputs @staticmethod def _softmax(logits: np.ndarray) -> np.ndarray: """Compute softmax probabilities over class dimension.""" shifted = logits - np.max(logits, axis=1, keepdims=True) exp_values = np.exp(shifted) return exp_values / np.sum(exp_values, axis=1, keepdims=True)
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/image_classification/hf_base.py", "license": "MIT License", "lines": 88, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
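A small numeric check of the helpers above, reproducing the shifted softmax and the argpartition-based top-k selection on a toy logits matrix.

import numpy as np

logits = np.array([[2.0, 0.5, 1.0], [0.1, 3.0, 0.2]], dtype=np.float32)

# Shifted softmax, as in _softmax: subtract the row max for numerical stability.
shifted = logits - np.max(logits, axis=1, keepdims=True)
probs = np.exp(shifted) / np.sum(np.exp(shifted), axis=1, keepdims=True)
assert np.allclose(probs.sum(axis=1), 1.0)

# Top-k selection, as in _build_batch_outputs_from_probabilities.
k = 2
row = probs[0]
top_k_indices = np.argpartition(-row, k - 1)[:k]  # unordered top-k
sorted_indices = top_k_indices[np.argsort(-row[top_k_indices])]  # descending
print(sorted_indices, row[sorted_indices])  # indices [0 2] for this row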
docling-project/docling:docling/models/inference_engines/image_classification/onnxruntime_engine.py
"""ONNX Runtime implementation for image-classification models.""" from __future__ import annotations import logging from pathlib import Path from typing import TYPE_CHECKING, List, Optional, Union import numpy as np if TYPE_CHECKING: import onnxruntime as ort from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions from docling.datamodel.image_classification_engine_options import ( OnnxRuntimeImageClassificationEngineOptions, ) from docling.models.inference_engines.image_classification.base import ( ImageClassificationEngineInput, ImageClassificationEngineOutput, ) from docling.models.inference_engines.image_classification.hf_base import ( HfImageClassificationEngineBase, ) from docling.utils.accelerator_utils import decide_device if TYPE_CHECKING: from docling.datamodel.stage_model_specs import EngineModelConfig _log = logging.getLogger(__name__) class OnnxRuntimeImageClassificationEngine(HfImageClassificationEngineBase): """ONNX Runtime engine for image-classification models.""" def __init__( self, *, options: OnnxRuntimeImageClassificationEngineOptions, model_config: Optional[EngineModelConfig] = None, accelerator_options: AcceleratorOptions, artifacts_path: Optional[Union[Path, str]] = None, ): """Initialize the ONNX Runtime image-classification engine.""" super().__init__( options=options, model_config=model_config, accelerator_options=accelerator_options, artifacts_path=artifacts_path, ) self.options: OnnxRuntimeImageClassificationEngineOptions = options self._session: Optional[ort.InferenceSession] = None self._model_path: Optional[Path] = None self._input_name: Optional[str] = None self._output_name: Optional[str] = None def _resolve_model_artifacts(self) -> tuple[Path, Path]: """Resolve model artifacts from artifacts_path or HF download.""" repo_id = self._repo_id revision = self._model_config.revision or "main" model_filename = self._resolve_model_filename() model_folder = self._resolve_model_folder( repo_id=repo_id, revision=str(revision), ) model_path = model_folder / model_filename if not model_path.exists(): raise FileNotFoundError( f"ONNX model file '{model_filename}' not found: {model_path}" ) return model_folder, model_path def _resolve_model_filename(self) -> str: """Determine which ONNX filename to load.""" filename = self.options.model_filename extra_filename = self._model_config.extra_config.get("model_filename") if extra_filename and isinstance(extra_filename, str): filename = extra_filename return filename def _resolve_input_name(self, session: ort.InferenceSession) -> str: """Resolve ONNX input name from the loaded model graph.""" input_nodes = session.get_inputs() if not input_nodes: raise RuntimeError("ONNX model exposes no inputs") return input_nodes[0].name def _resolve_output_name(self, session: ort.InferenceSession) -> str: """Resolve ONNX output name from the loaded model graph.""" output_nodes = session.get_outputs() if not output_nodes: raise RuntimeError("ONNX model exposes no outputs") return output_nodes[0].name def initialize(self) -> None: """Initialize ONNX session and preprocessor.""" import onnxruntime as ort _log.info("Initializing ONNX Runtime image-classification engine") model_folder, self._model_path = self._resolve_model_artifacts() _log.debug("Using ONNX model at %s", self._model_path) self._processor = self._load_preprocessor(model_folder) self._id_to_label = self._load_label_mapping(model_folder) sess_options = ort.SessionOptions() sess_options.intra_op_num_threads = self._accelerator_options.num_threads 
providers = self._resolve_providers() self._session = ort.InferenceSession( str(self._model_path), sess_options=sess_options, providers=providers, ) self._input_name = self._resolve_input_name(self._session) self._output_name = self._resolve_output_name(self._session) _log.debug("Using ONNX input name: %s", self._input_name) _log.debug("Using ONNX output name: %s", self._output_name) self._initialized = True _log.info( "ONNX Runtime image-classification engine ready (providers=%s)", self._session.get_providers(), ) def _resolve_providers(self) -> List[str]: """Resolve ONNX Runtime providers from accelerator and engine options.""" configured_providers = self.options.providers or ["CPUExecutionProvider"] if configured_providers != ["CPUExecutionProvider"]: return configured_providers device = decide_device( self._accelerator_options.device, supported_devices=[AcceleratorDevice.CPU, AcceleratorDevice.CUDA], ) if device.startswith("cuda"): return ["CUDAExecutionProvider", "CPUExecutionProvider"] if device != AcceleratorDevice.CPU.value: _log.warning( "Unsupported ONNX device '%s' for image classification. Falling back to CPU provider.", device, ) return ["CPUExecutionProvider"] def predict_batch( self, input_batch: List[ImageClassificationEngineInput] ) -> List[ImageClassificationEngineOutput]: """Run inference on a batch of inputs.""" if not input_batch: return [] if ( self._session is None or self._processor is None or self._input_name is None or self._output_name is None ): raise RuntimeError("Engine not initialized. Call initialize() first.") images = [item.image.convert("RGB") for item in input_batch] inputs = self._processor(images=images, return_tensors="np") input_tensor = np.asarray(inputs["pixel_values"], dtype=np.float32) output_tensors = self._session.run( [self._output_name], { self._input_name: input_tensor, }, ) if len(output_tensors) < 1: raise RuntimeError( "Expected ONNX model to return at least 1 output containing logits" ) logits_batch = np.asarray(output_tensors[0], dtype=np.float32) if logits_batch.ndim != 2: raise RuntimeError( "Expected ONNX logits output shape [batch_size, num_classes], " f"got shape={logits_batch.shape}" ) probs_batch = self._softmax(logits_batch) return self._build_batch_outputs_from_probabilities( input_batch=input_batch, probs_batch=probs_batch, )
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/image_classification/onnxruntime_engine.py", "license": "MIT License", "lines": 158, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
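A minimal standalone sketch of the session setup pattern used in initialize() above; 'model.onnx' is a placeholder path that must exist locally, and the thread count stands in for accelerator_options.num_threads.

import onnxruntime as ort

sess_options = ort.SessionOptions()
sess_options.intra_op_num_threads = 4  # mirrors accelerator_options.num_threads

session = ort.InferenceSession(
    "model.onnx",  # placeholder path
    sess_options=sess_options,
    providers=["CPUExecutionProvider"],
)
input_name = session.get_inputs()[0].name  # first graph input, as above
output_name = session.get_outputs()[0].name  # first graph output, as above
print(input_name, output_name, session.get_providers())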
docling-project/docling:docling/models/inference_engines/image_classification/transformers_engine.py
"""Transformers implementation for image-classification models.""" from __future__ import annotations import logging import sys from pathlib import Path from typing import TYPE_CHECKING, List, Optional, Union from packaging import version if TYPE_CHECKING: import torch from transformers import AutoModelForImageClassification from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions from docling.datamodel.image_classification_engine_options import ( TransformersImageClassificationEngineOptions, ) from docling.models.inference_engines.image_classification.base import ( ImageClassificationEngineInput, ImageClassificationEngineOutput, ) from docling.models.inference_engines.image_classification.hf_base import ( HfImageClassificationEngineBase, ) from docling.utils.accelerator_utils import decide_device if TYPE_CHECKING: from docling.datamodel.stage_model_specs import EngineModelConfig _log = logging.getLogger(__name__) class TransformersImageClassificationEngine(HfImageClassificationEngineBase): """Transformers engine for image-classification models.""" def __init__( self, *, options: TransformersImageClassificationEngineOptions, model_config: Optional[EngineModelConfig] = None, accelerator_options: AcceleratorOptions, artifacts_path: Optional[Union[Path, str]] = None, ): """Initialize the Transformers image-classification engine.""" super().__init__( options=options, model_config=model_config, accelerator_options=accelerator_options, artifacts_path=artifacts_path, ) self.options: TransformersImageClassificationEngineOptions = options self._model: Optional[AutoModelForImageClassification] = None self._device: Optional[torch.device] = None def _resolve_device(self) -> torch.device: """Resolve PyTorch device from accelerator options.""" import torch device_str = decide_device( self._accelerator_options.device, supported_devices=[ AcceleratorDevice.CPU, AcceleratorDevice.CUDA, AcceleratorDevice.MPS, ], ) if device_str.startswith("cuda"): return torch.device(device_str) if device_str == AcceleratorDevice.MPS.value: return torch.device("mps") return torch.device("cpu") def _resolve_torch_dtype(self) -> Optional[torch.dtype]: """Resolve PyTorch dtype from options or model config.""" import torch dtype_str = self.options.torch_dtype or self._model_config.torch_dtype if dtype_str is None: return None dtype_map = { "float32": torch.float32, "float16": torch.float16, "bfloat16": torch.bfloat16, } dtype = dtype_map.get(dtype_str) if dtype is None: _log.warning( "Unknown torch_dtype '%s', using auto dtype detection", dtype_str, ) return dtype def initialize(self) -> None: """Initialize PyTorch model and preprocessor.""" import torch from transformers import AutoModelForImageClassification _log.info("Initializing Transformers image-classification engine") revision = self._model_config.revision or "main" model_folder = self._resolve_model_folder( repo_id=self._repo_id, revision=revision, ) _log.debug("Using model at %s", model_folder) self._device = self._resolve_device() torch_dtype = self._resolve_torch_dtype() if self._device.type == "cpu": torch.set_num_threads(self._accelerator_options.num_threads) self._processor = self._load_preprocessor(model_folder) self._id_to_label = self._load_label_mapping(model_folder) _log.debug("Loading model from %s to device %s", model_folder, self._device) try: self._model = AutoModelForImageClassification.from_pretrained( str(model_folder), torch_dtype=torch_dtype, ) self._model.to(self._device) # type: ignore[union-attr] self._model.eval() 
            # Optionally compile model for better performance (model must be in eval mode first)
            # Works for Python < 3.14 with any torch 2.x
            # Works for Python >= 3.14 with torch >= 2.10
            if self.options.compile_model:
                if sys.version_info < (3, 14):
                    self._model = torch.compile(self._model)  # type: ignore[arg-type,assignment]
                    _log.debug("Model compiled with torch.compile()")
                elif version.parse(torch.__version__) >= version.parse("2.10"):
                    self._model = torch.compile(self._model)  # type: ignore[arg-type,assignment]
                    _log.debug("Model compiled with torch.compile()")
                else:
                    _log.warning(
                        "Model compilation requested but not available "
                        "(requires Python < 3.14 or torch >= 2.10 for Python 3.14+)"
                    )
        except Exception as exc:
            raise RuntimeError(
                f"Failed to load model from {model_folder}: {exc}"
            ) from exc

        self._initialized = True
        _log.info(
            "Transformers image-classification engine ready (device=%s, dtype=%s)",
            self._device,
            self._model.dtype,  # type: ignore[union-attr]
        )

    def predict_batch(
        self, input_batch: List[ImageClassificationEngineInput]
    ) -> List[ImageClassificationEngineOutput]:
        """Run inference on a batch of inputs."""
        import torch

        if not input_batch:
            return []
        if self._model is None or self._processor is None or self._device is None:
            raise RuntimeError("Engine not initialized. Call initialize() first.")

        images = [item.image.convert("RGB") for item in input_batch]
        inputs = self._processor(images=images, return_tensors="pt").to(self._device)

        with torch.inference_mode():
            outputs = self._model(**inputs)  # type: ignore[operator]
            probs_batch = torch.softmax(outputs.logits, dim=-1)

        batch_outputs: List[ImageClassificationEngineOutput] = []
        for input_item, probs_vector in zip(input_batch, probs_batch):
            # Use topk for efficiency when top_k is specified
            if self.options.top_k is not None:
                k = min(self.options.top_k, len(probs_vector))
                scores, labels = torch.topk(probs_vector, k=k)
            else:
                scores, labels = torch.sort(probs_vector, descending=True)

            batch_outputs.append(
                self._build_output(
                    input_item=input_item,
                    labels=labels,
                    scores=scores,
                )
            )

        return batch_outputs
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/image_classification/transformers_engine.py", "license": "MIT License", "lines": 157, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
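A standalone restatement of the compile gating above as a pure function, useful for checking the policy without loading torch; the helper name is invented for this sketch.

import sys

from packaging import version


def compile_is_available(torch_version: str) -> bool:
    """Hypothetical helper mirroring the gating in initialize() above."""
    if sys.version_info < (3, 14):
        return True  # any torch 2.x works on older interpreters
    return version.parse(torch_version) >= version.parse("2.10")


print(compile_is_available("2.4.0"))  # True on Python < 3.14, False on 3.14+
print(compile_is_available("2.10.1"))  # True on every supported interpreter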
docling-project/docling:docling/models/inference_engines/object_detection/api_kserve_v2_engine.py
"""KServe v2 remote implementation for object-detection models.""" from __future__ import annotations import logging from pathlib import Path from typing import TYPE_CHECKING, List, Optional, Union import numpy as np from docling.datamodel.accelerator_options import AcceleratorOptions from docling.datamodel.object_detection_engine_options import ( ApiKserveV2ObjectDetectionEngineOptions, ) from docling.exceptions import OperationNotAllowed from docling.models.inference_engines.common import KserveV2HttpClient from docling.models.inference_engines.object_detection.base import ( ObjectDetectionEngineInput, ObjectDetectionEngineOutput, ) from docling.models.inference_engines.object_detection.hf_base import ( HfObjectDetectionEngineBase, ) if TYPE_CHECKING: from docling.datamodel.stage_model_specs import EngineModelConfig _log = logging.getLogger(__name__) class ApiKserveV2ObjectDetectionEngine(HfObjectDetectionEngineBase): """Remote object-detection engine backed by KServe v2-compatible serving.""" def __init__( self, *, enable_remote_services: bool, options: ApiKserveV2ObjectDetectionEngineOptions, model_config: Optional[EngineModelConfig] = None, accelerator_options: AcceleratorOptions, artifacts_path: Optional[Union[Path, str]] = None, ): super().__init__( options=options, model_config=model_config, accelerator_options=accelerator_options, artifacts_path=artifacts_path, ) self.options: ApiKserveV2ObjectDetectionEngineOptions = options self._kserve_client: Optional[KserveV2HttpClient] = None self._input_images_name: Optional[str] = None self._input_orig_target_sizes_name: Optional[str] = None self._output_labels_name: Optional[str] = None self._output_boxes_name: Optional[str] = None self._output_scores_name: Optional[str] = None if not enable_remote_services: raise OperationNotAllowed( "Connections to remote services are only allowed when set explicitly. " "pipeline_options.enable_remote_services=True." ) def _resolve_model_name(self) -> str: if self.options.model_name: return self.options.model_name return self._repo_id.replace("/", "--") def _resolve_model_version(self) -> Optional[str]: return self.options.model_version def _resolve_tensor_names(self) -> tuple[str, str, str, str, str]: if self._kserve_client is None: raise RuntimeError("KServe v2 client is not initialized.") metadata = self._kserve_client.get_model_metadata() if len(metadata.inputs) < 2: raise RuntimeError( "Expected object-detection model metadata to expose at least 2 inputs " f"(images, orig_target_sizes), got {len(metadata.inputs)}." ) if len(metadata.outputs) < 3: raise RuntimeError( "Expected object-detection model metadata to expose at least 3 outputs " f"(labels, boxes, scores), got {len(metadata.outputs)}." 
) input_images_name = metadata.inputs[0].name input_orig_target_sizes_name = metadata.inputs[1].name output_labels_name = metadata.outputs[0].name output_boxes_name = metadata.outputs[1].name output_scores_name = metadata.outputs[2].name return ( input_images_name, input_orig_target_sizes_name, output_labels_name, output_boxes_name, output_scores_name, ) def initialize(self) -> None: """Initialize preprocessor/labels and prepare remote client.""" _log.info("Initializing KServe v2 object-detection engine") revision = self._model_config.revision or "main" model_folder = self._resolve_model_folder( repo_id=self._repo_id, revision=revision ) self._processor = self._load_preprocessor(model_folder) self._id_to_label = self._load_label_mapping(model_folder) self._kserve_client = KserveV2HttpClient( base_url=str(self.options.url), model_name=self._resolve_model_name(), model_version=self._resolve_model_version(), timeout=self.options.timeout, headers=self.options.headers, ) ( self._input_images_name, self._input_orig_target_sizes_name, self._output_labels_name, self._output_boxes_name, self._output_scores_name, ) = self._resolve_tensor_names() self._initialized = True _log.info( "KServe v2 object-detection engine ready (inputs=[%s, %s], outputs=[%s, %s, %s])", self._input_images_name, self._input_orig_target_sizes_name, self._output_labels_name, self._output_boxes_name, self._output_scores_name, ) def predict_batch( self, input_batch: List[ObjectDetectionEngineInput] ) -> List[ObjectDetectionEngineOutput]: """Run inference on a batch of images against a KServe v2 endpoint.""" if not input_batch: return [] if not self._initialized: raise RuntimeError("Engine not initialized. Call initialize() first.") # Type narrowing: _initialized guarantees these are non-None assert self._processor is not None assert self._kserve_client is not None assert self._input_images_name is not None assert self._input_orig_target_sizes_name is not None assert self._output_labels_name is not None assert self._output_boxes_name is not None assert self._output_scores_name is not None images = [item.image.convert("RGB") for item in input_batch] processed_inputs = self._processor(images=images, return_tensors="np") pixel_values = np.asarray(processed_inputs["pixel_values"], dtype=np.float32) orig_sizes = np.asarray( [[image.width, image.height] for image in images], dtype=np.int64, ) outputs = self._kserve_client.infer( inputs={ self._input_images_name: pixel_values, self._input_orig_target_sizes_name: orig_sizes, }, output_names=[ self._output_labels_name, self._output_boxes_name, self._output_scores_name, ], request_parameters=self.options.request_parameters, ) try: labels_batch = outputs[self._output_labels_name] boxes_batch = outputs[self._output_boxes_name] scores_batch = outputs[self._output_scores_name] except KeyError as exc: raise RuntimeError( "Missing one or more expected KServe v2 outputs: " f"{self._output_labels_name}, " f"{self._output_boxes_name}, " f"{self._output_scores_name}" ) from exc if len(labels_batch) != len(input_batch): raise RuntimeError( "KServe v2 output batch size mismatch for labels: " f"expected {len(input_batch)}, got {len(labels_batch)}" ) batch_outputs: List[ObjectDetectionEngineOutput] = [] for idx, input_item in enumerate(input_batch): batch_outputs.append( self._build_output( input_item=input_item, labels=labels_batch[idx], scores=scores_batch[idx], boxes=boxes_batch[idx], apply_score_threshold=True, ) ) return batch_outputs
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/object_detection/api_kserve_v2_engine.py", "license": "MIT License", "lines": 178, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
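A brief sketch of the two-tensor request contract in predict_batch above: per-image [width, height] pairs are sent as an INT64 tensor alongside the pixel values. The images here are synthetic.

import numpy as np
from PIL import Image

images = [Image.new("RGB", (640, 480)), Image.new("RGB", (800, 600))]

# One [width, height] row per image, matching the orig_target_sizes input.
orig_sizes = np.asarray(
    [[image.width, image.height] for image in images],
    dtype=np.int64,
)
print(orig_sizes.shape, orig_sizes.tolist())  # (2, 2) [[640, 480], [800, 600]]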
docling-project/docling:tests/test_api_kserve_v2_engine_scaffolding.py
"""Tests for API KServe v2 remote engine scaffolding.""" from __future__ import annotations from typing import ClassVar import pytest from pydantic import BaseModel from docling.datamodel.accelerator_options import AcceleratorOptions from docling.datamodel.image_classification_engine_options import ( ApiKserveV2ImageClassificationEngineOptions, BaseImageClassificationEngineOptions, ) from docling.datamodel.object_detection_engine_options import ( ApiKserveV2ObjectDetectionEngineOptions, BaseObjectDetectionEngineOptions, ) from docling.datamodel.stage_model_specs import ( ImageClassificationModelSpec, ImageClassificationStagePreset, ImageClassificationStagePresetMixin, ObjectDetectionModelSpec, ObjectDetectionStagePreset, ObjectDetectionStagePresetMixin, ) from docling.exceptions import OperationNotAllowed from docling.models.inference_engines.common.kserve_v2_http import KserveV2HttpClient from docling.models.inference_engines.image_classification import ( ImageClassificationEngineType, create_image_classification_engine, ) from docling.models.inference_engines.object_detection import ( ObjectDetectionEngineType, create_object_detection_engine, ) class _DummyObjectDetectionStageOptions(ObjectDetectionStagePresetMixin, BaseModel): kind: ClassVar[str] = "dummy_object_detection_stage_options" model_spec: ObjectDetectionModelSpec engine_options: BaseObjectDetectionEngineOptions class _DummyImageClassificationStageOptions( ImageClassificationStagePresetMixin, BaseModel ): kind: ClassVar[str] = "dummy_image_classification_stage_options" model_spec: ImageClassificationModelSpec engine_options: BaseImageClassificationEngineOptions def test_object_detection_preset_supports_api_kserve_v2_engine_default() -> None: preset = ObjectDetectionStagePreset( preset_id="od_api_kserve_v2_test", name="OD API KServe v2 Test", description="OD API KServe v2 preset", model_spec=ObjectDetectionModelSpec(name="od", repo_id="org/od"), default_engine_type=ObjectDetectionEngineType.API_KSERVE_V2, ) _DummyObjectDetectionStageOptions.register_preset(preset) # API_KSERVE_V2 presets require explicit engine_options with URL engine_opts = ApiKserveV2ObjectDetectionEngineOptions(url="http://localhost:8000") options = _DummyObjectDetectionStageOptions.from_preset( "od_api_kserve_v2_test", engine_options=engine_opts ) assert isinstance(options.engine_options, ApiKserveV2ObjectDetectionEngineOptions) def test_image_classification_preset_supports_api_kserve_v2_engine_default() -> None: preset = ImageClassificationStagePreset( preset_id="ic_api_kserve_v2_test", name="IC API KServe v2 Test", description="IC API KServe v2 preset", model_spec=ImageClassificationModelSpec(name="ic", repo_id="org/ic"), default_engine_type=ImageClassificationEngineType.API_KSERVE_V2, ) _DummyImageClassificationStageOptions.register_preset(preset) # API_KSERVE_V2 presets require explicit engine_options with URL engine_opts = ApiKserveV2ImageClassificationEngineOptions( url="http://localhost:8000" ) options = _DummyImageClassificationStageOptions.from_preset( "ic_api_kserve_v2_test", engine_options=engine_opts ) assert isinstance( options.engine_options, ApiKserveV2ImageClassificationEngineOptions ) def test_object_detection_factory_requires_remote_enablement() -> None: options = ApiKserveV2ObjectDetectionEngineOptions( url="http://localhost:8000", model_name="od_model", ) spec = ObjectDetectionModelSpec(name="od", repo_id="org/od") with pytest.raises(OperationNotAllowed): create_object_detection_engine( options=options, model_spec=spec, 
enable_remote_services=False, accelerator_options=AcceleratorOptions(), ) engine = create_object_detection_engine( options=options, model_spec=spec, enable_remote_services=True, accelerator_options=AcceleratorOptions(), ) assert engine.options.engine_type == ObjectDetectionEngineType.API_KSERVE_V2 def test_image_classification_factory_requires_remote_enablement() -> None: options = ApiKserveV2ImageClassificationEngineOptions( url="http://localhost:8000", model_name="ic_model", ) spec = ImageClassificationModelSpec(name="ic", repo_id="org/ic") with pytest.raises(OperationNotAllowed): create_image_classification_engine( options=options, model_spec=spec, enable_remote_services=False, accelerator_options=AcceleratorOptions(), ) engine = create_image_classification_engine( options=options, model_spec=spec, enable_remote_services=True, accelerator_options=AcceleratorOptions(), ) assert engine.options.engine_type == ImageClassificationEngineType.API_KSERVE_V2 def test_kserve_v2_client_infer_url_with_version() -> None: client = KserveV2HttpClient( base_url="http://localhost:8000", model_name="layout_model", model_version="1", timeout=30.0, headers={}, ) assert ( client.infer_url == "http://localhost:8000/v2/models/layout_model/versions/1/infer" ) def test_kserve_v2_client_infer_url_without_version() -> None: client = KserveV2HttpClient( base_url="http://localhost:8000", model_name="layout_model", model_version=None, timeout=30.0, headers={}, ) assert client.infer_url == "http://localhost:8000/v2/models/layout_model/infer"
{ "repo_id": "docling-project/docling", "file_path": "tests/test_api_kserve_v2_engine_scaffolding.py", "license": "MIT License", "lines": 137, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
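A hedged sketch of a complementary unit test that exercises KserveV2HttpClient.infer without a server by monkeypatching the HTTP layer; the fake response carries only the minimal fields the client reads.

import numpy as np

from docling.models.inference_engines.common.kserve_v2_http import KserveV2HttpClient


def test_infer_decodes_fp32_output(monkeypatch) -> None:
    payload = {
        "outputs": [
            {"name": "logits", "datatype": "FP32", "shape": [1, 2], "data": [0.1, 0.9]}
        ]
    }

    class _FakeResponse:
        status_code = 200
        text = "ok"

        def json(self):
            return payload

        def raise_for_status(self):
            return None

    # Patch requests.post as seen by the client module for this test only.
    monkeypatch.setattr(
        "docling.models.inference_engines.common.kserve_v2_http.requests.post",
        lambda url, **kwargs: _FakeResponse(),
    )

    client = KserveV2HttpClient(
        base_url="http://localhost:8000",
        model_name="m",
        model_version=None,
        timeout=1.0,
        headers={},
    )
    out = client.infer(
        inputs={"x": np.zeros((1, 2), dtype=np.float32)},
        output_names=["logits"],
    )
    assert out["logits"].shape == (1, 2)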
docling-project/docling:tests/test_backend_docling_parse_legacy.py
from pathlib import Path import pytest from docling.backend.docling_parse_v2_backend import DoclingParseV2DocumentBackend from docling.backend.docling_parse_v4_backend import DoclingParseV4DocumentBackend from docling.datamodel.base_models import InputFormat from docling.datamodel.document import InputDocument INPUT_FILE = "./tests/data/pdf/2206.01062.pdf" @pytest.mark.parametrize( "cls", [DoclingParseV2DocumentBackend, DoclingParseV4DocumentBackend], ) def test_emits_future_warning(cls): with pytest.warns(FutureWarning, match="DoclingParse"): InputDocument( path_or_stream=Path(INPUT_FILE), format=InputFormat.PDF, backend=cls )
{ "repo_id": "docling-project/docling", "file_path": "tests/test_backend_docling_parse_legacy.py", "license": "MIT License", "lines": 16, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
docling-project/docling:tests/test_failed_pages.py
"""Tests for failed page handling in StandardPdfPipeline. These tests verify that when some PDF pages fail to parse, they are still added to DoclingDocument.pages to maintain correct page numbering and ensure page break markers are generated correctly during export. Related: https://github.com/docling-project/docling-core/pull/466 """ from pathlib import Path import pytest from docling.datamodel.base_models import ConversionStatus, InputFormat from docling.datamodel.pipeline_options import PdfPipelineOptions from docling.document_converter import DocumentConverter, PdfFormatOption from docling.pipeline.standard_pdf_pipeline import StandardPdfPipeline @pytest.fixture def skipped_1page_path(): return Path("./tests/data/pdf/skipped_1page.pdf") @pytest.fixture def skipped_2pages_path(): return Path("./tests/data/pdf/skipped_2pages.pdf") @pytest.fixture def normal_4pages_path(): return Path("./tests/data/pdf/normal_4pages.pdf") def test_normal_pages_all_present(normal_4pages_path): """Test that all pages are present in DoclingDocument.pages for a normal PDF.""" converter = DocumentConverter( format_options={ InputFormat.PDF: PdfFormatOption( pipeline_cls=StandardPdfPipeline, pipeline_options=PdfPipelineOptions( do_ocr=False, do_table_structure=False, ), ) } ) result = converter.convert(normal_4pages_path, raises_on_error=False) # Document should succeed completely assert result.status == ConversionStatus.SUCCESS, ( f"Expected SUCCESS status, got: {result.status}" ) # Get expected page count from input expected_page_count = result.input.page_count # DoclingDocument.pages should contain all pages assert result.document is not None, "Document should not be None" actual_page_count = len(result.document.pages) assert actual_page_count == expected_page_count, ( f"DoclingDocument.pages should contain all {expected_page_count} pages, " f"but got {actual_page_count}" ) # Verify all page numbers are present expected_page_nos = set(range(1, expected_page_count + 1)) actual_page_nos = set(result.document.pages.keys()) assert actual_page_nos == expected_page_nos, ( f"Missing page numbers in DoclingDocument.pages. 
" f"Expected: {expected_page_nos}, Got: {actual_page_nos}" ) # No errors should be recorded assert len(result.errors) == 0, ( f"No errors should be recorded for normal PDF, but got: {result.errors}" ) def test_failed_pages_added_to_document_1page(skipped_1page_path): """Test that a single failed page is added to DoclingDocument.pages.""" converter = DocumentConverter( format_options={ InputFormat.PDF: PdfFormatOption( pipeline_cls=StandardPdfPipeline, pipeline_options=PdfPipelineOptions( do_ocr=False, do_table_structure=False, ), ) } ) result = converter.convert(skipped_1page_path, raises_on_error=False) # Document should have partial success due to failed page(s) assert result.status == ConversionStatus.PARTIAL_SUCCESS, ( f"Unexpected status: {result.status}" ) # Get expected page count from input expected_page_count = result.input.page_count # DoclingDocument.pages should contain all pages (including failed ones) assert result.document is not None, "Document should not be None" actual_page_count = len(result.document.pages) assert actual_page_count == expected_page_count, ( f"DoclingDocument.pages should contain all {expected_page_count} pages " f"(including failed ones), but got {actual_page_count}" ) # Verify all page numbers are present expected_page_nos = set(range(1, expected_page_count + 1)) actual_page_nos = set(result.document.pages.keys()) assert actual_page_nos == expected_page_nos, ( f"Missing page numbers in DoclingDocument.pages. " f"Expected: {expected_page_nos}, Got: {actual_page_nos}" ) def test_failed_pages_added_to_document_2pages(skipped_2pages_path): """Test that multiple failed pages are added to DoclingDocument.pages.""" converter = DocumentConverter( format_options={ InputFormat.PDF: PdfFormatOption( pipeline_cls=StandardPdfPipeline, pipeline_options=PdfPipelineOptions( do_ocr=False, do_table_structure=False, ), ) } ) result = converter.convert(skipped_2pages_path, raises_on_error=False) # Document should have partial success due to failed page(s) assert result.status == ConversionStatus.PARTIAL_SUCCESS, ( f"Unexpected status: {result.status}" ) # Get expected page count from input expected_page_count = result.input.page_count # DoclingDocument.pages should contain all pages (including failed ones) assert result.document is not None, "Document should not be None" actual_page_count = len(result.document.pages) assert actual_page_count == expected_page_count, ( f"DoclingDocument.pages should contain all {expected_page_count} pages " f"(including failed ones), but got {actual_page_count}" ) # Verify all page numbers are present expected_page_nos = set(range(1, expected_page_count + 1)) actual_page_nos = set(result.document.pages.keys()) assert actual_page_nos == expected_page_nos, ( f"Missing page numbers in DoclingDocument.pages. 
" f"Expected: {expected_page_nos}, Got: {actual_page_nos}" ) def test_failed_pages_have_size_info(skipped_1page_path): """Test that failed pages have size information when available.""" converter = DocumentConverter( format_options={ InputFormat.PDF: PdfFormatOption( pipeline_cls=StandardPdfPipeline, pipeline_options=PdfPipelineOptions( do_ocr=False, do_table_structure=False, ), ) } ) result = converter.convert(skipped_1page_path, raises_on_error=False) assert result.document is not None, "Document should not be None" # All pages should have size information for page_no, page_item in result.document.pages.items(): assert page_item.size is not None, ( f"Page {page_no} should have size information" ) # Size should be valid (either from backend or default 0.0) assert page_item.size.width >= 0, f"Page {page_no} width should be >= 0" assert page_item.size.height >= 0, f"Page {page_no} height should be >= 0" def test_errors_recorded_for_failed_pages(skipped_1page_path): """Test that errors are recorded in conv_res.errors for failed pages.""" converter = DocumentConverter( format_options={ InputFormat.PDF: PdfFormatOption( pipeline_cls=StandardPdfPipeline, pipeline_options=PdfPipelineOptions( do_ocr=False, do_table_structure=False, ), ) } ) result = converter.convert(skipped_1page_path, raises_on_error=False) # If status is PARTIAL_SUCCESS, there should be errors recorded if result.status == ConversionStatus.PARTIAL_SUCCESS: assert len(result.errors) > 0, ( "PARTIAL_SUCCESS status should have errors recorded" )
{ "repo_id": "docling-project/docling", "file_path": "tests/test_failed_pages.py", "license": "MIT License", "lines": 169, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
docling-project/docling:docling/datamodel/object_detection_engine_options.py
"""Engine option helpers for object-detection runtimes.""" from __future__ import annotations from typing import Any, Dict, List, Literal, Optional from pydantic import AnyUrl, Field from docling.datamodel.settings import default_compile_model from docling.models.inference_engines.object_detection.base import ( BaseObjectDetectionEngineOptions, ObjectDetectionEngineType, ) class OnnxRuntimeObjectDetectionEngineOptions(BaseObjectDetectionEngineOptions): """Runtime configuration for ONNX Runtime based object-detection models. Preprocessing parameters come from HuggingFace preprocessor configs, not from these options. """ engine_type: Literal[ObjectDetectionEngineType.ONNXRUNTIME] = ( ObjectDetectionEngineType.ONNXRUNTIME ) model_filename: str = Field( default="model.onnx", description="Filename of the ONNX export inside the model repository", ) providers: List[str] = Field( default_factory=lambda: ["CPUExecutionProvider"], description="Ordered list of ONNX Runtime execution providers to try", ) class TransformersObjectDetectionEngineOptions(BaseObjectDetectionEngineOptions): """Runtime configuration for Transformers-based object-detection models.""" engine_type: Literal[ObjectDetectionEngineType.TRANSFORMERS] = ( ObjectDetectionEngineType.TRANSFORMERS ) torch_dtype: str | None = Field( default=None, description="PyTorch dtype for model inference (e.g., 'float32', 'float16', 'bfloat16')", ) compile_model: bool = Field( default_factory=default_compile_model, description="Whether to compile the model with torch.compile() for better performance.", ) class ApiKserveV2ObjectDetectionEngineOptions(BaseObjectDetectionEngineOptions): """Runtime configuration for remote KServe v2 inference.""" engine_type: Literal[ObjectDetectionEngineType.API_KSERVE_V2] = ( ObjectDetectionEngineType.API_KSERVE_V2 ) url: AnyUrl = Field( description=( "Base URL of the KServe v2 server (e.g., 'http://localhost:8000'). " "The full endpoint path is constructed automatically as " "/v2/models/{model_name}[/versions/{version}]/infer." ), ) model_name: Optional[str] = Field( default=None, description=( "Remote model name registered in the KServe v2 endpoint. " "If omitted, a repo_id-derived default is used." ), ) model_version: Optional[str] = Field( default=None, description="Optional model version. If omitted, the server default is used.", ) headers: Dict[str, str] = Field( default_factory=dict, description="Optional HTTP headers for authentication/routing.", ) timeout: float = Field( default=60.0, description="HTTP request timeout in seconds.", ) request_parameters: Dict[str, Any] = Field( default_factory=dict, description="Optional top-level KServe v2 infer request parameters.", )
{ "repo_id": "docling-project/docling", "file_path": "docling/datamodel/object_detection_engine_options.py", "license": "MIT License", "lines": 73, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
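A construction example for the remote options above; the URL, model identifiers, and bearer token are placeholders.

from docling.datamodel.object_detection_engine_options import (
    ApiKserveV2ObjectDetectionEngineOptions,
)

options = ApiKserveV2ObjectDetectionEngineOptions(
    url="http://localhost:8000",
    model_name="layout_model",  # overrides the repo_id-derived default
    model_version="1",  # optional; server default when omitted
    headers={"Authorization": "Bearer <token>"},  # placeholder credential
    timeout=30.0,
    score_threshold=0.5,  # inherited from the base options
)
print(options.engine_type)  # ObjectDetectionEngineType.API_KSERVE_V2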
docling-project/docling:docling/models/inference_engines/object_detection/base.py
"""Base classes for object-detection inference engines.""" from __future__ import annotations import logging from abc import ABC, abstractmethod from enum import Enum from typing import ( TYPE_CHECKING, Any, ClassVar, Dict, List, Literal, Optional, Type, get_args, get_origin, ) from PIL.Image import Image from pydantic import BaseModel, ConfigDict, Field, field_validator from pydantic_core import PydanticUndefined if TYPE_CHECKING: from docling.datamodel.stage_model_specs import EngineModelConfig _log = logging.getLogger(__name__) class ObjectDetectionEngineType(str, Enum): """Supported inference engine types for object-detection models.""" ONNXRUNTIME = "onnxruntime" TRANSFORMERS = "transformers" API_KSERVE_V2 = "api_kserve_v2" @classmethod def is_remote_variant(cls, engine_type: ObjectDetectionEngineType) -> bool: """Check if an engine type is a remote API variant.""" return engine_type in {cls.API_KSERVE_V2} class BaseObjectDetectionEngineOptions(BaseModel): """Base configuration shared across object-detection engines.""" model_config = ConfigDict(arbitrary_types_allowed=True) engine_type: ObjectDetectionEngineType = Field( description="Type of inference engine to use", ) score_threshold: float = Field( default=0.3, description="Minimum confidence score to keep a detection (0.0 to 1.0)", ) _registry: ClassVar[ dict[ObjectDetectionEngineType, Type[BaseObjectDetectionEngineOptions]] ] = {} @classmethod def __pydantic_init_subclass__(cls, **kwargs): super().__pydantic_init_subclass__(**kwargs) # Skip base class itself if cls is BaseObjectDetectionEngineOptions: return # only register concrete subclasses that fix engine_type via Literal field = cls.model_fields.get("engine_type") if not field: return engine_type = None # 1. Literal[...] annotation ann = field.annotation if get_origin(ann) is Literal: values = get_args(ann) if len(values) == 1: engine_type = values[0] # 2. 
            # Explicit default
            if engine_type is None and field.default is not PydanticUndefined:
                engine_type = field.default

            if engine_type is not None:
                BaseObjectDetectionEngineOptions._registry[engine_type] = cls


class ObjectDetectionEngineOptionsMixin(BaseModel):
    engine_options: BaseObjectDetectionEngineOptions = Field(
        description="Runtime configuration for the object-detection engine",
    )

    @field_validator("engine_options", mode="before")
    @classmethod
    def resolve_engine_options(cls, value):
        # already concrete
        if isinstance(value, BaseObjectDetectionEngineOptions):
            return value
        # dict / JSON case
        if isinstance(value, dict):
            engine_type = value.get("engine_type")
            model_cls = BaseObjectDetectionEngineOptions._registry.get(engine_type)
            if model_cls:
                return model_cls.model_validate(value)
        return value


class ObjectDetectionEngineInput(BaseModel):
    """Generic input accepted by every object-detection engine."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    image: Image = Field(description="PIL image to run inference on")
    metadata: Dict[str, Any] = Field(
        default_factory=dict,
        description="Optional metadata that is echoed back in the output",
    )


class ObjectDetectionEngineOutput(BaseModel):
    """Output returned by object-detection engines."""

    label_ids: List[int] = Field(
        default_factory=list,
        description="Predicted class indices",
    )
    scores: List[float] = Field(
        default_factory=list,
        description="Confidence scores for the predictions",
    )
    bboxes: List[List[float]] = Field(
        default_factory=list,
        description="Bounding boxes as [x_min, y_min, x_max, y_max] in pixels",
    )
    metadata: Dict[str, Any] = Field(
        default_factory=dict,
        description="Optional metadata echoed back from the input or engine",
    )


class BaseObjectDetectionEngine(ABC):
    """Abstract base-class for object-detection engines."""

    def __init__(
        self,
        options: BaseObjectDetectionEngineOptions,
        model_config: Optional[EngineModelConfig] = None,
    ) -> None:
        """Initialize the engine.

        Args:
            options: Engine-specific configuration options
            model_config: Model configuration (repo_id, revision, extra_config)
        """
        self.options = options
        self.model_config = model_config
        self._initialized = False

    @abstractmethod
    def initialize(self) -> None:
        """Initialize engine resources (load models, allocate buffers, etc.)."""

    @abstractmethod
    def predict_batch(
        self, input_batch: List[ObjectDetectionEngineInput]
    ) -> List[ObjectDetectionEngineOutput]:
        """Run inference on a batch of inputs."""

    @abstractmethod
    def get_label_mapping(self) -> Dict[int, str]:
        """Get the label mapping for this model.

        Returns:
            Dictionary mapping label IDs to label names
        """

    def predict(
        self, input_data: ObjectDetectionEngineInput
    ) -> ObjectDetectionEngineOutput:
        """Helper to run inference on a single input."""
        if not self._initialized:
            _log.debug("Initializing %s for single prediction", type(self).__name__)
            self.initialize()
        results = self.predict_batch([input_data])
        return results[0]

    def __call__(
        self,
        input_data: ObjectDetectionEngineInput | List[ObjectDetectionEngineInput],
    ) -> ObjectDetectionEngineOutput | List[ObjectDetectionEngineOutput]:
        if not self._initialized:
            _log.debug("Initializing %s for call", type(self).__name__)
            self.initialize()
        if isinstance(input_data, list):
            return self.predict_batch(input_data)
        return self.predict(input_data)
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/object_detection/base.py", "license": "MIT License", "lines": 158, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
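# Usage sketch (not part of the library) for the options registry above: a
# concrete subclass pins engine_type via a Literal default, which
# __pydantic_init_subclass__ records in _registry, and the mixin's
# field_validator then resolves plain dicts to that subclass. The Demo* names
# are hypothetical; the real subclasses live in
# docling.datamodel.object_detection_engine_options, and registering a second
# class for the same engine_type would shadow them.
from typing import Literal

from docling.models.inference_engines.object_detection.base import (
    BaseObjectDetectionEngineOptions,
    ObjectDetectionEngineOptionsMixin,
    ObjectDetectionEngineType,
)


class DemoEngineOptions(BaseObjectDetectionEngineOptions):
    engine_type: Literal[ObjectDetectionEngineType.ONNXRUNTIME] = (
        ObjectDetectionEngineType.ONNXRUNTIME
    )


class DemoStageOptions(ObjectDetectionEngineOptionsMixin):
    pass


# The raw string works for the registry lookup because the enum subclasses str.
stage = DemoStageOptions.model_validate(
    {"engine_options": {"engine_type": "onnxruntime", "score_threshold": 0.5}}
)
assert isinstance(stage.engine_options, DemoEngineOptions)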
docling-project/docling:docling/models/inference_engines/object_detection/factory.py
"""Factory for creating object detection engines.""" from __future__ import annotations import logging from pathlib import Path from typing import TYPE_CHECKING, Optional, Union from docling.datamodel.accelerator_options import AcceleratorOptions from docling.models.inference_engines.object_detection.base import ( BaseObjectDetectionEngine, BaseObjectDetectionEngineOptions, ObjectDetectionEngineType, ) if TYPE_CHECKING: from docling.datamodel.stage_model_specs import ( EngineModelConfig, ObjectDetectionModelSpec, ) _log = logging.getLogger(__name__) def create_object_detection_engine( *, options: BaseObjectDetectionEngineOptions, model_spec: Optional[ObjectDetectionModelSpec] = None, enable_remote_services: bool = False, accelerator_options: AcceleratorOptions, artifacts_path: Optional[Union[Path, str]] = None, ) -> BaseObjectDetectionEngine: """Factory to create object detection engines. Args: options: Engine-specific options model_spec: Model specification used to derive engine configuration enable_remote_services: Whether external remote inference calls are allowed accelerator_options: Hardware accelerator configuration artifacts_path: Optional path to local model artifacts root Returns: Initialized engine instance (call .initialize() before use) """ model_config: Optional[EngineModelConfig] = None if model_spec is not None: model_config = model_spec.get_engine_config(options.engine_type) if options.engine_type == ObjectDetectionEngineType.ONNXRUNTIME: from docling.datamodel.object_detection_engine_options import ( OnnxRuntimeObjectDetectionEngineOptions, ) from docling.models.inference_engines.object_detection.onnxruntime_engine import ( OnnxRuntimeObjectDetectionEngine, ) if not isinstance(options, OnnxRuntimeObjectDetectionEngineOptions): raise ValueError( f"Expected OnnxRuntimeObjectDetectionEngineOptions, got {type(options)}" ) return OnnxRuntimeObjectDetectionEngine( options=options, model_config=model_config, artifacts_path=artifacts_path, accelerator_options=accelerator_options, ) elif options.engine_type == ObjectDetectionEngineType.TRANSFORMERS: from docling.datamodel.object_detection_engine_options import ( TransformersObjectDetectionEngineOptions, ) from docling.models.inference_engines.object_detection.transformers_engine import ( TransformersObjectDetectionEngine, ) if not isinstance(options, TransformersObjectDetectionEngineOptions): raise ValueError( f"Expected TransformersObjectDetectionEngineOptions, got {type(options)}" ) return TransformersObjectDetectionEngine( options=options, model_config=model_config, accelerator_options=accelerator_options, artifacts_path=artifacts_path, ) elif options.engine_type == ObjectDetectionEngineType.API_KSERVE_V2: from docling.datamodel.object_detection_engine_options import ( ApiKserveV2ObjectDetectionEngineOptions, ) from docling.models.inference_engines.object_detection.api_kserve_v2_engine import ( ApiKserveV2ObjectDetectionEngine, ) if not isinstance(options, ApiKserveV2ObjectDetectionEngineOptions): raise ValueError( f"Expected ApiKserveV2ObjectDetectionEngineOptions, got {type(options)}" ) return ApiKserveV2ObjectDetectionEngine( enable_remote_services=enable_remote_services, options=options, model_config=model_config, accelerator_options=accelerator_options, artifacts_path=artifacts_path, ) else: raise ValueError(f"Unknown engine type: {options.engine_type}")
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/object_detection/factory.py", "license": "MIT License", "lines": 92, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
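# Hedged wiring sketch for the factory above. It assumes
# OnnxRuntimeObjectDetectionEngineOptions is constructible from its defaults;
# a real call would also pass an ObjectDetectionModelSpec so that
# initialize() can resolve repo_id/revision and locate model artifacts.
from docling.datamodel.accelerator_options import AcceleratorOptions
from docling.datamodel.object_detection_engine_options import (
    OnnxRuntimeObjectDetectionEngineOptions,
)
from docling.models.inference_engines.object_detection.factory import (
    create_object_detection_engine,
)

engine = create_object_detection_engine(
    options=OnnxRuntimeObjectDetectionEngineOptions(),
    model_spec=None,  # omitted here; needed in practice to locate the model
    accelerator_options=AcceleratorOptions(),
    artifacts_path=None,
)
# The factory only constructs the engine; model loading happens later via
# engine.initialize(), which requires resolvable artifacts.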
docling-project/docling:docling/models/inference_engines/object_detection/hf_base.py
"""Shared HuggingFace-based helpers for object-detection engines.""" from __future__ import annotations from pathlib import Path from typing import TYPE_CHECKING, Any, Iterable, Optional, Sequence, Union from docling.datamodel.accelerator_options import AcceleratorOptions from docling.models.inference_engines.common import HfVisionModelMixin from docling.models.inference_engines.object_detection.base import ( BaseObjectDetectionEngine, BaseObjectDetectionEngineOptions, ObjectDetectionEngineInput, ObjectDetectionEngineOutput, ) if TYPE_CHECKING: from docling.datamodel.stage_model_specs import EngineModelConfig class HfObjectDetectionEngineBase(HfVisionModelMixin, BaseObjectDetectionEngine): """Base class for object-detection engines that load HF artifacts and configs.""" def __init__( self, *, options: BaseObjectDetectionEngineOptions, model_config: Optional[EngineModelConfig] = None, accelerator_options: AcceleratorOptions, artifacts_path: Optional[Union[Path, str]] = None, ) -> None: super().__init__(options=options, model_config=model_config) self.options: BaseObjectDetectionEngineOptions = options self._init_hf_vision_model( model_config=model_config, accelerator_options=accelerator_options, artifacts_path=artifacts_path, model_family_name="object-detection", ) def _build_output( self, *, input_item: ObjectDetectionEngineInput, labels: Iterable[Any], scores: Iterable[Any], boxes: Iterable[Sequence[Any]], apply_score_threshold: bool = False, ) -> ObjectDetectionEngineOutput: """Build standard engine output from raw detection iterables.""" label_ids: list[int] = [] output_scores: list[float] = [] bboxes: list[list[float]] = [] for label, score, box in zip(labels, scores, boxes): score_float = self._as_float(score) if apply_score_threshold and score_float < self.options.score_threshold: continue label_ids.append(self._as_int(label)) output_scores.append(score_float) bboxes.append([self._as_float(value) for value in box]) return ObjectDetectionEngineOutput( label_ids=label_ids, scores=output_scores, bboxes=bboxes, metadata=input_item.metadata.copy(), )
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/object_detection/hf_base.py", "license": "MIT License", "lines": 58, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
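# Standalone illustration of the thresholding performed by _build_output
# above (simplified: the real method normalizes label/score types through
# the HfVisionModelMixin helpers before filtering).
def filter_by_score(labels, scores, boxes, threshold):
    kept_labels, kept_scores, kept_boxes = [], [], []
    for label, score, box in zip(labels, scores, boxes):
        if float(score) < threshold:
            continue  # drop low-confidence detections
        kept_labels.append(int(label))
        kept_scores.append(float(score))
        kept_boxes.append([float(v) for v in box])
    return kept_labels, kept_scores, kept_boxes


assert filter_by_score([1, 2], [0.9, 0.1], [[0, 0, 1, 1], [2, 2, 3, 3]], 0.3) == (
    [1],
    [0.9],
    [[0.0, 0.0, 1.0, 1.0]],
)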
docling-project/docling:docling/models/inference_engines/object_detection/onnxruntime_engine.py
"""ONNX Runtime implementation for RT-DETR style object-detection models.""" from __future__ import annotations import logging from pathlib import Path from typing import TYPE_CHECKING, List, Optional, Union import numpy as np if TYPE_CHECKING: import onnxruntime as ort from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions from docling.datamodel.object_detection_engine_options import ( OnnxRuntimeObjectDetectionEngineOptions, ) from docling.models.inference_engines.object_detection.base import ( ObjectDetectionEngineInput, ObjectDetectionEngineOutput, ) from docling.models.inference_engines.object_detection.hf_base import ( HfObjectDetectionEngineBase, ) from docling.utils.accelerator_utils import decide_device if TYPE_CHECKING: from docling.datamodel.stage_model_specs import EngineModelConfig _log = logging.getLogger(__name__) class OnnxRuntimeObjectDetectionEngine(HfObjectDetectionEngineBase): """ONNX Runtime engine for object detection models. Uses HuggingFace AutoImageProcessor for preprocessing to ensure consistency with transformers-based models. This is the source of truth for preprocessing parameters. """ def __init__( self, *, options: OnnxRuntimeObjectDetectionEngineOptions, model_config: Optional[EngineModelConfig] = None, accelerator_options: AcceleratorOptions, artifacts_path: Optional[Union[Path, str]] = None, ): """Initialize the ONNX Runtime engine. Args: options: ONNX Runtime-specific runtime options accelerator_options: Hardware accelerator configuration artifacts_path: Path to cached model artifacts model_config: Model configuration (repo_id, revision, extra_config) """ super().__init__( options=options, model_config=model_config, accelerator_options=accelerator_options, artifacts_path=artifacts_path, ) self.options: OnnxRuntimeObjectDetectionEngineOptions = options self._session: Optional[ort.InferenceSession] = None self._model_path: Optional[Path] = None def _resolve_model_artifacts(self) -> tuple[Path, Path]: """Resolve model artifacts from artifacts_path or HF download. 
Returns: Tuple of (model_folder, model_path) """ repo_id = self._repo_id revision = self._model_config.revision or "main" model_filename = self._resolve_model_filename() model_folder = self._resolve_model_folder( repo_id=repo_id, revision=str(revision), ) model_path = model_folder / model_filename if not model_path.exists(): raise FileNotFoundError( f"ONNX model file '{model_filename}' not found: {model_path}" ) return model_folder, model_path def _resolve_model_filename(self) -> str: """Determine which ONNX filename to load.""" filename = self.options.model_filename extra_filename = self._model_config.extra_config.get("model_filename") if extra_filename and isinstance(extra_filename, str): filename = extra_filename return filename def initialize(self) -> None: """Initialize ONNX session and preprocessor.""" import onnxruntime as ort _log.info("Initializing ONNX Runtime object-detection engine") # Resolve model folder and model path in one step model_folder, self._model_path = self._resolve_model_artifacts() _log.debug(f"Using ONNX model at {self._model_path}") # Load preprocessor (source of truth for preprocessing) self._processor = self._load_preprocessor(model_folder) _log.debug(f"Loaded preprocessor with size: {self._processor.size}") # type: ignore[attr-defined] # Load label mapping from config self._id_to_label = self._load_label_mapping(model_folder) _log.debug(f"Loaded label mapping with {len(self._id_to_label)} labels") # Create ONNX session sess_options = ort.SessionOptions() sess_options.intra_op_num_threads = self._accelerator_options.num_threads providers = self._resolve_providers() self._session = ort.InferenceSession( str(self._model_path), sess_options=sess_options, providers=providers, ) self._initialized = True _log.info( f"ONNX Runtime engine ready (providers={self._session.get_providers()})" ) def _resolve_providers(self) -> List[str]: """Resolve ONNX Runtime providers from accelerator and engine options.""" configured_providers = self.options.providers or ["CPUExecutionProvider"] if configured_providers != ["CPUExecutionProvider"]: return configured_providers device = decide_device( self._accelerator_options.device, supported_devices=[AcceleratorDevice.CPU, AcceleratorDevice.CUDA], ) if device.startswith("cuda"): return ["CUDAExecutionProvider", "CPUExecutionProvider"] if device != AcceleratorDevice.CPU.value: _log.warning( "Unsupported ONNX device '%s' for object detection. Falling back to CPU provider.", device, ) return ["CPUExecutionProvider"] def predict_batch( self, input_batch: List[ObjectDetectionEngineInput] ) -> List[ObjectDetectionEngineOutput]: """Run inference on a batch of inputs. Args: input_batch: List of input images with metadata Returns: List of detection outputs """ if not input_batch: return [] if self._session is None or self._processor is None: raise RuntimeError("Engine not initialized. 
Call initialize() first.") # Preprocess images using HF processor (source of truth) images = [item.image.convert("RGB") for item in input_batch] inputs = self._processor(images=images, return_tensors="np") # Get original sizes for post-processing orig_sizes = np.array( [[img.width, img.height] for img in images], dtype=np.int64 ) # Run ONNX inference output_tensors = self._session.run( None, { "images": inputs["pixel_values"], "orig_target_sizes": orig_sizes, }, ) if len(output_tensors) < 3: raise RuntimeError( "Expected ONNX model to return at least 3 outputs: " "[labels, boxes, scores]" ) labels_batch, boxes_batch, scores_batch = output_tensors[:3] batch_outputs: List[ObjectDetectionEngineOutput] = [] for idx, input_item in enumerate(input_batch): batch_outputs.append( self._build_output( input_item=input_item, labels=labels_batch[idx], scores=scores_batch[idx], boxes=boxes_batch[idx], apply_score_threshold=True, ) ) return batch_outputs
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/object_detection/onnxruntime_engine.py", "license": "MIT License", "lines": 167, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
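# Single-image round-trip sketch, continuing the factory example above:
# `engine` is assumed to be an OnnxRuntimeObjectDetectionEngine whose model
# artifacts resolve; the blank image is a stand-in for a real page render.
from PIL import Image

from docling.models.inference_engines.object_detection.base import (
    ObjectDetectionEngineInput,
)

page_image = Image.new("RGB", (640, 480), "white")
output = engine.predict(
    ObjectDetectionEngineInput(image=page_image, metadata={"page_no": 0})
)
# predict() initializes lazily if needed and delegates to predict_batch();
# boxes come back as [x_min, y_min, x_max, y_max] in pixels of the input image.
for label_id, score, bbox in zip(output.label_ids, output.scores, output.bboxes):
    print(label_id, round(score, 3), bbox)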
docling-project/docling:docling/models/inference_engines/object_detection/transformers_engine.py
"""Transformers implementation for object-detection models.""" from __future__ import annotations import logging import sys from pathlib import Path from typing import TYPE_CHECKING, List, Optional, Union from packaging import version if TYPE_CHECKING: import torch from transformers import AutoModelForObjectDetection from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions from docling.datamodel.object_detection_engine_options import ( TransformersObjectDetectionEngineOptions, ) from docling.models.inference_engines.object_detection.base import ( ObjectDetectionEngineInput, ObjectDetectionEngineOutput, ) from docling.models.inference_engines.object_detection.hf_base import ( HfObjectDetectionEngineBase, ) from docling.utils.accelerator_utils import decide_device if TYPE_CHECKING: from docling.datamodel.stage_model_specs import EngineModelConfig _log = logging.getLogger(__name__) class TransformersObjectDetectionEngine(HfObjectDetectionEngineBase): """Transformers engine for object detection models. Uses HuggingFace Transformers and PyTorch for inference. Supports AutoModelForObjectDetection-compatible models. """ def __init__( self, *, options: TransformersObjectDetectionEngineOptions, model_config: Optional[EngineModelConfig] = None, accelerator_options: AcceleratorOptions, artifacts_path: Optional[Union[Path, str]] = None, ): """Initialize the Transformers engine. Args: options: Transformers-specific runtime options model_config: Model configuration (repo_id, revision, extra_config) accelerator_options: Hardware accelerator configuration artifacts_path: Path to cached model artifacts """ super().__init__( options=options, model_config=model_config, accelerator_options=accelerator_options, artifacts_path=artifacts_path, ) self.options: TransformersObjectDetectionEngineOptions = options self._model: Optional[AutoModelForObjectDetection] = None self._device: Optional[torch.device] = None def _resolve_device(self) -> torch.device: """Resolve PyTorch device from accelerator options.""" import torch device_str = decide_device( self._accelerator_options.device, supported_devices=[ AcceleratorDevice.CPU, AcceleratorDevice.CUDA, AcceleratorDevice.MPS, ], ) # Map to PyTorch device if device_str.startswith("cuda"): return torch.device(device_str) elif device_str == AcceleratorDevice.MPS.value: return torch.device("mps") else: return torch.device("cpu") def _resolve_torch_dtype(self) -> Optional[torch.dtype]: """Resolve PyTorch dtype from options or model config.""" import torch # Priority: options > model_config > None (auto) dtype_str = self.options.torch_dtype or self._model_config.torch_dtype if dtype_str is None: return None dtype_map = { "float32": torch.float32, "float16": torch.float16, "bfloat16": torch.bfloat16, } dtype = dtype_map.get(dtype_str) if dtype is None: _log.warning( f"Unknown torch_dtype '{dtype_str}', using auto dtype detection" ) return dtype def initialize(self) -> None: """Initialize PyTorch model and preprocessor.""" import torch from transformers import AutoModelForObjectDetection _log.info("Initializing Transformers object-detection engine") revision = self._model_config.revision or "main" model_folder = self._resolve_model_folder( repo_id=self._repo_id, revision=revision, ) _log.debug(f"Using model at {model_folder}") # Resolve device and dtype self._device = self._resolve_device() torch_dtype = self._resolve_torch_dtype() # Set num_threads for CPU inference if self._device.type == "cpu": 
torch.set_num_threads(self._accelerator_options.num_threads) # Load preprocessor (source of truth for preprocessing) self._processor = self._load_preprocessor(model_folder) _log.debug(f"Loaded preprocessor with size: {self._processor.size}") # type: ignore[attr-defined] # Load label mapping from config self._id_to_label = self._load_label_mapping(model_folder) _log.debug(f"Loaded label mapping with {len(self._id_to_label)} labels") # Load model _log.debug(f"Loading model from {model_folder} to device {self._device}") try: self._model = AutoModelForObjectDetection.from_pretrained( str(model_folder), torch_dtype=torch_dtype, ) self._model.to(self._device) # type: ignore[union-attr] self._model.eval() # type: ignore[union-attr] # Optionally compile model for better performance (model must be in eval mode first) # Works for Python < 3.14 with any torch 2.x # Works for Python >= 3.14 with torch >= 2.10 if self.options.compile_model: if sys.version_info < (3, 14): self._model = torch.compile(self._model) # type: ignore[arg-type,assignment] _log.debug("Model compiled with torch.compile()") elif version.parse(torch.__version__) >= version.parse("2.10"): self._model = torch.compile(self._model) # type: ignore[arg-type,assignment] _log.debug("Model compiled with torch.compile()") else: _log.warning( "Model compilation requested but not available " "(requires Python < 3.14 or torch >= 2.10 for Python 3.14+)" ) except Exception as e: raise RuntimeError(f"Failed to load model from {model_folder}: {e}") self._initialized = True _log.info( f"Transformers engine ready (device={self._device}, dtype={self._model.dtype})" # type: ignore[union-attr] ) def predict_batch( self, input_batch: List[ObjectDetectionEngineInput] ) -> List[ObjectDetectionEngineOutput]: """Run inference on a batch of inputs. Args: input_batch: List of input images with metadata Returns: List of detection outputs """ import torch if not input_batch: return [] if self._model is None or self._processor is None: raise RuntimeError("Engine not initialized. Call initialize() first.") # Preprocess images using HF processor images = [item.image.convert("RGB") for item in input_batch] inputs = self._processor(images=images, return_tensors="pt").to(self._device) # Get target sizes for post-processing target_sizes = torch.tensor( [[img.height, img.width] for img in images], device=self._device ) # Run inference with torch.inference_mode(): outputs = self._model(**inputs) # type: ignore[operator] # Post-process using HuggingFace processor results = self._processor.post_process_object_detection( # type: ignore[attr-defined] outputs, target_sizes=target_sizes, # type: ignore[arg-type] threshold=self.options.score_threshold, ) # Convert to our output format batch_outputs: List[ObjectDetectionEngineOutput] = [] for input_item, result in zip(input_batch, results): batch_outputs.append( self._build_output( input_item=input_item, labels=result["labels"], scores=result["scores"], boxes=result["boxes"], ) ) return batch_outputs
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/object_detection/transformers_engine.py", "license": "MIT License", "lines": 185, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
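# Minimal restatement of the torch.compile eligibility gate in initialize()
# above: compilation is attempted on Python < 3.14 with any torch 2.x, and on
# Python >= 3.14 only when torch itself is >= 2.10.
import sys

from packaging import version


def can_compile(torch_version: str) -> bool:
    if sys.version_info < (3, 14):
        return True
    return version.parse(torch_version) >= version.parse("2.10")


assert can_compile("2.10.0")  # holds on every Python version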
docling-project/docling:docling/models/stages/layout/layout_object_detection_model.py
"""Layout detection stage backed by object-detection runtimes.""" from __future__ import annotations import logging from pathlib import Path from typing import Dict, List, Optional, Sequence import numpy as np from docling_core.types.doc import CoordOrigin, DocItemLabel from PIL import Image from docling.datamodel.accelerator_options import AcceleratorOptions from docling.datamodel.base_models import BoundingBox, Cluster, LayoutPrediction, Page from docling.datamodel.document import ConversionResult from docling.datamodel.pipeline_options import LayoutObjectDetectionOptions from docling.models.base_layout_model import BaseLayoutModel from docling.models.inference_engines.object_detection import ( BaseObjectDetectionEngine, ObjectDetectionEngineInput, ObjectDetectionEngineOutput, create_object_detection_engine, ) from docling.utils.layout_postprocessor import LayoutPostprocessor from docling.utils.profiling import TimeRecorder _log = logging.getLogger(__name__) class LayoutObjectDetectionModel(BaseLayoutModel): """Layout detection using the generic object-detection inference engines.""" def __init__( self, artifacts_path: Optional[Path], accelerator_options: AcceleratorOptions, options: LayoutObjectDetectionOptions, enable_remote_services: bool = False, ) -> None: self.options = options self.engine: BaseObjectDetectionEngine = create_object_detection_engine( options=options.engine_options, model_spec=self.options.model_spec, artifacts_path=artifacts_path, enable_remote_services=enable_remote_services, accelerator_options=accelerator_options, ) self.engine.initialize() # Convert engine's string labels to DocItemLabel enums self._label_map = self._build_label_map() def _build_label_map(self) -> Dict[int, DocItemLabel]: """Build label mapping from engine's label names to DocItemLabel enums. Raises: RuntimeError: If labels don't match DocItemLabel enum. """ id_to_label_str = self.engine.get_label_mapping() label_map = {} for label_id, label_name in id_to_label_str.items(): # Convert label name to uppercase to match DocItemLabel enum convention label_enum_name = label_name.upper() try: label_map[label_id] = DocItemLabel[label_enum_name] except KeyError: raise RuntimeError( f"Label '{label_name}' (ID {label_id}) from model config " f"does not match any DocItemLabel enum value." 
) return label_map @classmethod def get_options_type(cls) -> type[LayoutObjectDetectionOptions]: return LayoutObjectDetectionOptions def predict_layout( self, conv_res: ConversionResult, pages: Sequence[Page], ) -> Sequence[LayoutPrediction]: pages = list(pages) predictions: list[LayoutPrediction] = [] for page in pages: assert page._backend is not None if not page._backend.is_valid(): existing_prediction = page.predictions.layout or LayoutPrediction() page.predictions.layout = existing_prediction predictions.append(existing_prediction) continue page_image = page.get_image(scale=1.0) if page_image is None: empty_prediction = page.predictions.layout or LayoutPrediction() page.predictions.layout = empty_prediction predictions.append(empty_prediction) continue with TimeRecorder(conv_res, "layout"): engine_input = ObjectDetectionEngineInput( image=page_image, metadata={"page_no": page.page_no}, ) engine_output = self.engine.predict(engine_input) clusters = self._predictions_to_clusters( page=page, image=page_image, engine_output=engine_output, ) processed_clusters, processed_cells = LayoutPostprocessor( page=page, clusters=clusters, options=self.options, ).postprocess() layout_prediction = LayoutPrediction(clusters=processed_clusters) page.predictions.layout = layout_prediction if processed_clusters: layout_scores = [c.confidence for c in processed_clusters] conv_res.confidence.pages[page.page_no].layout_score = float( np.mean(layout_scores) ) else: conv_res.confidence.pages[page.page_no].layout_score = 0.0 if processed_cells: ocr_scores = [c.confidence for c in processed_cells if c.from_ocr] if ocr_scores: conv_res.confidence.pages[page.page_no].ocr_score = float( np.mean(ocr_scores) ) predictions.append(layout_prediction) return predictions def _predictions_to_clusters( self, page: Page, image: Image.Image, engine_output: ObjectDetectionEngineOutput, ) -> List[Cluster]: assert page.size is not None scale_x = page.size.width / image.width scale_y = page.size.height / image.height clusters: List[Cluster] = [] for idx, (label_id, score, bbox_coords) in enumerate( zip(engine_output.label_ids, engine_output.scores, engine_output.bboxes) ): label = self._label_map.get(label_id, DocItemLabel.TEXT) bbox = BoundingBox( l=bbox_coords[0] * scale_x, t=bbox_coords[1] * scale_y, r=bbox_coords[2] * scale_x, b=bbox_coords[3] * scale_y, coord_origin=CoordOrigin.TOPLEFT, ) clusters.append( Cluster( id=idx, label=label, confidence=score, bbox=bbox, cells=[], ) ) return clusters
{ "repo_id": "docling-project/docling", "file_path": "docling/models/stages/layout/layout_object_detection_model.py", "license": "MIT License", "lines": 147, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
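# Standalone check of the coordinate mapping in _predictions_to_clusters
# above: detections arrive in pixels of the rendered image and are scaled
# into page units with independent x/y factors.
def rescale_bbox(bbox, image_size, page_size):
    (img_w, img_h), (page_w, page_h) = image_size, page_size
    sx, sy = page_w / img_w, page_h / img_h
    x0, y0, x1, y1 = bbox
    return [x0 * sx, y0 * sy, x1 * sx, y1 * sy]


# A 100x100 render of a page measuring 200x400 in page units:
assert rescale_bbox([10, 20, 30, 40], (100, 100), (200, 400)) == [
    20.0,
    80.0,
    60.0,
    160.0,
]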
docling-project/docling:docling/backend/latex_backend.py
import logging import re from copy import deepcopy from io import BytesIO from pathlib import Path from typing import Callable, List, Optional, Union import pypdfium2 from docling_core.types.doc import ( DocItemLabel, DoclingDocument, GroupLabel, ImageRef, NodeItem, TableCell, TableData, TextItem, ) from docling_core.types.doc.document import Formatting from PIL import Image from pylatexenc.latex2text import LatexNodes2Text from pylatexenc.latexwalker import ( LatexCharsNode, LatexEnvironmentNode, LatexGroupNode, LatexMacroNode, LatexMathNode, LatexWalker, ) from docling.backend.abstract_backend import DeclarativeDocumentBackend from docling.datamodel.backend_options import LatexBackendOptions from docling.datamodel.base_models import InputFormat from docling.datamodel.document import InputDocument _log = logging.getLogger(__name__) class LatexDocumentBackend(DeclarativeDocumentBackend): def __init__( self, in_doc: InputDocument, path_or_stream: Union[BytesIO, Path], options: LatexBackendOptions = LatexBackendOptions(), ): super().__init__(in_doc, path_or_stream, options) self.latex_text = "" self.labels: dict[str, bool] = {} self._custom_macros: dict[str, str] = {} if isinstance(self.path_or_stream, BytesIO): raw_bytes = self.path_or_stream.getvalue() for encoding in ["utf-8", "latin-1", "cp1252"]: try: self.latex_text = raw_bytes.decode(encoding) break except UnicodeDecodeError: continue if not self.latex_text: _log.warning("Failed to decode LaTeX content, using replacement mode") self.latex_text = raw_bytes.decode("utf-8", errors="replace") elif isinstance(self.path_or_stream, Path): # Try multiple encodings for file for encoding in ["utf-8", "latin-1", "cp1252"]: try: with open(self.path_or_stream, encoding=encoding) as f: self.latex_text = f.read() break except UnicodeDecodeError: continue except FileNotFoundError: _log.error(f"LaTeX file not found: {self.path_or_stream}") break except OSError as e: _log.error(f"Error reading LaTeX file: {e}") break def is_valid(self) -> bool: return bool(self.latex_text.strip()) @classmethod def supports_pagination(cls) -> bool: return False @classmethod def supported_formats(cls) -> set[InputFormat]: return {InputFormat.LATEX} def _preprocess_custom_macros(self, latex_text: str) -> str: """Pre-process LaTeX to expand common problematic macros before parsing""" # Common equation shortcuts that cause parsing issues latex_text = re.sub(r"\\be\b", r"\\begin{equation}", latex_text) latex_text = re.sub(r"\\ee\b", r"\\end{equation}", latex_text) latex_text = re.sub(r"\\bea\b", r"\\begin{eqnarray}", latex_text) latex_text = re.sub(r"\\eea\b", r"\\end{eqnarray}", latex_text) latex_text = re.sub(r"\\beq\b", r"\\begin{equation}", latex_text) latex_text = re.sub(r"\\eeq\b", r"\\end{equation}", latex_text) return latex_text def convert(self) -> DoclingDocument: doc = DoclingDocument(name=self.file.stem) # Pre-process: expand common custom equation macros preprocessed_text = self._preprocess_custom_macros(self.latex_text) walker = LatexWalker(preprocessed_text, tolerant_parsing=True) try: nodes, pos, len_ = walker.get_latex_nodes() except Exception as e: _log.warning(f"LaTeX parsing failed: {e}. 
Using fallback text extraction.") doc.add_text(label=DocItemLabel.TEXT, text=self.latex_text) return doc try: # First pass: Extract custom macros from ALL nodes (including preamble) # This must happen before finding the document environment self._extract_custom_macros(nodes) doc_node = self._find_document_env(nodes) if doc_node: self._process_nodes(doc_node.nodelist, doc) else: self._process_nodes(nodes, doc) except Exception as e: _log.error(f"Error processing LaTeX nodes: {e}") return doc def _extract_custom_macros(self, nodes, depth: int = 0): """Extract custom macro definitions from the document""" if nodes is None or depth > 5: return for node in nodes: if isinstance(node, LatexMacroNode) and node.macroname == "newcommand": if node.nodeargd and node.nodeargd.argnlist: argnlist = node.nodeargd.argnlist # Find the name argument (typically at index 1) name_arg = argnlist[1] if len(argnlist) > 1 else None # Find the definition argument (last non-None argument) def_arg = None for arg in reversed(argnlist): if arg is not None: def_arg = arg break if name_arg and def_arg and name_arg is not def_arg: # Extract macro name from the first argument # The macro name comes as raw latex like "{\myterm}" or "\myterm" macro_name_raw = name_arg.latex_verbatim() # Clean up: remove braces, spaces, and leading backslash # This handles both {\myterm} and \myterm formats macro_name = macro_name_raw.strip("{} \n\t") # Remove leading backslash if present if macro_name.startswith("\\"): macro_name = macro_name[1:] # Extract definition as raw LaTeX (for use in math expansion) if hasattr(def_arg, "nodelist"): # Get raw LaTeX content for proper math expansion macro_def = def_arg.latex_verbatim() # Only strip outermost braces if they wrap the entire content if macro_def.startswith("{") and macro_def.endswith("}"): macro_def = macro_def[1:-1] if macro_name: # Only register if we got a valid name self._custom_macros[macro_name] = macro_def _log.debug( f"Registered custom macro: \\{macro_name} -> '{macro_def}'" ) # Recursively search in nested structures if hasattr(node, "nodelist") and node.nodelist: self._extract_custom_macros(node.nodelist, depth + 1) if hasattr(node, "nodeargd") and node.nodeargd: argnlist = getattr(node.nodeargd, "argnlist", None) if argnlist: for arg in argnlist: if hasattr(arg, "nodelist") and arg.nodelist: self._extract_custom_macros(arg.nodelist, depth + 1) def _find_document_env(self, nodes, depth: int = 0): """Recursively search for document environment""" if nodes is None or depth > 5: return None for node in nodes: if isinstance(node, LatexEnvironmentNode) and node.envname == "document": return node if hasattr(node, "nodelist") and node.nodelist: result = self._find_document_env(node.nodelist, depth + 1) if result: return result if hasattr(node, "nodeargd") and node.nodeargd: argnlist = getattr(node.nodeargd, "argnlist", None) if argnlist: for arg in argnlist: if hasattr(arg, "nodelist") and arg.nodelist: result = self._find_document_env(arg.nodelist, depth + 1) if result: return result return None def _process_chars_node( self, node: LatexCharsNode, doc: DoclingDocument, parent: Optional[NodeItem], formatting: Optional[Formatting], text_label: Optional[DocItemLabel], text_buffer: List[str], flush_fn: Callable[[], None], ): text = node.chars if "\n\n" in text: # Split by paragraph breaks, keeping any content before first break parts = text.split("\n\n") # First part goes into current buffer (e.g., "." 
before a paragraph break) first_part = parts[0].strip() if first_part: text_buffer.append(first_part) # Flush buffer (now includes content before the break) flush_fn() # Remaining parts are separate paragraphs for part in parts[1:]: part_stripped = part.strip() if part_stripped: doc.add_text( parent=parent, label=text_label or DocItemLabel.PARAGRAPH, text=part_stripped, formatting=formatting, ) else: text_buffer.append(text) def _process_macro_node_inline( self, node: LatexMacroNode, doc: DoclingDocument, parent: Optional[NodeItem], formatting: Optional[Formatting], text_label: Optional[DocItemLabel], text_buffer: List[str], flush_fn: Callable[[], None], ): if node.macroname in ["%", "$", "&", "#", "_", "{", "}", "~"]: if node.macroname == "~": text_buffer.append(" ") # Non-breaking space else: text_buffer.append(node.macroname) elif node.macroname == " ": text_buffer.append(" ") # Handle inline formatting macros - keep in buffer elif node.macroname in [ "textbf", "textit", "emph", "texttt", "underline", ]: formatted_text = self._extract_macro_arg(node) if formatted_text: text_buffer.append(formatted_text) # Handle custom macros - expand and keep in buffer elif node.macroname in self._custom_macros: expansion = self._custom_macros[node.macroname] _log.debug(f"Expanding custom macro \\{node.macroname} -> '{expansion}'") text_buffer.append(expansion) # Handle citations and references inline to avoid line breaks elif node.macroname in ["cite", "citep", "citet", "ref", "eqref"]: ref_arg = self._extract_macro_arg(node) if ref_arg: text_buffer.append(f"[{ref_arg}]") # Handle URLs inline elif node.macroname == "url": url_text = self._extract_macro_arg(node) if url_text: text_buffer.append(url_text) # Skip formatting switches that take arguments we don't want to output elif node.macroname in ["color", "definecolor", "colorlet"]: pass # Ignore color commands entirely else: # Check if this is a structural macro that needs special handling structural_macros = { "section", "subsection", "subsubsection", "chapter", "part", "paragraph", "subparagraph", "caption", "label", "includegraphics", "bibliography", "title", "author", "maketitle", "footnote", "marginpar", "textsc", "textsf", "textrm", "textnormal", "mbox", "href", "newline", "hfill", "break", "centering", "textcolor", "colorbox", "item", "input", "include", } if node.macroname in structural_macros: # Structural macro - flush buffer and process with _process_macro flush_fn() self._process_macro(node, doc, parent, formatting, text_label) elif node.nodeargd and node.nodeargd.argnlist: # Unknown macro with arguments - extract all args as inline text inline_text = self._extract_all_macro_args_inline(node) if inline_text: text_buffer.append(inline_text) else: _log.debug( f"Skipping unknown macro with no extractable content: {node.macroname}" ) else: _log.debug( f"Skipping unknown macro without arguments: {node.macroname}" ) def _process_math_node( self, node: LatexMathNode, doc: DoclingDocument, parent: Optional[NodeItem], text_buffer: List[str], flush_fn: Callable[[], None], ): is_display = getattr(node, "displaytype", None) == "display" if not is_display: math_verbatim = node.latex_verbatim() is_display = math_verbatim.startswith( ( "$$", "\\[", "\\begin{equation}", "\\begin{align}", "\\begin{gather}", "\\begin{displaymath}", ) ) if is_display: flush_fn() math_text = self._clean_math(node.latex_verbatim(), "display") doc.add_text(parent=parent, label=DocItemLabel.FORMULA, text=math_text) else: # Expand custom macros in inline math for KaTeX 
compatibility text_buffer.append(self._expand_macros(node.latex_verbatim())) def _process_group_node( self, node: LatexGroupNode, doc: DoclingDocument, parent: Optional[NodeItem], formatting: Optional[Formatting], text_label: Optional[DocItemLabel], text_buffer: List[str], flush_fn: Callable[[], None], ): if node.nodelist and self._is_text_only_group(node): group_text = self._nodes_to_text(node.nodelist) if group_text: text_buffer.append(group_text) elif node.nodelist: flush_fn() self._process_nodes(node.nodelist, doc, parent, formatting, text_label) def _process_nodes( self, nodes, doc: DoclingDocument, parent: Optional[NodeItem] = None, formatting: Optional[Formatting] = None, text_label: Optional[DocItemLabel] = None, ): if nodes is None: return text_buffer: list[str] = [] def flush_text_buffer(): if text_buffer: combined_text = "".join(text_buffer).strip() if combined_text: doc.add_text( parent=parent, label=text_label or DocItemLabel.TEXT, text=combined_text, formatting=formatting, ) text_buffer.clear() for node in nodes: try: if isinstance(node, LatexCharsNode): self._process_chars_node( node, doc, parent, formatting, text_label, text_buffer, flush_text_buffer, ) elif isinstance(node, LatexMacroNode): self._process_macro_node_inline( node, doc, parent, formatting, text_label, text_buffer, flush_text_buffer, ) elif isinstance(node, LatexEnvironmentNode): flush_text_buffer() self._process_environment(node, doc, parent, formatting, text_label) elif isinstance(node, LatexMathNode): self._process_math_node( node, doc, parent, text_buffer, flush_text_buffer ) elif isinstance(node, LatexGroupNode): self._process_group_node( node, doc, parent, formatting, text_label, text_buffer, flush_text_buffer, ) except Exception as e: _log.warning(f"Failed to process node {type(node).__name__}: {e}") continue # Continue with next node flush_text_buffer() def _process_macro( # noqa: C901 self, node: LatexMacroNode, doc: DoclingDocument, parent: Optional[NodeItem] = None, formatting: Optional[Formatting] = None, text_label: Optional[DocItemLabel] = None, ): """Process LaTeX macro nodes""" if node.macroname in [ "part", "chapter", "section", "subsection", "subsubsection", ]: title = self._extract_macro_arg(node) if title: level = self._get_heading_level(node.macroname) doc.add_heading(parent=parent, text=title, level=level) elif node.macroname == "title": title = self._extract_macro_arg(node) if title: doc.add_text(parent=parent, label=DocItemLabel.TITLE, text=title) elif node.macroname == "author": pass elif node.macroname in ["date", "thanks", "maketitle"]: pass elif node.macroname in ["textsc", "textsf", "textrm", "textnormal", "mbox"]: # Similar recursion if node.nodeargd and node.nodeargd.argnlist: arg = node.nodeargd.argnlist[-1] if hasattr(arg, "nodelist"): self._process_nodes( arg.nodelist, doc, parent, formatting, text_label ) elif node.macroname in ["cite", "citep", "citet", "ref", "eqref"]: ref_arg = self._extract_macro_arg(node) if ref_arg: ref_text = f"[{ref_arg}]" doc.add_text(parent=parent, label=DocItemLabel.REFERENCE, text=ref_text) elif node.macroname == "url": url_text = self._extract_macro_arg(node) if url_text: doc.add_text(parent=parent, label=DocItemLabel.REFERENCE, text=url_text) elif node.macroname == "label": # Store labels for potential cross-referencing label_text = self._extract_macro_arg(node) if label_text: self.labels[label_text] = True elif node.macroname == "caption": caption_text = self._extract_macro_arg(node) if caption_text: doc.add_text( parent=parent, 
label=DocItemLabel.CAPTION, text=caption_text ) elif node.macroname in ["footnote", "marginpar"]: footnote_text = self._extract_macro_arg(node) if footnote_text: doc.add_text( parent=parent, label=DocItemLabel.FOOTNOTE, text=footnote_text ) elif node.macroname == "includegraphics": img_path = self._extract_macro_arg(node) if img_path: image = None try: if isinstance(self.path_or_stream, Path): img_full_path = self.path_or_stream.parent / img_path if img_full_path.exists(): suffix = img_full_path.suffix.lower() if suffix == ".pdf": pdf = pypdfium2.PdfDocument(img_full_path) page = pdf[0] pil_image = page.render(scale=2).to_pil() page.close() pdf.close() dpi = 144 _log.debug( f"Rendered PDF image {img_path}: {pil_image.size}" ) else: pil_image = Image.open(img_full_path) dpi = pil_image.info.get("dpi", (72, 72)) if isinstance(dpi, tuple): dpi = dpi[0] _log.debug( f"Loaded image {img_path}: {pil_image.size}, DPI={dpi}" ) image = ImageRef.from_pil(image=pil_image, dpi=int(dpi)) except Exception as e: _log.debug(f"Could not load image {img_path}: {e}") caption = doc.add_text( label=DocItemLabel.CAPTION, text=f"Image: {img_path}" ) doc.add_picture( parent=parent, caption=caption, image=image, ) elif node.macroname == "\\": pass elif node.macroname in [ "documentclass", "usepackage", "geometry", "hypersetup", "lstset", "bibliographystyle", "newcommand", "renewcommand", "def", "let", "edef", "gdef", "xdef", "newenvironment", "renewenvironment", "DeclareMathOperator", "DeclareMathSymbol", "setlength", "setcounter", "addtolength", "color", "definecolor", "colorlet", "AtBeginDocument", "AtEndDocument", "newlength", "newcounter", "newif", "providecommand", "DeclareOption", "RequirePackage", "ProvidesPackage", "LoadClass", "makeatletter", "makeatother", "NeedsTeXFormat", "ProvidesClass", "DeclareRobustCommand", ]: pass elif node.macroname in ["input", "include"]: filepath = self._extract_macro_arg(node) if filepath and isinstance(self.path_or_stream, Path): input_path = self.path_or_stream.parent / filepath if not input_path.suffix: input_path = input_path.with_suffix(".tex") if input_path.exists(): try: content = input_path.read_text(encoding="utf-8") sub_walker = LatexWalker(content, tolerant_parsing=True) sub_nodes, _, _ = sub_walker.get_latex_nodes() self._process_nodes( sub_nodes, doc, parent, formatting, text_label ) _log.debug(f"Loaded input file: {input_path}") except Exception as e: _log.debug(f"Failed to load input file {filepath}: {e}") elif node.macroname in ["&", "%", "$", "#", "_", "{", "}"]: # Escaped symbols: \& -> & doc.add_text( parent=parent, text=node.macroname, formatting=formatting, label=(text_label or DocItemLabel.TEXT), ) elif node.macroname in [ "'", '"', "^", "`", "~", "=", ".", "c", "d", "b", "H", "k", "r", "t", "u", "v", ]: # Accents and diacritics try: text = LatexNodes2Text().nodelist_to_text([node]) doc.add_text( parent=parent, text=text, formatting=formatting, label=(text_label or DocItemLabel.TEXT), ) except Exception: pass elif node.macroname == "href": # \href{url}{text} if node.nodeargd and len(node.nodeargd.argnlist) >= 2: # url_arg = node.nodeargd.argnlist[0] text_arg = node.nodeargd.argnlist[1] if hasattr(text_arg, "nodelist"): self._process_nodes( text_arg.nodelist, doc, parent, formatting, text_label ) elif node.macroname in ["newline", "hfill", "break", "centering"]: if node.macroname == "newline": doc.add_text( parent=parent, text="\n", formatting=formatting, label=(text_label or DocItemLabel.TEXT), ) elif node.macroname in [ "bf", "it", "rm", "sc", "sf", 
"sl", "tt", "cal", "em", "tiny", "scriptsize", "footnotesize", "small", "large", "Large", "LARGE", "huge", "Huge", "color", # \color{red} - ignore color switch ]: # Legacy formatting and size switches - ignore to preserve content flow (prevent "Unknown macro" skip) pass elif node.macroname in ["textcolor", "colorbox"]: # \textcolor{color}{text} - process only the text content (last argument) if node.nodeargd and node.nodeargd.argnlist: # Find the last non-None argument (the text content) for arg in reversed(node.nodeargd.argnlist): if arg is not None and hasattr(arg, "nodelist"): self._process_nodes( arg.nodelist, doc, parent, formatting, text_label ) break elif node.macroname == "item": pass else: # Unknown macro - try to extract content from arguments if node.nodeargd and node.nodeargd.argnlist: processed_any = False for arg in node.nodeargd.argnlist: if hasattr(arg, "nodelist"): self._process_nodes( arg.nodelist, doc, parent, formatting, text_label ) processed_any = True if processed_any: _log.debug(f"Processed content of unknown macro: {node.macroname}") else: _log.debug(f"Skipping unknown macro: {node.macroname}") else: _log.debug(f"Skipping unknown macro: {node.macroname}") def _process_environment( self, node: LatexEnvironmentNode, doc: DoclingDocument, parent: Optional[NodeItem] = None, formatting: Optional[Formatting] = None, text_label: Optional[DocItemLabel] = None, ): """Process LaTeX environment nodes""" if node.envname == "document": self._process_nodes(node.nodelist, doc, parent, formatting, text_label) elif node.envname == "abstract": doc.add_heading(parent=parent, text="Abstract", level=1) self._process_nodes(node.nodelist, doc, parent, formatting, text_label) elif node.envname.replace("*", "") in [ "equation", "align", "gather", "multline", "flalign", "alignat", "displaymath", "eqnarray", ]: math_text = self._clean_math(node.latex_verbatim(), node.envname) doc.add_text(parent=parent, label=DocItemLabel.FORMULA, text=math_text) elif node.envname == "math": math_text = self._clean_math(node.latex_verbatim(), node.envname) doc.add_text(parent=parent, label=DocItemLabel.FORMULA, text=math_text) elif node.envname in ["itemize", "enumerate", "description"]: self._process_list(node, doc, parent, formatting, text_label) elif node.envname == "tabular": table_data = self._parse_table(node) if table_data: doc.add_table(parent=parent, data=table_data) elif node.envname in ["table", "table*"]: self._process_nodes(node.nodelist, doc, parent, formatting, text_label) elif node.envname in ["figure", "figure*"]: # Process figure environment with proper grouping self._process_figure(node, doc, parent, formatting, text_label) elif node.envname in ["verbatim", "lstlisting", "minted"]: code_text = self._extract_verbatim_content( node.latex_verbatim(), node.envname ) doc.add_text(parent=parent, label=DocItemLabel.CODE, text=code_text) elif node.envname == "thebibliography": doc.add_heading(parent=parent, text="References", level=1) self._process_bibliography(node, doc, parent, formatting) elif node.envname in ["filecontents", "filecontents*"]: pass else: self._process_nodes(node.nodelist, doc, parent, formatting, text_label) def _process_figure( self, node: LatexEnvironmentNode, doc: DoclingDocument, parent: Optional[NodeItem] = None, formatting: Optional[Formatting] = None, text_label: Optional[DocItemLabel] = None, ): """Process figure environment with proper grouping""" # Create a group for the figure to contain images and captions together figure_group = doc.add_group( parent=parent, 
name="figure", label=GroupLabel.SECTION ) # Process all nodes within the figure self._process_nodes(node.nodelist, doc, figure_group, formatting, text_label) def _process_list( self, node: LatexEnvironmentNode, doc: DoclingDocument, parent: Optional[NodeItem] = None, formatting: Optional[Formatting] = None, text_label: Optional[DocItemLabel] = None, ): """Process itemize/enumerate environments""" list_group = doc.add_group(parent=parent, name="list", label=GroupLabel.LIST) items = [] current_item: list = [] for n in node.nodelist: if isinstance(n, LatexMacroNode) and n.macroname == "item": if current_item: items.append(current_item) current_item = [] if n.nodeargd and n.nodeargd.argnlist: current_item.append(n) else: current_item.append(n) if current_item: items.append(current_item) for item_nodes in items: self._process_nodes( item_nodes, doc, list_group, formatting, text_label=DocItemLabel.LIST_ITEM, ) def _process_table_macro_node( self, n: LatexMacroNode, source_latex: str, current_cell_nodes: List, finish_cell_fn: Callable[..., None], finish_row_fn: Callable[[], None], parse_brace_args_fn: Callable[[str], List[str]], ): if n.macroname == "\\": # Row break finish_row_fn() elif n.macroname == "multicolumn": # \multicolumn{num_cols}{alignment}{content} # Extract from source using node position if hasattr(n, "pos") and n.pos is not None: remaining = source_latex[n.pos :] args = parse_brace_args_fn(remaining) if len(args) >= 3: try: num_cols = int(args[0]) except (ValueError, TypeError): num_cols = 1 content_text = args[2] # Skip alignment arg[1] if content_text: current_cell_nodes.append(LatexCharsNode(chars=content_text)) finish_cell_fn(col_span=num_cols) else: # Fallback current_cell_nodes.append(n) else: current_cell_nodes.append(n) elif n.macroname == "multirow": # \multirow{num_rows}{width}{content} if hasattr(n, "pos") and n.pos is not None: remaining = source_latex[n.pos :] args = parse_brace_args_fn(remaining) if len(args) >= 3: try: num_rows = int(args[0]) except (ValueError, TypeError): num_rows = 1 content_text = args[2] # Skip width arg[1] if content_text: current_cell_nodes.append(LatexCharsNode(chars=content_text)) finish_cell_fn(row_span=num_rows) else: # Fallback current_cell_nodes.append(n) else: current_cell_nodes.append(n) elif n.macroname in [ "hline", "cline", "toprule", "midrule", "bottomrule", "cmidrule", "specialrule", ]: # Ignore rule lines for data extraction pass elif n.macroname in [ "rule", "vspace", "hspace", "vskip", "hskip", "smallskip", "medskip", "bigskip", "strut", "phantom", "hphantom", "vphantom", "noalign", ]: # Ignore formatting commands - don't add to cell content pass elif n.macroname == "&": # Cell break (if parsed as macro) finish_cell_fn() elif n.macroname in ["%", "$", "#", "_", "{", "}"]: # Escaped characters - add to current cell current_cell_nodes.append(n) else: current_cell_nodes.append(n) def _parse_table(self, node: LatexEnvironmentNode) -> Optional[TableData]: """Parse tabular environment into TableData with multicolumn/multirow support""" rows = [] current_row = [] current_cell_nodes: list = [] # Get source latex for parsing multicolumn/multirow # These macros don't have their args parsed by pylatexenc by default source_latex = node.latex_verbatim() def parse_brace_args(text: str) -> list: """Parse {arg1}{arg2}{arg3} from text, return list of args""" args = [] i = 0 while i < len(text): if text[i] == "{": depth = 1 start = i + 1 i += 1 while i < len(text) and depth > 0: if text[i] == "{": depth += 1 elif text[i] == "}": depth -= 1 i += 
1 args.append(text[start : i - 1]) else: i += 1 return args def finish_cell(col_span: int = 1, row_span: int = 1): """Finish current cell with optional spanning""" text = self._nodes_to_text(current_cell_nodes).strip() cell = TableCell( text=text, start_row_offset_idx=0, end_row_offset_idx=0, start_col_offset_idx=0, end_col_offset_idx=0, ) # Store span info temporarily (will be set properly later) cell._col_span = col_span # type: ignore[attr-defined] cell._row_span = row_span # type: ignore[attr-defined] current_row.append(cell) current_cell_nodes.clear() # Add placeholder cells for column span for _ in range(col_span - 1): placeholder = TableCell( text="", start_row_offset_idx=0, end_row_offset_idx=0, start_col_offset_idx=0, end_col_offset_idx=0, ) placeholder._is_placeholder = True # type: ignore[attr-defined] current_row.append(placeholder) def finish_row(): """Finish current row and handle multirow placeholders""" if current_cell_nodes: finish_cell() # Finish the last cell of the row if current_row: rows.append(current_row[:]) # Copy current_row.clear() # Guard against None nodelist if node.nodelist is None: return None for n in node.nodelist: if isinstance(n, LatexMacroNode): self._process_table_macro_node( n, source_latex, current_cell_nodes, finish_cell, finish_row, parse_brace_args, ) elif isinstance(n, LatexCharsNode): text = n.chars if "&" in text: parts = text.split("&") for i, part in enumerate(parts): if part: # Add text node for the part current_cell_nodes.append(LatexCharsNode(chars=part)) if i < len(parts) - 1: finish_cell() else: current_cell_nodes.append(n) else: if hasattr(n, "specials_chars") and n.specials_chars == "&": finish_cell() else: current_cell_nodes.append(n) finish_row() if not rows: return None # Calculate dimensions num_rows = len(rows) num_cols = max(len(row) for row in rows) if rows else 0 # Build flat cell list with proper indices and spans flat_cells = [] for i, row in enumerate(rows): for j in range(num_cols): if j < len(row): cell = row[j] # Skip placeholder cells created by multicolumn if getattr(cell, "_is_placeholder", False): continue else: cell = TableCell( text="", start_row_offset_idx=0, end_row_offset_idx=0, start_col_offset_idx=0, end_col_offset_idx=0, ) # Set row/col indices cell.start_row_offset_idx = i cell.start_col_offset_idx = j # Apply spans if stored col_span = getattr(cell, "_col_span", 1) row_span = getattr(cell, "_row_span", 1) cell.end_row_offset_idx = i + row_span cell.end_col_offset_idx = j + col_span flat_cells.append(cell) return TableData(num_rows=num_rows, num_cols=num_cols, table_cells=flat_cells) def _extract_verbatim_content(self, latex_str: str, env_name: str) -> str: """Extract content from verbatim environments""" pattern = rf"\\begin\{{{env_name}\}}.*?(.*?)\\end\{{{env_name}\}}" match = re.search(pattern, latex_str, re.DOTALL) if match: return match.group(1).strip() return latex_str def _process_bibliography( self, node: LatexEnvironmentNode, doc: DoclingDocument, parent: Optional[NodeItem] = None, formatting: Optional[Formatting] = None, ): """Process bibliography environment""" bib_group = doc.add_group( parent=parent, name="bibliography", label=GroupLabel.LIST ) items = [] current_item: list = [] current_key = "" # Pre process to group by bibitem for n in node.nodelist: if isinstance(n, LatexMacroNode) and n.macroname == "bibitem": if current_item: items.append((current_key, current_item)) current_item = [] current_key = self._extract_macro_arg(n) else: current_item.append(n) if current_item: 
items.append((current_key, current_item)) for key, item_nodes in items: if key: doc.add_text( parent=bib_group, label=DocItemLabel.LIST_ITEM, text=f"[{key}] ", formatting=formatting, ) self._process_nodes( item_nodes, doc, bib_group, formatting, text_label=DocItemLabel.LIST_ITEM, ) def _nodes_to_text(self, nodes) -> str: """Convert a list of nodes to plain text""" text_parts = [] for node in nodes: if isinstance(node, LatexCharsNode): text_parts.append(node.chars) elif isinstance(node, LatexGroupNode): text_parts.append(self._nodes_to_text(node.nodelist)) elif isinstance(node, LatexMacroNode): if node.macroname in [ "textbf", "textit", "emph", "texttt", "underline", ]: text = self._extract_macro_arg(node) if text: text_parts.append(text) elif node.macroname in ["cite", "citep", "citet", "ref", "eqref"]: text_parts.append(node.latex_verbatim()) elif node.macroname == "\\": text_parts.append("\n") elif node.macroname in ["~"]: text_parts.append(" ") elif node.macroname == "item": if node.nodeargd and node.nodeargd.argnlist: arg = node.nodeargd.argnlist[0] if arg: opt_text = arg.latex_verbatim().strip("[] ") text_parts.append(f"{opt_text}: ") elif node.macroname in ["%", "$", "&", "#", "_", "{", "}"]: # Escaped characters text_parts.append(node.macroname) # Handle custom macros in _nodes_to_text as well elif node.macroname in self._custom_macros: expansion = self._custom_macros[node.macroname] text_parts.append(expansion) else: # Unknown macro - extract all arguments inline arg_parts = [] if node.nodeargd and node.nodeargd.argnlist: for arg in node.nodeargd.argnlist: if arg is not None: if hasattr(arg, "nodelist"): text = self._nodes_to_text(arg.nodelist) if text: arg_parts.append(text) else: text = arg.latex_verbatim().strip("{} ") if text: arg_parts.append(text) if arg_parts: text_parts.append(" ".join(arg_parts)) elif isinstance(node, LatexMathNode): # Expand custom macros in math for KaTeX compatibility text_parts.append(self._expand_macros(node.latex_verbatim())) elif isinstance(node, LatexEnvironmentNode): if node.envname in ["equation", "align", "gather"]: text_parts.append(node.latex_verbatim()) else: text_parts.append(self._nodes_to_text(node.nodelist)) result = "".join(text_parts) result = re.sub(r" +", " ", result) result = re.sub(r"\n\n+", "\n\n", result) return result.strip() def _extract_macro_arg(self, node: LatexMacroNode) -> str: """Extract text from macro argument (last argument only)""" if node.nodeargd and node.nodeargd.argnlist: arg = node.nodeargd.argnlist[-1] if arg: if hasattr(arg, "nodelist"): return self._nodes_to_text(arg.nodelist) return arg.latex_verbatim().strip("{} ") return "" def _extract_macro_arg_by_index(self, node: LatexMacroNode, index: int) -> str: """Extract text from macro argument by index (0-based)""" if node.nodeargd and node.nodeargd.argnlist: if 0 <= index < len(node.nodeargd.argnlist): arg = node.nodeargd.argnlist[index] if arg: if hasattr(arg, "nodelist"): return self._nodes_to_text(arg.nodelist) return arg.latex_verbatim().strip("{} ") return "" def _extract_macro_arg_nodes(self, node: LatexMacroNode, index: int) -> list: """Extract node list from macro argument by index (0-based)""" if node.nodeargd and node.nodeargd.argnlist: if 0 <= index < len(node.nodeargd.argnlist): arg = node.nodeargd.argnlist[index] if arg and hasattr(arg, "nodelist"): return arg.nodelist return [] def _extract_all_macro_args_inline(self, node: LatexMacroNode) -> str: """Extract all macro arguments as inline text, concatenated with spaces.""" if not node.nodeargd or not 
node.nodeargd.argnlist: return "" parts = [] for arg in node.nodeargd.argnlist: if arg is not None: if hasattr(arg, "nodelist"): text = self._nodes_to_text(arg.nodelist) if text: parts.append(text) else: text = arg.latex_verbatim().strip("{} ") if text: parts.append(text) return " ".join(parts) def _is_text_only_group(self, node: LatexGroupNode) -> bool: """Check if a group contains only text-like content (no structural elements).""" if not node.nodelist: return True # Macros that indicate structural content structural_macros = { "section", "subsection", "subsubsection", "chapter", "part", "caption", "label", "includegraphics", "bibliography", "title", "author", "maketitle", "footnote", "marginpar", } for n in node.nodelist: if isinstance(n, LatexEnvironmentNode): # Environments are usually structural return False elif isinstance(n, LatexMacroNode): if n.macroname in structural_macros: return False elif isinstance(n, LatexGroupNode): # Recursively check nested groups if not self._is_text_only_group(n): return False return True def _expand_macros(self, latex_str: str) -> str: """Expand custom macros in LaTeX string for KaTeX/MathJax compatibility""" for macro_name, macro_def in self._custom_macros.items(): # Replace \macroname with its definition (word boundary to avoid partial matches) # Use lambda to avoid backslash interpretation in replacement string latex_str = re.sub( rf"\\{re.escape(macro_name)}(?![a-zA-Z])", lambda m: macro_def, latex_str, ) return latex_str def _clean_math(self, latex_str: str, env_name: str) -> str: """Clean math expressions for better readability""" envs_to_strip = [ "equation", "equation*", "displaymath", "math", "eqnarray", "eqnarray*", ] if env_name in envs_to_strip: pattern = rf"\\begin\{{{re.escape(env_name)}\}}(.*?)\\end\{{{re.escape(env_name)}\}}" match = re.search(pattern, latex_str, re.DOTALL) if match: latex_str = match.group(1) latex_str = latex_str.strip() if latex_str.startswith("$$") and latex_str.endswith("$$"): latex_str = latex_str[2:-2] elif latex_str.startswith("$") and latex_str.endswith("$"): latex_str = latex_str[1:-1] elif latex_str.startswith("\\[") and latex_str.endswith("\\]"): latex_str = latex_str[2:-2] elif latex_str.startswith("\\(") and latex_str.endswith("\\)"): latex_str = latex_str[2:-2] latex_str = re.sub(r"\\label\{.*?\}", "", latex_str) # Expand custom macros for KaTeX/MathJax compatibility latex_str = self._expand_macros(latex_str) return latex_str.strip() def _get_heading_level(self, macroname: str) -> int: """Get heading level for sectioning commands""" levels = { "part": 1, "chapter": 1, "section": 1, "subsection": 2, "subsubsection": 3, "paragraph": 4, } return levels.get(macroname, 1)
{ "repo_id": "docling-project/docling", "file_path": "docling/backend/latex_backend.py", "license": "MIT License", "lines": 1228, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
docling-project/docling:tests/test_backend_latex.py
from io import BytesIO from pathlib import Path import pytest from docling_core.types.doc import DocItemLabel, GroupLabel from docling.backend.latex_backend import LatexDocumentBackend from docling.datamodel.base_models import InputFormat from docling.datamodel.document import ConversionResult, DoclingDocument, InputDocument from docling.document_converter import DocumentConverter from .test_data_gen_flag import GEN_TEST_DATA from .verify_utils import verify_document, verify_export GENERATE = GEN_TEST_DATA LATEX_DATA_DIR = Path("./tests/data/latex/") def test_latex_basic_conversion(): latex_content = b""" \\documentclass{article} \\begin{document} \\section{Introduction} Hello World. \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() assert len(doc.texts) > 0 # Check structure headers = [t for t in doc.texts if t.label == DocItemLabel.SECTION_HEADER] paragraphs = [t for t in doc.texts if t.label != DocItemLabel.SECTION_HEADER] assert len(headers) == 1 assert headers[0].text == "Introduction" assert "Hello World" in paragraphs[0].text def test_latex_preamble_filter(): latex_content = b""" \\documentclass{article} \\usepackage{test} \\title{Ignored Title} \\begin{document} Real Content \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() # Title in preamble should be ignored by the backend (unless we explicitly parse it, which current logic doesn't for simplistic Document extraction) # The current logic filters for 'document' environment, so "Real Content" should be there, "Ignored Title" should not (if inside structure but outside document env) full_text = doc.export_to_markdown() assert "Real Content" in full_text assert "Ignored Title" not in full_text assert "usepackage" not in full_text def test_latex_table_parsing(): latex_content = b""" \\documentclass{article} \\begin{document} \\begin{tabular}{cc} Header1 & Header2 \\\\ Row1Col1 & Row1Col2 \\\\ Row2Col1 & \\%Escaped \\end{tabular} \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() assert len(doc.tables) == 1 table = doc.tables[0] assert table.data.num_rows == 3 assert table.data.num_cols == 2 # Check content cells = [c.text.strip() for c in table.data.table_cells] assert "Header1" in cells assert "row1col1" not in cells # Case sensitivity check (should preserve) assert "Row1Col1" in cells assert "%Escaped" in cells # Should be unescaped or at least cleanly parsed def test_latex_math_parsing(): # Test align environment (starred and unstarred) and inline/display math latex_content = rb""" \documentclass{article} \begin{document} Inline math: $E=mc^2$. 
Display math: $$ x = \frac{-b \pm \sqrt{b^2-4ac}}{2a} $$ Aligned equations: \begin{align} a &= b + c \\ d &= e + f \end{align} \end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() formulas = [t for t in doc.texts if t.label == DocItemLabel.FORMULA] assert len(formulas) >= 2 # Display math and Align environment # Inline math should be part of the paragraph text paragraphs = [ t for t in doc.texts if t.label in [DocItemLabel.PARAGRAPH, DocItemLabel.TEXT] ] full_text = " ".join([p.text for p in paragraphs]) assert "$E=mc^2$" in full_text md = doc.export_to_markdown() # Check delimiters assert "$E=mc^2$" in md or r"\( E=mc^2 \)" in md assert r"\frac" in md assert r"\begin{align}" in md # Should preserve align tag for proper rendering def test_latex_escaped_chars(): # Test correct handling of escaped chars to ensure text isn't split latex_content = b""" \\documentclass{article} \\begin{document} value is 23\\% which is high. Costs \\$100. \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() text_items = [ t.text for t in doc.texts if t.label == DocItemLabel.TEXT or t.label == DocItemLabel.PARAGRAPH ] full_text = " ".join(text_items) # "23%" should be together, not "23" and "%" split assert "23%" in full_text or "23\\%" in full_text # Should not have loose "%" newline assert "which is high" in full_text assert "$100" in full_text or "\\$100" in full_text def test_latex_unknown_macro_fallback(): latex_content = b""" \\documentclass{article} \\begin{document} \\unknownmacro{Known Content} \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() md = doc.export_to_markdown() assert "Known Content" in md def test_latex_abstract_environment(): """Test abstract environment parsing""" latex_content = b""" \\documentclass{article} \\begin{document} \\begin{abstract} This is the abstract content. 
\\end{abstract} \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() md = doc.export_to_markdown() assert "Abstract" in md assert "abstract content" in md def test_latex_list_itemize(): """Test itemize list environment""" latex_content = b""" \\documentclass{article} \\begin{document} \\begin{itemize} \\item First item \\item Second item \\item Third item \\end{itemize} \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() list_items = [t for t in doc.texts if t.label == DocItemLabel.LIST_ITEM] assert len(list_items) >= 3 item_texts = [item.text for item in list_items] assert any("First item" in t for t in item_texts) assert any("Second item" in t for t in item_texts) def test_latex_list_enumerate(): """Test enumerate list environment""" latex_content = b""" \\documentclass{article} \\begin{document} \\begin{enumerate} \\item Alpha \\item Beta \\end{enumerate} \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() list_items = [t for t in doc.texts if t.label == DocItemLabel.LIST_ITEM] assert len(list_items) >= 2 def test_latex_description_list(): """Test description list with optional item labels""" latex_content = b""" \\documentclass{article} \\begin{document} \\begin{description} \\item[Term1] Definition one \\item[Term2] Definition two \\end{description} \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() list_items = [t for t in doc.texts if t.label == DocItemLabel.LIST_ITEM] assert len(list_items) >= 2 def test_latex_verbatim_environment(): """Test verbatim code environment""" latex_content = b""" \\documentclass{article} \\begin{document} \\begin{verbatim} def hello(): print("world") \\end{verbatim} \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() code_items = [t for t in doc.texts if t.label == DocItemLabel.CODE] assert len(code_items) >= 1 assert "hello" in code_items[0].text or "print" in code_items[0].text def test_latex_lstlisting_environment(): """Test lstlisting code environment""" latex_content = b""" \\documentclass{article} \\begin{document} \\begin{lstlisting} int main() { return 0; } \\end{lstlisting} \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() code_items = [t for t in doc.texts if t.label == DocItemLabel.CODE] assert len(code_items) >= 1 def test_latex_bibliography(): """Test 
bibliography environment parsing""" latex_content = b""" \\documentclass{article} \\begin{document} Some text. \\begin{thebibliography}{9} \\bibitem{ref1} Author One, Title One, 2020. \\bibitem{ref2} Author Two, Title Two, 2021. \\end{thebibliography} \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() md = doc.export_to_markdown() assert "References" in md def test_latex_caption(): """Test caption macro parsing via includegraphics""" latex_content = b""" \\documentclass{article} \\begin{document} \\includegraphics{test.png} \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() # includegraphics creates a caption with the image path caption_items = [t for t in doc.texts if t.label == DocItemLabel.CAPTION] assert len(caption_items) >= 1 assert "test.png" in caption_items[0].text def test_latex_footnote(): """Test footnote macro parsing""" latex_content = b""" \\documentclass{article} \\begin{document} Main text\\footnote{This is a footnote}. \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() footnote_items = [t for t in doc.texts if t.label == DocItemLabel.FOOTNOTE] assert len(footnote_items) >= 1 assert "footnote" in footnote_items[0].text def test_latex_citet_macro(): """Test citet macro (textual citation)""" latex_content = b""" \\documentclass{article} \\begin{document} According to \\citet{author2020}, this is correct. 
\\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() # Citations are now inline with text md = doc.export_to_markdown() assert "[author2020]" in md assert "According to" in md def test_latex_list_nested(): """Test nested lists (itemize within itemize, enumerate within itemize)""" latex_content = b""" \\documentclass{article} \\begin{document} \\begin{itemize} \\item Outer item one \\item Outer item two \\begin{itemize} \\item Inner item A \\item Inner item B \\end{itemize} \\item Outer item three \\begin{enumerate} \\item Numbered inner 1 \\item Numbered inner 2 \\end{enumerate} \\end{itemize} \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() # Check that we have list groups list_groups = [g for g in doc.groups if g.label == GroupLabel.LIST] assert len(list_groups) >= 1 # At least the outer list # Check that list items exist # Note: Current implementation merges nested list items into their parent items list_items = [t for t in doc.texts if t.label == DocItemLabel.LIST_ITEM] assert len(list_items) >= 3 # 3 outer items (nested items are merged) # Verify some item content - nested items should appear within outer items item_texts = [item.text for item in list_items] assert any("Outer item one" in t for t in item_texts) # Nested items appear in the outer item text assert any("Inner item A" in t or "Inner item B" in t for t in item_texts) assert any("Numbered inner 1" in t or "Numbered inner 2" in t for t in item_texts) def test_latex_label(): """Test label macro parsing""" latex_content = b""" \\documentclass{article} \\begin{document} \\section{Introduction} \\label{sec:intro} Some content. 
\\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) backend.convert() # Labels are stored internally assert "sec:intro" in backend.labels def test_latex_includegraphics(): """Test includegraphics with actual image file""" import tempfile from pathlib import Path from PIL import Image as PILImage # Create a temporary directory and test image with tempfile.TemporaryDirectory() as tmpdir: tmpdir_path = Path(tmpdir) tex_file = tmpdir_path / "test.tex" img_file = tmpdir_path / "test_image.png" # Create a simple test image with known DPI test_img = PILImage.new("RGB", (100, 50), color="red") test_img.save(img_file, dpi=(96, 96)) latex_content = b""" \\documentclass{article} \\begin{document} \\includegraphics{test_image.png} \\end{document} """ # Write LaTeX content to file tex_file.write_bytes(latex_content) in_doc = InputDocument( path_or_stream=tex_file, format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=tex_file) doc = backend.convert() # Verify picture was created assert len(doc.pictures) >= 1 picture = doc.pictures[0] # Verify image was embedded (not None) assert picture.image is not None # Verify caption was created assert len(picture.captions) >= 1 assert "test_image.png" in picture.captions[0].resolve(doc).text def test_latex_includegraphics_missing_image(): """Test includegraphics gracefully handles missing images""" latex_content = b""" \\documentclass{article} \\begin{document} \\includegraphics{nonexistent_image.png} \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() # Picture should still be created with caption assert len(doc.pictures) >= 1 picture = doc.pictures[0] # Image should be None (couldn't load) assert picture.image is None # Caption should still exist assert len(picture.captions) >= 1 assert "nonexistent_image.png" in picture.captions[0].resolve(doc).text def test_latex_citations(): """Test cite macros parsing""" latex_content = b""" \\documentclass{article} \\begin{document} As shown in \\cite{smith2020} and \\citep{jones2021}. Also see \\ref{fig:1} and \\eqref{eq:main}. \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() # Citations are now inline with text md = doc.export_to_markdown() assert "[smith2020]" in md assert "[jones2021]" in md assert "[fig:1]" in md assert "[eq:main]" in md def test_latex_title_macro(): """Test title macro inside document""" latex_content = b""" \\documentclass{article} \\begin{document} \\title{Document Title} \\maketitle Some content. 
\\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() title_items = [t for t in doc.texts if t.label == DocItemLabel.TITLE] assert len(title_items) >= 1 def test_latex_various_math_environments(): """Test various math environments""" latex_content = rb""" \documentclass{article} \begin{document} Equation starred: \begin{equation*} a = b \end{equation*} Gather: \begin{gather} x = y \\ z = w \end{gather} Multline: \begin{multline} first \\ second \end{multline} \end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() formulas = [t for t in doc.texts if t.label == DocItemLabel.FORMULA] assert len(formulas) >= 3 def test_latex_heading_levels(): """Test different heading levels""" latex_content = b""" \\documentclass{article} \\begin{document} \\part{Part One} \\chapter{Chapter One} \\section{Section One} \\subsection{Subsection One} \\subsubsection{Subsubsection One} \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() headers = [t for t in doc.texts if t.label == DocItemLabel.SECTION_HEADER] assert len(headers) >= 3 def test_latex_text_formatting(): """Test text formatting macros""" latex_content = b""" \\documentclass{article} \\begin{document} This is \\textbf{bold} and \\textit{italic} and \\emph{emphasized}. Also \\texttt{monospace} and \\underline{underlined}. 
\\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() md = doc.export_to_markdown() assert "bold" in md assert "italic" in md assert "emphasized" in md def test_latex_table_environment(): """Test table environment (wrapper around tabular)""" latex_content = b""" \\documentclass{article} \\begin{document} \\begin{table} \\begin{tabular}{cc} A & B \\\\ C & D \\end{tabular} \\caption{Sample table} \\end{table} \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() assert len(doc.tables) >= 1 def test_latex_figure_environment(): """Test figure environment parsing""" latex_content = b""" \\documentclass{article} \\begin{document} \\begin{figure} \\includegraphics{test.png} \\caption{Test figure} \\end{figure} \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() assert len(doc.pictures) >= 1 captions = [t for t in doc.texts if t.label == DocItemLabel.CAPTION] assert len(captions) >= 1 def test_latex_is_valid(): """Test is_valid method""" # Valid document latex_content = b"\\documentclass{article}\\begin{document}Content\\end{document}" in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) assert backend.is_valid() is True # Empty document empty_content = b" " in_doc_empty = InputDocument( path_or_stream=BytesIO(empty_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="empty.tex", ) backend_empty = LatexDocumentBackend( in_doc=in_doc_empty, path_or_stream=BytesIO(empty_content) ) assert backend_empty.is_valid() is False def test_latex_supports_pagination(): """Test supports_pagination class method""" assert LatexDocumentBackend.supports_pagination() is False def test_latex_supported_formats(): """Test supported_formats class method""" formats = LatexDocumentBackend.supported_formats() assert InputFormat.LATEX in formats def test_latex_file_path_loading(tmp_path): """Test loading LaTeX from file path instead of BytesIO""" latex_file = tmp_path / "test.tex" latex_file.write_text( r""" \documentclass{article} \begin{document} File content here. 
\end{document}
"""
    )

    in_doc = InputDocument(
        path_or_stream=latex_file,
        format=InputFormat.LATEX,
        backend=LatexDocumentBackend,
        filename="test.tex",
    )
    backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=latex_file)
    doc = backend.convert()

    md = doc.export_to_markdown()
    assert "File content here" in md


def test_latex_empty_table():
    """Test table with no parseable content"""
    latex_content = b"""
\\documentclass{article}
\\begin{document}
\\begin{tabular}{cc}
\\end{tabular}
\\end{document}
"""
    in_doc = InputDocument(
        path_or_stream=BytesIO(latex_content),
        format=InputFormat.LATEX,
        backend=LatexDocumentBackend,
        filename="test.tex",
    )
    backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content))
    doc = backend.convert()
    assert doc is not None


def test_latex_marginpar():
    """Test that the marginpar macro is handled without error"""
    latex_content = b"""
\\documentclass{article}
\\begin{document}
Main text\\marginpar{A margin note} with marginpar.
\\end{document}
"""
    in_doc = InputDocument(
        path_or_stream=BytesIO(latex_content),
        format=InputFormat.LATEX,
        backend=LatexDocumentBackend,
        filename="test.tex",
    )
    backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content))
    doc = backend.convert()
    assert doc is not None


def test_latex_no_document_env():
    """Test LaTeX without document environment processes all nodes"""
    latex_content = b"""
\\section{Direct Section}
Some direct content without document environment.
"""
    in_doc = InputDocument(
        path_or_stream=BytesIO(latex_content),
        format=InputFormat.LATEX,
        backend=LatexDocumentBackend,
        filename="test.tex",
    )
    backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content))
    doc = backend.convert()
    md = doc.export_to_markdown()
    assert "Direct Section" in md or "direct content" in md


def test_latex_starred_table_and_figure():
    """Test starred table* and figure* environments"""
    latex_content = b"""
\\documentclass{article}
\\begin{document}
\\begin{table*}
\\begin{tabular}{c}
Wide table
\\end{tabular}
\\end{table*}
\\begin{figure*}
\\includegraphics{wide.png}
\\end{figure*}
\\end{document}
"""
    in_doc = InputDocument(
        path_or_stream=BytesIO(latex_content),
        format=InputFormat.LATEX,
        backend=LatexDocumentBackend,
        filename="test.tex",
    )
    backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content))
    doc = backend.convert()
    assert len(doc.tables) >= 1
    assert len(doc.pictures) >= 1


def test_latex_newline_macro():
    """Test handling of \\\\ newline macro"""
    latex_content = b"""
\\documentclass{article}
\\begin{document}
Line one\\\\
Line two
\\end{document}
"""
    in_doc = InputDocument(
        path_or_stream=BytesIO(latex_content),
        format=InputFormat.LATEX,
        backend=LatexDocumentBackend,
        filename="test.tex",
    )
    backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content))
    doc = backend.convert()
    # Should not crash
    assert doc is not None


def test_latex_filecontents_ignored():
    """Test filecontents environment is ignored"""
    latex_content = b"""
\\documentclass{article}
\\begin{filecontents}{sample.bib}
@article{test, author={A}, title={B}}
\\end{filecontents}
\\begin{document}
Actual content.
\\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() md = doc.export_to_markdown() assert "Actual content" in md # filecontents should not appear in output assert "@article" not in md def test_latex_tilde_macro(): """Test ~ (non-breaking space) macro handling""" latex_content = b""" \\documentclass{article} \\begin{document} Dr.~Smith arrived. \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() md = doc.export_to_markdown() assert "Smith" in md def test_latex_math_environment(): """Test math environment (not displaymath)""" latex_content = rb""" \documentclass{article} \begin{document} Inline: \begin{math}a+b\end{math}. \end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() formulas = [t for t in doc.texts if t.label == DocItemLabel.FORMULA] assert len(formulas) >= 1 def test_latex_displaymath_brackets(): """Test \\[ \\] display math""" latex_content = rb""" \documentclass{article} \begin{document} Display: \[ x^2 + y^2 = z^2 \] \end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() formulas = [t for t in doc.texts if t.label == DocItemLabel.FORMULA] assert len(formulas) >= 1 def test_latex_citet_macro_2(): """Test citet citation macro - citations inline with text""" latex_content = b""" \\documentclass{article} \\begin{document} \\citet{author2022} showed this. 
\\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() # Citations are now inline with text md = doc.export_to_markdown() assert "[author2022]" in md assert "showed this" in md # E2E Ground-Truth Tests @pytest.fixture(scope="module") def latex_paths() -> list[Path]: """Find all LaTeX files in the test data directory.""" directory = Path("./tests/data/latex/") if not directory.exists(): return [] paths = list(directory.glob("*.tex")) for subdir in directory.iterdir(): if subdir.is_dir(): if (subdir / "main.tex").exists(): paths.append(subdir / "main.tex") elif (subdir / f"arxiv_{subdir.name}.tex").exists(): paths.append(subdir / f"arxiv_{subdir.name}.tex") return sorted(paths) def get_latex_converter(): """Create a DocumentConverter for LaTeX files.""" converter = DocumentConverter(allowed_formats=[InputFormat.LATEX]) return converter def test_e2e_latex_conversions(latex_paths): """E2E test for LaTeX conversions with ground-truth comparison.""" if not latex_paths: pytest.skip("No LaTeX test files found") converter = get_latex_converter() for latex_path in latex_paths: if latex_path.parent.resolve() == LATEX_DATA_DIR.resolve(): gt_name = latex_path.name else: gt_name = f"{latex_path.parent.name}_{latex_path.name}" gt_path = LATEX_DATA_DIR.parent / "groundtruth" / "docling_v2" / gt_name conv_result: ConversionResult = converter.convert(latex_path) doc: DoclingDocument = conv_result.document pred_md: str = doc.export_to_markdown() assert verify_export(pred_md, str(gt_path) + ".md", generate=GENERATE), ( f"Markdown export mismatch for {latex_path}" ) pred_itxt: str = doc._export_to_indented_text( max_text_len=70, explicit_tables=False ) assert verify_export(pred_itxt, str(gt_path) + ".itxt", generate=GENERATE), ( f"Indented text export mismatch for {latex_path}" ) assert verify_document(doc, str(gt_path) + ".json", GENERATE), ( f"Document JSON mismatch for {latex_path}" ) def test_latex_document_with_leading_comments(): """Test that documents starting with comment lines don't cause regex errors""" latex_content = b"""% This is a leading comment % Another comment line \\documentclass{article} \\begin{document} \\section{Test Section} This is test content. \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() # Should parse successfully without regex errors assert len(doc.texts) > 0 md = doc.export_to_markdown() assert "Test Section" in md assert "test content" in md def test_latex_custom_macro_with_backslash(): """Test that custom macros containing backslashes don't cause regex errors""" latex_content = b"""\\documentclass{article} \\newcommand{\\myterm}{special term} \\newcommand{\\myvalue}{42} \\begin{document} This is \\myterm and the value is \\myvalue. 
\\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() # Should parse successfully without regex errors assert len(doc.texts) > 0 md = doc.export_to_markdown() # The macro expansion should work assert "special term" in md and "42" in md def test_latex_figure_with_caption(): """Test that figure environment properly groups caption and image""" latex_content = b"""\\documentclass{article} \\begin{document} \\begin{figure} \\includegraphics{test.png} \\caption{This is a test figure caption} \\label{fig:test} \\end{figure} \\end{document} """ in_doc = InputDocument( path_or_stream=BytesIO(latex_content), format=InputFormat.LATEX, backend=LatexDocumentBackend, filename="test.tex", ) backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content)) doc = backend.convert() # Should have a figure group figure_groups = [g for g in doc.groups if g.name == "figure"] assert len(figure_groups) >= 1 # Should have picture and caption assert len(doc.pictures) >= 1 captions = [t for t in doc.texts if t.label == DocItemLabel.CAPTION] # includegraphics creates one caption, and \caption macro creates another assert len(captions) >= 1 def extract_macro_name_old(raw_string): """The OLD buggy implementation""" # This was the original broken code macro_name_raw = raw_string.strip("{} ") macro_name = macro_name_raw.lstrip("\\") return macro_name def extract_macro_name_new(raw_string): """The NEW fixed implementation""" # This is the fixed code macro_name = raw_string.strip("{} \n\t\\") if macro_name.startswith("\\"): macro_name = macro_name[1:] return macro_name def test_macro_extraction(): """Test various formats of macro names""" print("\n" + "=" * 80) print("TESTING MACRO NAME EXTRACTION LOGIC") print("=" * 80) # Test cases: (input, expected_output) test_cases = [ (r"{\myterm}", "myterm"), (r"\myterm", "myterm"), (r"{ \myterm }", "myterm"), (r"{ \myvalue }", "myvalue"), (r"{\important}", "important"), (r"{ \test }", "test"), (r"{\alpha}", "alpha"), ] print("\n" + "-" * 80) print("OLD (BUGGY) IMPLEMENTATION:") print("-" * 80) old_passed = 0 for input_str, expected in test_cases: result = extract_macro_name_old(input_str) status = "✓" if result == expected else "✗" print( f"{status} Input: {input_str!r:20} → Result: {result!r:15} (expected: {expected!r})" ) if result == expected: old_passed += 1 print("\n" + "-" * 80) print("NEW (FIXED) IMPLEMENTATION:") print("-" * 80) new_passed = 0 for input_str, expected in test_cases: result = extract_macro_name_new(input_str) status = "✓" if result == expected else "✗" print( f"{status} Input: {input_str!r:20} → Result: {result!r:15} (expected: {expected!r})" ) if result == expected: new_passed += 1 print("\n" + "=" * 80) print("RESULTS:") print("=" * 80) print(f"OLD implementation: {old_passed}/{len(test_cases)} tests passed") print(f"NEW implementation: {new_passed}/{len(test_cases)} tests passed") if new_passed == len(test_cases): print("\n✓✓ SUCCESS: New implementation fixes the bug!") else: print("\n✗✗ FAILURE: New implementation still has issues") assert new_passed == len(test_cases), ( f"New implementation failed: {new_passed}/{len(test_cases)} tests passed" ) def test_edge_cases(): """Test edge cases and special characters""" print("\n" + "=" * 80) print("TESTING EDGE CASES") print("=" * 80) edge_cases = [ # Format: (input, expected, 
description)
        (r"{\cmd}", "cmd", "Simple macro"),
        (r"{\\cmd}", "cmd", "Double backslash"),
        (r"{ \cmd }", "cmd", "Spaces around macro"),
        (r"{\ cmd }", "cmd", "Extra spaces"),
        (r"{\my_macro}", "my_macro", "Underscore in name"),
        (r"{\MyMacro}", "MyMacro", "CamelCase"),
        (r"{\MACRO}", "MACRO", "Uppercase"),
    ]

    all_passed = True
    for input_str, expected, description in edge_cases:
        result = extract_macro_name_new(input_str)
        status = "✓" if result == expected else "✗"
        print(f"{status} {description:25} | Input: {input_str!r:20} → {result!r}")
        if result != expected:
            print(f"   Expected: {expected!r}")
            all_passed = False

    if all_passed:
        print("\n✓✓ All edge cases passed!")
    else:
        print("\n✗✗ Some edge cases failed")

    assert all_passed, "Some edge cases failed"


def test_debug_macro_extraction():
    """Debug test to see what's actually being extracted"""
    latex_content = b"""\\documentclass{article}
\\newcommand{\\myterm}{special term}
\\newcommand{\\myvalue}{42}
\\begin{document}
This is \\myterm and the value is \\myvalue.
\\end{document}
"""

    in_doc = InputDocument(
        path_or_stream=BytesIO(latex_content),
        format=InputFormat.LATEX,
        backend=LatexDocumentBackend,
        filename="test.tex",
    )
    backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content))

    # Run the conversion, then inspect which macros were extracted
    doc = backend.convert()

    # Print debug info
    print(f"\n{'=' * 80}")
    print("DEBUG INFO:")
    print(f"{'=' * 80}")
    print(f"Custom macros extracted: {backend._custom_macros}")
    print(f"Number of text items: {len(doc.texts)}")
    print("\nText items:")
    for i, text_item in enumerate(doc.texts):
        print(f"  {i}: {text_item.label} = {text_item.text!r}")

    md = doc.export_to_markdown()
    print("\nMarkdown output:")
    print(md)
    print(f"{'=' * 80}\n")

    # Check if macros were registered
    assert "myterm" in backend._custom_macros, "myterm not in _custom_macros!"
    assert backend._custom_macros["myterm"] == "special term"

    # Check if they were expanded
    assert "special term" in md, f"'special term' not in output: {md!r}"
    assert "42" in md, f"'42' not in output: {md!r}"
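

# A minimal parametrized sketch of the same extraction cases: pytest reports
# each failing input individually instead of aggregating pass counts. The
# case list here is a small hypothetical subset, not the project's full set.
@pytest.mark.parametrize(
    ("raw", "expected"),
    [
        (r"{\myterm}", "myterm"),
        (r"\myterm", "myterm"),
        (r"{ \myvalue }", "myvalue"),
        (r"{\\cmd}", "cmd"),
        (r"{\MyMacro}", "MyMacro"),
    ],
)
def test_macro_extraction_parametrized(raw, expected):
    # Reuses extract_macro_name_new defined earlier in this module.
    assert extract_macro_name_new(raw) == expected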
{ "repo_id": "docling-project/docling", "file_path": "tests/test_backend_latex.py", "license": "MIT License", "lines": 1164, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
docling-project/docling:docling/datamodel/stage_model_specs.py
"""Model specifications and presets for stage models. This module defines: 1. VlmModelSpec - Model configuration with engine-specific overrides 2. StageModelPreset - Preset combining model, engine, and stage config 3. StagePresetMixin - Mixin for stage options to manage presets """ import logging from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Set from pydantic import BaseModel, Field from docling.datamodel.pipeline_options_vlm_model import ( ResponseFormat, TransformersModelType, TransformersPromptStyle, ) from docling.datamodel.vlm_engine_options import BaseVlmEngineOptions from docling.models.inference_engines.image_classification.base import ( ImageClassificationEngineType, ) from docling.models.inference_engines.object_detection.base import ( ObjectDetectionEngineType, ) from docling.models.inference_engines.vlm.base import VlmEngineType if TYPE_CHECKING: from docling.datamodel.image_classification_engine_options import ( BaseImageClassificationEngineOptions, ) from docling.datamodel.object_detection_engine_options import ( BaseObjectDetectionEngineOptions, ) _log = logging.getLogger(__name__) # ============================================================================= # ENGINE-SPECIFIC MODEL CONFIGURATION # ============================================================================= class EngineModelConfig(BaseModel): """Engine-specific model configuration. Allows overriding model settings for specific engines. For example, MLX might use a different repo_id than Transformers. """ repo_id: Optional[str] = Field( default=None, description="Override model repository ID for this engine" ) revision: Optional[str] = Field( default=None, description="Override model revision for this engine" ) torch_dtype: Optional[str] = Field( default=None, description="Override torch dtype for this engine (e.g., 'bfloat16')", ) extra_config: Dict[str, Any] = Field( default_factory=dict, description="Additional engine-specific configuration" ) def merge_with( self, base_repo_id: str, base_revision: str = "main" ) -> "EngineModelConfig": """Merge with base configuration. Args: base_repo_id: Base repository ID base_revision: Base revision Returns: Merged configuration with overrides applied """ return EngineModelConfig( repo_id=self.repo_id or base_repo_id, revision=self.revision or base_revision, torch_dtype=self.torch_dtype, extra_config=self.extra_config, ) class ApiModelConfig(BaseModel): """API-specific model configuration. For API engines, configuration is simpler - just params to send. """ params: Dict[str, Any] = Field( default_factory=dict, description="API parameters (model name, max_tokens, etc.)", ) def merge_with(self, base_params: Dict[str, Any]) -> "ApiModelConfig": """Merge with base parameters. Args: base_params: Base API parameters Returns: Merged configuration with overrides applied """ merged_params = {**base_params, **self.params} return ApiModelConfig(params=merged_params) # ============================================================================= # VLM MODEL SPECIFICATION # ============================================================================= class VlmModelSpec(BaseModel): """Specification for a VLM model. This defines the model configuration that is independent of the engine. 
It includes: - Default model repository ID - Prompt template - Response format - Engine-specific overrides """ name: str = Field(description="Human-readable model name") default_repo_id: str = Field(description="Default HuggingFace repository ID") revision: str = Field(default="main", description="Default model revision") prompt: str = Field(description="Prompt template for this model") response_format: ResponseFormat = Field( description="Expected response format from the model" ) supported_engines: Optional[Set[VlmEngineType]] = Field( default=None, description="Set of supported engines (None = all supported)" ) engine_overrides: Dict[VlmEngineType, EngineModelConfig] = Field( default_factory=dict, description="Engine-specific configuration overrides" ) api_overrides: Dict[VlmEngineType, ApiModelConfig] = Field( default_factory=dict, description="API-specific configuration overrides" ) trust_remote_code: bool = Field( default=False, description="Whether to trust remote code for this model" ) stop_strings: List[str] = Field( default_factory=list, description="Stop strings for generation" ) max_new_tokens: int = Field( default=4096, description="Maximum number of new tokens to generate" ) def get_repo_id(self, engine_type: VlmEngineType) -> str: """Get the repository ID for a specific engine. Args: engine_type: The engine type Returns: Repository ID (with engine override if applicable) """ if engine_type in self.engine_overrides: override = self.engine_overrides[engine_type] return override.repo_id or self.default_repo_id return self.default_repo_id def get_revision(self, engine_type: VlmEngineType) -> str: """Get the model revision for a specific engine. Args: engine_type: The engine type Returns: Model revision (with engine override if applicable) """ if engine_type in self.engine_overrides: override = self.engine_overrides[engine_type] return override.revision or self.revision return self.revision def get_api_params(self, engine_type: VlmEngineType) -> Dict[str, Any]: """Get API parameters for a specific engine. Args: engine_type: The engine type Returns: API parameters (with engine override if applicable) """ base_params = {"model": self.default_repo_id} if engine_type in self.api_overrides: override = self.api_overrides[engine_type] return override.merge_with(base_params).params return base_params def is_engine_supported(self, engine_type: VlmEngineType) -> bool: """Check if an engine is supported by this model. Args: engine_type: The engine type to check Returns: True if supported, False otherwise """ if self.supported_engines is None: return True return engine_type in self.supported_engines def get_engine_config(self, engine_type: VlmEngineType) -> EngineModelConfig: """Get EngineModelConfig for a specific engine type. This is the single source of truth for generating engine-specific configuration from the model spec. 
Args:
            engine_type: The engine type to get config for

        Returns:
            EngineModelConfig with repo_id, revision, and engine-specific extra_config
        """
        # Get repo_id and revision (with engine-specific overrides if present)
        repo_id = self.get_repo_id(engine_type)
        revision = self.get_revision(engine_type)

        # Get engine-specific extra_config
        extra_config = {}
        if engine_type in self.engine_overrides:
            extra_config = self.engine_overrides[engine_type].extra_config.copy()

        return EngineModelConfig(
            repo_id=repo_id,
            revision=revision,
            extra_config=extra_config,
        )

    def has_explicit_engine_export(self, engine_type: VlmEngineType) -> bool:
        """Check if this model has an explicit export for the given engine.

        An explicit export means either:
        1. The engine has a different repo_id in engine_overrides, OR
        2. The engine is explicitly listed in supported_engines (not None)

        This is used by auto_inline to determine if it should attempt to use
        a specific engine. For example, MLX should only be used if there's an
        actual MLX export available (different repo_id) or if the model
        explicitly declares MLX support.

        Args:
            engine_type: The engine type to check

        Returns:
            True if there's an explicit export, False otherwise

        Examples:
            >>> # Model with MLX export (different repo_id); prompt and
            >>> # response_format are required fields, so placeholders are used
            >>> spec = VlmModelSpec(
            ...     name="Test",
            ...     default_repo_id="org/model",
            ...     prompt="Convert this page.",
            ...     response_format=ResponseFormat.MARKDOWN,
            ...     engine_overrides={
            ...         VlmEngineType.MLX: EngineModelConfig(repo_id="org/model-mlx")
            ...     }
            ... )
            >>> spec.has_explicit_engine_export(VlmEngineType.MLX)
            True

            >>> # Model without MLX export (same repo_id or no override)
            >>> spec = VlmModelSpec(
            ...     name="Test",
            ...     default_repo_id="org/model",
            ...     prompt="Convert this page.",
            ...     response_format=ResponseFormat.MARKDOWN,
            ... )
            >>> spec.has_explicit_engine_export(VlmEngineType.MLX)
            False

            >>> # Model with explicit supported_engines
            >>> spec = VlmModelSpec(
            ...     name="Test",
            ...     default_repo_id="org/model",
            ...     prompt="Convert this page.",
            ...     response_format=ResponseFormat.MARKDOWN,
            ...     supported_engines={VlmEngineType.MLX}
            ... )
            >>> spec.has_explicit_engine_export(VlmEngineType.MLX)
            True
        """
        # If supported_engines is explicitly set and includes this engine
        if self.supported_engines is not None:
            return engine_type in self.supported_engines

        # Check if there's a different repo_id for this engine
        if engine_type in self.engine_overrides:
            override = self.engine_overrides[engine_type]
            if (
                override.repo_id is not None
                and override.repo_id != self.default_repo_id
            ):
                return True

        return False


# =============================================================================
# OBJECT DETECTION MODEL SPECIFICATION
# =============================================================================


class ObjectDetectionModelSpec(BaseModel):
    """Specification for an object detection model.

    Simpler than VlmModelSpec - no prompts, no preprocessing params.
    Preprocessing comes from HuggingFace preprocessor configs.
    Model files are assumed to be at the root of the HuggingFace repo.
    """

    name: str = Field(description="Human-readable model name")
    repo_id: str = Field(description="Default HuggingFace repository ID")
    revision: str = Field(default="main", description="Default model revision")
    engine_overrides: Dict["ObjectDetectionEngineType", EngineModelConfig] = Field(
        default_factory=dict,
        description="Engine-specific configuration overrides",
    )

    def get_engine_config(
        self, engine_type: "ObjectDetectionEngineType"
    ) -> EngineModelConfig:
        """Get EngineModelConfig for a specific object-detection engine.
Args: engine_type: The engine type being requested Returns: EngineModelConfig populated with repo/revision and engine overrides """ override = self.engine_overrides.get(engine_type) if override is not None: return override.merge_with(self.repo_id, self.revision) return EngineModelConfig(repo_id=self.repo_id, revision=self.revision) def get_repo_id(self, engine_type: "ObjectDetectionEngineType") -> str: """Get repository ID for specific engine. Args: engine_type: The engine type Returns: Repository ID (with engine override if applicable) """ override = self.engine_overrides.get(engine_type) if override and override.repo_id: return override.repo_id return self.repo_id def get_revision(self, engine_type: "ObjectDetectionEngineType") -> str: """Get revision for specific engine. Args: engine_type: The engine type Returns: Model revision (with engine override if applicable) """ override = self.engine_overrides.get(engine_type) if override and override.revision: return override.revision return self.revision # ============================================================================= # IMAGE CLASSIFICATION MODEL SPECIFICATION # ============================================================================= class ImageClassificationModelSpec(BaseModel): """Specification for an image-classification model.""" name: str = Field(description="Human-readable model name") repo_id: str = Field(description="Default HuggingFace repository ID") revision: str = Field(default="main", description="Default model revision") engine_overrides: Dict["ImageClassificationEngineType", EngineModelConfig] = Field( default_factory=dict, description="Engine-specific configuration overrides", ) def get_engine_config( self, engine_type: "ImageClassificationEngineType" ) -> EngineModelConfig: """Get EngineModelConfig for a specific image-classification engine.""" override = self.engine_overrides.get(engine_type) if override is not None: return override.merge_with(self.repo_id, self.revision) return EngineModelConfig(repo_id=self.repo_id, revision=self.revision) def get_repo_id(self, engine_type: "ImageClassificationEngineType") -> str: """Get repository ID for specific engine.""" override = self.engine_overrides.get(engine_type) if override and override.repo_id: return override.repo_id return self.repo_id def get_revision(self, engine_type: "ImageClassificationEngineType") -> str: """Get revision for specific engine.""" override = self.engine_overrides.get(engine_type) if override and override.revision: return override.revision return self.revision # ============================================================================= # STAGE PRESET SYSTEM # ============================================================================= class StageModelPreset(BaseModel): """A preset configuration combining stage, model, and prompt. Presets provide convenient named configurations that users can reference by ID instead of manually configuring everything. 
""" preset_id: str = Field( description="Simple preset identifier (e.g., 'smolvlm', 'granite')" ) name: str = Field(description="Human-readable preset name") description: str = Field(description="Description of what this preset does") model_spec: VlmModelSpec = Field(description="Model specification for this preset") scale: float = Field(default=2.0, description="Image scaling factor") max_size: Optional[int] = Field(default=None, description="Maximum image dimension") default_engine_type: VlmEngineType = Field( default=VlmEngineType.AUTO_INLINE, description="Default engine to use with this preset", ) stage_options: Dict[str, Any] = Field( default_factory=dict, description="Additional stage-specific options" ) @property def supported_engines(self) -> Set[VlmEngineType]: """Get supported engines from model spec.""" if self.model_spec.supported_engines is None: return set(VlmEngineType) return self.model_spec.supported_engines class StagePresetMixin: """Mixin for stage options classes that support presets. Each stage options class that uses this mixin manages its own presets. This is more decentralized than a global registry. Usage: class MyStageOptions(StagePresetMixin, BaseModel): ... # Register presets MyStageOptions.register_preset(preset1) MyStageOptions.register_preset(preset2) # Use presets options = MyStageOptions.from_preset("preset1") """ # Class variable to store presets for this specific stage # Note: Each subclass gets its own _presets dict via __init_subclass__ _presets: ClassVar[Dict[str, StageModelPreset]] def __init_subclass__(cls, **kwargs): """Initialize each subclass with its own preset registry. This ensures that each stage options class has an isolated preset registry, preventing namespace collisions across different stages. """ super().__init_subclass__(**kwargs) # Each subclass gets its own _presets dictionary cls._presets = {} @classmethod def register_preset(cls, preset: StageModelPreset) -> None: """Register a preset for this stage options class. Args: preset: The preset to register Note: If preset ID already registered, it will be silently skipped. This allows for idempotent registration at module import time. """ if preset.preset_id not in cls._presets: cls._presets[preset.preset_id] = preset else: _log.error( f"Preset '{preset.preset_id}' already registered for {cls.__name__}" ) @classmethod def get_preset(cls, preset_id: str) -> StageModelPreset: """Get a specific preset. Args: preset_id: The preset identifier Returns: The requested preset Raises: KeyError: If preset not found """ if preset_id not in cls._presets: raise KeyError( f"Preset '{preset_id}' not found for {cls.__name__}. " f"Available presets: {list(cls._presets.keys())}" ) return cls._presets[preset_id] @classmethod def list_presets(cls) -> List[StageModelPreset]: """List all presets for this stage. Returns: List of presets """ return list(cls._presets.values()) @classmethod def list_preset_ids(cls) -> List[str]: """List all preset IDs for this stage. Returns: List of preset IDs """ return list(cls._presets.keys()) @classmethod def get_preset_info(cls) -> List[Dict[str, str]]: """Get summary info for all presets (useful for CLI). 
Returns: List of dicts with preset_id, name, description, model """ return [ { "preset_id": p.preset_id, "name": p.name, "description": p.description, "model": p.model_spec.name, "default_engine": p.default_engine_type.value, } for p in cls._presets.values() ] @classmethod def from_preset( cls, preset_id: str, engine_options: Optional[BaseVlmEngineOptions] = None, **overrides, ): """Create options from a registered preset. Args: preset_id: The preset identifier engine_options: Optional engine override **overrides: Additional option overrides Returns: Instance of the stage options class """ from docling.datamodel.vlm_engine_options import ( ApiVlmEngineOptions, AutoInlineVlmEngineOptions, MlxVlmEngineOptions, TransformersVlmEngineOptions, VllmVlmEngineOptions, ) preset = cls.get_preset(preset_id) # Create engine options if not provided if engine_options is None: if preset.default_engine_type == VlmEngineType.AUTO_INLINE: engine_options = AutoInlineVlmEngineOptions() elif VlmEngineType.is_api_variant(preset.default_engine_type): engine_options = ApiVlmEngineOptions( engine_type=preset.default_engine_type ) elif preset.default_engine_type == VlmEngineType.TRANSFORMERS: engine_options = TransformersVlmEngineOptions() elif preset.default_engine_type == VlmEngineType.MLX: engine_options = MlxVlmEngineOptions() elif preset.default_engine_type == VlmEngineType.VLLM: engine_options = VllmVlmEngineOptions() else: engine_options = AutoInlineVlmEngineOptions() # Create instance with preset values # Type ignore because cls is the concrete options class, not the mixin instance = cls( # type: ignore[call-arg] model_spec=preset.model_spec, engine_options=engine_options, scale=preset.scale, max_size=preset.max_size, **preset.stage_options, ) # Apply overrides for key, value in overrides.items(): setattr(instance, key, value) return instance class ObjectDetectionStagePreset(BaseModel): """Preset definition for object detection-powered stages.""" preset_id: str = Field(description="Preset identifier") name: str = Field(description="Human-readable preset name") description: str = Field(description="Description of this preset") model_spec: ObjectDetectionModelSpec = Field( description="Object detection model specification" ) default_engine_type: ObjectDetectionEngineType = Field( default=ObjectDetectionEngineType.ONNXRUNTIME, description="Default inference engine to use", ) stage_options: Dict[str, Any] = Field( default_factory=dict, description="Additional stage-specific defaults" ) class ObjectDetectionStagePresetMixin: """Mixin to enable preset loading for object detection stages.""" _presets: ClassVar[Dict[str, ObjectDetectionStagePreset]] def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) cls._presets = {} @classmethod def register_preset(cls, preset: ObjectDetectionStagePreset) -> None: if preset.preset_id not in cls._presets: cls._presets[preset.preset_id] = preset else: _log.error( f"Preset '{preset.preset_id}' already registered for {cls.__name__}" ) @classmethod def get_preset(cls, preset_id: str) -> ObjectDetectionStagePreset: if preset_id not in cls._presets: raise KeyError( f"Preset '{preset_id}' not found for {cls.__name__}. 
" f"Available presets: {list(cls._presets.keys())}" ) return cls._presets[preset_id] @classmethod def list_presets(cls) -> List[ObjectDetectionStagePreset]: return list(cls._presets.values()) @classmethod def list_preset_ids(cls) -> List[str]: return list(cls._presets.keys()) @classmethod def get_preset_info(cls) -> List[Dict[str, str]]: return [ { "preset_id": p.preset_id, "name": p.name, "description": p.description, "model": p.model_spec.name, "default_engine": p.default_engine_type.value, } for p in cls._presets.values() ] @classmethod def from_preset( cls, preset_id: str, engine_options: Optional["BaseObjectDetectionEngineOptions"] = None, **overrides: Any, ): from docling.datamodel.object_detection_engine_options import ( ApiKserveV2ObjectDetectionEngineOptions, OnnxRuntimeObjectDetectionEngineOptions, TransformersObjectDetectionEngineOptions, ) preset = cls.get_preset(preset_id) if engine_options is None: if preset.default_engine_type == ObjectDetectionEngineType.ONNXRUNTIME: engine_options = OnnxRuntimeObjectDetectionEngineOptions() elif preset.default_engine_type == ObjectDetectionEngineType.TRANSFORMERS: engine_options = TransformersObjectDetectionEngineOptions() elif preset.default_engine_type == ObjectDetectionEngineType.API_KSERVE_V2: raise ValueError( f"Preset '{preset_id}' uses API_KSERVE_V2 engine which requires explicit " "engine_options with a 'url' parameter. Please provide " "engine_options=ApiKserveV2ObjectDetectionEngineOptions(url='...') " "when calling from_preset()." ) else: raise ValueError( f"Unsupported engine type {preset.default_engine_type} for presets" ) instance = cls( # type: ignore[call-arg] model_spec=preset.model_spec, engine_options=engine_options, **preset.stage_options, ) for key, value in overrides.items(): setattr(instance, key, value) return instance class ImageClassificationStagePreset(BaseModel): """Preset definition for image classification-powered stages.""" preset_id: str = Field(description="Preset identifier") name: str = Field(description="Human-readable preset name") description: str = Field(description="Description of this preset") model_spec: ImageClassificationModelSpec = Field( description="Image classification model specification" ) default_engine_type: ImageClassificationEngineType = Field( default=ImageClassificationEngineType.TRANSFORMERS, description="Default inference engine to use", ) stage_options: Dict[str, Any] = Field( default_factory=dict, description="Additional stage-specific defaults" ) class ImageClassificationStagePresetMixin: """Mixin to enable preset loading for image-classification stages.""" _presets: ClassVar[Dict[str, ImageClassificationStagePreset]] def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) cls._presets = {} @classmethod def register_preset(cls, preset: ImageClassificationStagePreset) -> None: if preset.preset_id not in cls._presets: cls._presets[preset.preset_id] = preset else: _log.error( f"Preset '{preset.preset_id}' already registered for {cls.__name__}" ) @classmethod def get_preset(cls, preset_id: str) -> ImageClassificationStagePreset: if preset_id not in cls._presets: raise KeyError( f"Preset '{preset_id}' not found for {cls.__name__}. 
" f"Available presets: {list(cls._presets.keys())}" ) return cls._presets[preset_id] @classmethod def list_presets(cls) -> List[ImageClassificationStagePreset]: return list(cls._presets.values()) @classmethod def list_preset_ids(cls) -> List[str]: return list(cls._presets.keys()) @classmethod def get_preset_info(cls) -> List[Dict[str, str]]: return [ { "preset_id": p.preset_id, "name": p.name, "description": p.description, "model": p.model_spec.name, "default_engine": p.default_engine_type.value, } for p in cls._presets.values() ] @classmethod def from_preset( cls, preset_id: str, engine_options: Optional["BaseImageClassificationEngineOptions"] = None, **overrides: Any, ): from docling.datamodel.image_classification_engine_options import ( ApiKserveV2ImageClassificationEngineOptions, OnnxRuntimeImageClassificationEngineOptions, TransformersImageClassificationEngineOptions, ) preset = cls.get_preset(preset_id) if engine_options is None: if preset.default_engine_type == ImageClassificationEngineType.ONNXRUNTIME: engine_options = OnnxRuntimeImageClassificationEngineOptions() elif ( preset.default_engine_type == ImageClassificationEngineType.TRANSFORMERS ): engine_options = TransformersImageClassificationEngineOptions() elif ( preset.default_engine_type == ImageClassificationEngineType.API_KSERVE_V2 ): raise ValueError( f"Preset '{preset_id}' uses API_KSERVE_V2 engine which requires explicit " "engine_options with a 'url' parameter. Please provide " "engine_options=ApiKserveV2ImageClassificationEngineOptions(url='...') " "when calling from_preset()." ) else: raise ValueError( f"Unsupported engine type {preset.default_engine_type} for presets" ) instance = cls( # type: ignore[call-arg] model_spec=preset.model_spec, engine_options=engine_options, **preset.stage_options, ) for key, value in overrides.items(): setattr(instance, key, value) return instance # ============================================================================= # PRESET DEFINITIONS # ============================================================================= # ----------------------------------------------------------------------------- # SHARED MODEL SPECS (for reuse across multiple stages) # ----------------------------------------------------------------------------- # Shared Granite Docling model spec used across VLM_CONVERT and CODE_FORMULA stages # Note: prompt and response_format are intentionally excluded here as they vary per stage GRANITE_DOCLING_MODEL_SPEC_BASE = { "name": "Granite-Docling-258M", "default_repo_id": "ibm-granite/granite-docling-258M", "stop_strings": ["</doctag>", "<|end_of_text|>"], "max_new_tokens": 8192, "engine_overrides": { VlmEngineType.MLX: EngineModelConfig( repo_id="ibm-granite/granite-docling-258M-mlx" ), VlmEngineType.TRANSFORMERS: EngineModelConfig( extra_config={ "transformers_model_type": TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT, "extra_generation_config": {"skip_special_tokens": False}, } ), }, "api_overrides": { VlmEngineType.API_OLLAMA: ApiModelConfig( params={"model": "ibm/granite-docling:258m"} ), }, } # Shared Pixtral model spec used across VLM_CONVERT and PICTURE_DESCRIPTION stages PIXTRAL_MODEL_SPEC_BASE = { "name": "Pixtral-12B", "default_repo_id": "mistral-community/pixtral-12b", "engine_overrides": { VlmEngineType.MLX: EngineModelConfig(repo_id="mlx-community/pixtral-12b-bf16"), VlmEngineType.TRANSFORMERS: EngineModelConfig( extra_config={ "transformers_model_type": TransformersModelType.AUTOMODEL_VISION2SEQ, } ), }, } # Shared Granite Vision model spec used across 
VLM_CONVERT and PICTURE_DESCRIPTION stages GRANITE_VISION_MODEL_SPEC_BASE = { "name": "Granite-Vision-3.3-2B", "default_repo_id": "ibm-granite/granite-vision-3.3-2b", "supported_engines": { VlmEngineType.TRANSFORMERS, VlmEngineType.VLLM, VlmEngineType.API_OLLAMA, VlmEngineType.API_LMSTUDIO, }, "engine_overrides": { VlmEngineType.TRANSFORMERS: EngineModelConfig( extra_config={ "transformers_model_type": TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT, } ), }, "api_overrides": { VlmEngineType.API_OLLAMA: ApiModelConfig( params={"model": "granite3.3-vision:2b"} ), }, } # ----------------------------------------------------------------------------- # OBJECT DETECTION PRESETS # ----------------------------------------------------------------------------- OBJECT_DETECTION_LAYOUT_HERON = ObjectDetectionStagePreset( preset_id="layout_heron_default", name="Layout Heron", description="RT-DETR layout-heron model (ResNet50)", model_spec=ObjectDetectionModelSpec( name="layout_heron", repo_id="docling-project/docling-layout-heron", revision="main", engine_overrides={ ObjectDetectionEngineType.ONNXRUNTIME: EngineModelConfig( repo_id="docling-project/docling-layout-heron-onnx", extra_config={"model_filename": "model.onnx"}, ) }, ), default_engine_type=ObjectDetectionEngineType.TRANSFORMERS, ) # ----------------------------------------------------------------------------- # IMAGE CLASSIFICATION PRESETS # ----------------------------------------------------------------------------- IMAGE_CLASSIFICATION_DOCUMENT_FIGURE = ImageClassificationStagePreset( preset_id="document_figure_classifier_v2", name="Document Figure Classifier v2", description="EfficientNet model for classifying document pictures", model_spec=ImageClassificationModelSpec( name="document_figure_classifier_v2", repo_id="docling-project/DocumentFigureClassifier-v2.0", revision="main", ), default_engine_type=ImageClassificationEngineType.TRANSFORMERS, ) # ----------------------------------------------------------------------------- # VLM_CONVERT PRESETS (for full page conversion) # ----------------------------------------------------------------------------- VLM_CONVERT_SMOLDOCLING = StageModelPreset( preset_id="smoldocling", name="SmolDocling", description="Lightweight DocTags model optimized for document conversion (256M parameters)", model_spec=VlmModelSpec( name="SmolDocling-256M", default_repo_id="docling-project/SmolDocling-256M-preview", prompt="Convert this page to docling.", response_format=ResponseFormat.DOCTAGS, stop_strings=["</doctag>", "<end_of_utterance>"], engine_overrides={ VlmEngineType.MLX: EngineModelConfig( repo_id="docling-project/SmolDocling-256M-preview-mlx-bf16" ), VlmEngineType.TRANSFORMERS: EngineModelConfig( torch_dtype="bfloat16", extra_config={ "transformers_model_type": TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT, }, ), }, ), scale=2.0, default_engine_type=VlmEngineType.AUTO_INLINE, ) VLM_CONVERT_GRANITE_DOCLING = StageModelPreset( preset_id="granite_docling", name="Granite-Docling", description="IBM Granite DocTags model for document conversion (258M parameters)", model_spec=VlmModelSpec( **GRANITE_DOCLING_MODEL_SPEC_BASE, prompt="Convert this page to docling.", response_format=ResponseFormat.DOCTAGS, ), scale=2.0, default_engine_type=VlmEngineType.AUTO_INLINE, ) VLM_CONVERT_DEEPSEEK_OCR = StageModelPreset( preset_id="deepseek_ocr", name="DeepSeek-OCR", description="DeepSeek OCR model via Ollama/LM Studio for document conversion (3B parameters)", model_spec=VlmModelSpec( name="DeepSeek-OCR-3B", 
default_repo_id="deepseek-ocr:3b", # Ollama model name prompt="<|grounding|>Convert the document to markdown. ", response_format=ResponseFormat.DEEPSEEKOCR_MARKDOWN, supported_engines={VlmEngineType.API_OLLAMA, VlmEngineType.API_LMSTUDIO}, api_overrides={ VlmEngineType.API_OLLAMA: ApiModelConfig( params={"model": "deepseek-ocr:3b", "max_tokens": 4096} ), VlmEngineType.API_LMSTUDIO: ApiModelConfig( params={"model": "deepseek-ocr", "max_tokens": 4096} ), }, ), scale=2.0, default_engine_type=VlmEngineType.API_OLLAMA, ) VLM_CONVERT_GRANITE_VISION = StageModelPreset( preset_id="granite_vision", name="Granite-Vision", description="IBM Granite Vision model for markdown conversion (2B parameters)", model_spec=VlmModelSpec( **GRANITE_VISION_MODEL_SPEC_BASE, prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!", response_format=ResponseFormat.MARKDOWN, ), scale=2.0, default_engine_type=VlmEngineType.AUTO_INLINE, ) VLM_CONVERT_PIXTRAL = StageModelPreset( preset_id="pixtral", name="Pixtral-12B", description="Mistral Pixtral model for markdown conversion (12B parameters)", model_spec=VlmModelSpec( **PIXTRAL_MODEL_SPEC_BASE, prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!", response_format=ResponseFormat.MARKDOWN, ), scale=2.0, default_engine_type=VlmEngineType.AUTO_INLINE, ) VLM_CONVERT_GOT_OCR = StageModelPreset( preset_id="got_ocr", name="GOT-OCR-2.0", description="GOT OCR 2.0 model for markdown conversion", model_spec=VlmModelSpec( name="GOT-OCR-2.0", default_repo_id="stepfun-ai/GOT-OCR-2.0-hf", prompt="", response_format=ResponseFormat.MARKDOWN, supported_engines={VlmEngineType.TRANSFORMERS}, stop_strings=["<|im_end|>"], engine_overrides={ VlmEngineType.TRANSFORMERS: EngineModelConfig( extra_config={ "transformers_model_type": TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT, "transformers_prompt_style": TransformersPromptStyle.NONE, "extra_processor_kwargs": {"format": True}, } ), }, ), scale=2.0, default_engine_type=VlmEngineType.TRANSFORMERS, ) VLM_CONVERT_PHI4 = StageModelPreset( preset_id="phi4", name="Phi-4", description="Microsoft Phi-4 multimodal model for markdown conversion", model_spec=VlmModelSpec( name="Phi-4-Multimodal-Instruct", default_repo_id="microsoft/Phi-4-multimodal-instruct", prompt="Convert this page to MarkDown. Do not miss any text and only output the bare markdown", response_format=ResponseFormat.MARKDOWN, trust_remote_code=True, supported_engines={ VlmEngineType.TRANSFORMERS, VlmEngineType.VLLM, }, engine_overrides={ VlmEngineType.TRANSFORMERS: EngineModelConfig( extra_config={ "transformers_model_type": TransformersModelType.AUTOMODEL_CAUSALLM, "extra_generation_config": {"num_logits_to_keep": 0}, } ), }, ), scale=2.0, default_engine_type=VlmEngineType.AUTO_INLINE, ) VLM_CONVERT_QWEN = StageModelPreset( preset_id="qwen", name="Qwen2.5-VL-3B", description="Qwen vision-language model for markdown conversion (3B parameters)", model_spec=VlmModelSpec( name="Qwen2.5-VL-3B-Instruct", default_repo_id="Qwen/Qwen2.5-VL-3B-Instruct", prompt="Convert this page to markdown. 
Do not miss any text and only output the bare markdown!", response_format=ResponseFormat.MARKDOWN, engine_overrides={ VlmEngineType.MLX: EngineModelConfig( repo_id="mlx-community/Qwen2.5-VL-3B-Instruct-bf16" ), VlmEngineType.TRANSFORMERS: EngineModelConfig( extra_config={ "transformers_model_type": TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT, } ), }, ), scale=2.0, default_engine_type=VlmEngineType.AUTO_INLINE, ) VLM_CONVERT_GEMMA_12B = StageModelPreset( preset_id="gemma_12b", name="Gemma-3-12B", description="Google Gemma-3 vision model for markdown conversion (12B parameters)", model_spec=VlmModelSpec( name="Gemma-3-12B-IT", default_repo_id="google/gemma-3-12b-it", prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!", response_format=ResponseFormat.MARKDOWN, supported_engines={VlmEngineType.MLX}, engine_overrides={ VlmEngineType.MLX: EngineModelConfig( repo_id="mlx-community/gemma-3-12b-it-bf16" ), }, ), scale=2.0, default_engine_type=VlmEngineType.MLX, ) VLM_CONVERT_GEMMA_27B = StageModelPreset( preset_id="gemma_27b", name="Gemma-3-27B", description="Google Gemma-3 vision model for markdown conversion (27B parameters)", model_spec=VlmModelSpec( name="Gemma-3-27B-IT", default_repo_id="google/gemma-3-27b-it", prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!", response_format=ResponseFormat.MARKDOWN, supported_engines={VlmEngineType.MLX}, engine_overrides={ VlmEngineType.MLX: EngineModelConfig( repo_id="mlx-community/gemma-3-27b-it-bf16" ), }, ), scale=2.0, default_engine_type=VlmEngineType.MLX, ) VLM_CONVERT_DOLPHIN = StageModelPreset( preset_id="dolphin", name="Dolphin", description="ByteDance Dolphin OCR model for markdown conversion", model_spec=VlmModelSpec( name="Dolphin", default_repo_id="ByteDance/Dolphin", prompt="<s>Read text in the image. 
<Answer/>", response_format=ResponseFormat.MARKDOWN, engine_overrides={ VlmEngineType.TRANSFORMERS: EngineModelConfig( extra_config={ "transformers_model_type": TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT, "transformers_prompt_style": TransformersPromptStyle.RAW, } ), }, ), scale=2.0, default_engine_type=VlmEngineType.AUTO_INLINE, ) # ----------------------------------------------------------------------------- # PICTURE_DESCRIPTION PRESETS (for image captioning/description) # ----------------------------------------------------------------------------- PICTURE_DESC_SMOLVLM = StageModelPreset( preset_id="smolvlm", name="SmolVLM-256M", description="Lightweight vision-language model for image descriptions (256M parameters)", model_spec=VlmModelSpec( name="SmolVLM-256M-Instruct", default_repo_id="HuggingFaceTB/SmolVLM-256M-Instruct", prompt="Describe this image in a few sentences.", response_format=ResponseFormat.PLAINTEXT, engine_overrides={ VlmEngineType.MLX: EngineModelConfig( repo_id="moot20/SmolVLM-256M-Instruct-MLX" ), VlmEngineType.TRANSFORMERS: EngineModelConfig( torch_dtype="bfloat16", extra_config={ "transformers_model_type": TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT, }, ), }, api_overrides={ VlmEngineType.API_LMSTUDIO: ApiModelConfig( params={"model": "smolvlm-256m-instruct"} ), }, ), scale=2.0, default_engine_type=VlmEngineType.AUTO_INLINE, stage_options={ "picture_area_threshold": 0.05, }, ) PICTURE_DESC_GRANITE_VISION = StageModelPreset( preset_id="granite_vision", name="Granite-Vision-3.3-2B", description="IBM Granite Vision model for detailed image descriptions (2B parameters)", model_spec=VlmModelSpec( **GRANITE_VISION_MODEL_SPEC_BASE, prompt="What is shown in this image?", response_format=ResponseFormat.PLAINTEXT, ), scale=2.0, default_engine_type=VlmEngineType.AUTO_INLINE, stage_options={ "picture_area_threshold": 0.05, }, ) PICTURE_DESC_PIXTRAL = StageModelPreset( preset_id="pixtral", name="Pixtral-12B", description="Mistral Pixtral model for detailed image descriptions (12B parameters)", model_spec=VlmModelSpec( **PIXTRAL_MODEL_SPEC_BASE, prompt="Describe this image in detail.", response_format=ResponseFormat.PLAINTEXT, ), scale=2.0, default_engine_type=VlmEngineType.AUTO_INLINE, stage_options={ "picture_area_threshold": 0.05, }, ) PICTURE_DESC_QWEN = StageModelPreset( preset_id="qwen", name="Qwen2.5-VL-3B", description="Qwen vision-language model for image descriptions (3B parameters)", model_spec=VlmModelSpec( name="Qwen2.5-VL-3B-Instruct", default_repo_id="Qwen/Qwen2.5-VL-3B-Instruct", prompt="Describe this image.", response_format=ResponseFormat.PLAINTEXT, engine_overrides={ VlmEngineType.MLX: EngineModelConfig( repo_id="mlx-community/Qwen2.5-VL-3B-Instruct-bf16" ), VlmEngineType.TRANSFORMERS: EngineModelConfig( extra_config={ "transformers_model_type": TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT, } ), }, ), scale=2.0, default_engine_type=VlmEngineType.AUTO_INLINE, stage_options={ "picture_area_threshold": 0.05, }, ) # ----------------------------------------------------------------------------- # CODE_FORMULA PRESETS (for code and formula extraction) # ----------------------------------------------------------------------------- CODE_FORMULA_CODEFORMULAV2 = StageModelPreset( preset_id="codeformulav2", name="CodeFormulaV2", description="Specialized model for code and formula extraction", model_spec=VlmModelSpec( name="CodeFormulaV2", default_repo_id="docling-project/CodeFormulaV2", prompt="", response_format=ResponseFormat.PLAINTEXT, 
stop_strings=["</doctag>", "<end_of_utterance>"], engine_overrides={ VlmEngineType.TRANSFORMERS: EngineModelConfig( extra_config={ "transformers_model_type": TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT, "extra_generation_config": {"skip_special_tokens": False}, } ), }, ), scale=2.0, default_engine_type=VlmEngineType.AUTO_INLINE, ) CODE_FORMULA_GRANITE_DOCLING = StageModelPreset( preset_id="granite_docling", name="Granite-Docling-CodeFormula", description="IBM Granite Docling model for code and formula extraction (258M parameters)", model_spec=VlmModelSpec( **GRANITE_DOCLING_MODEL_SPEC_BASE, prompt="", response_format=ResponseFormat.PLAINTEXT, ), scale=2.0, default_engine_type=VlmEngineType.AUTO_INLINE, )
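

# -----------------------------------------------------------------------------
# Illustrative usage sketch (an addition for clarity, not part of the module
# API). It shows the register/lookup round trip of the preset mixins defined
# above, using the shipped OBJECT_DETECTION_LAYOUT_HERON preset. The class
# `_DemoDetectionOptions` is hypothetical and exists only for this demo. Note
# that __init_subclass__ gives every concrete options class its own empty
# registry, so presets registered on one class never leak into another.
if __name__ == "__main__":
    class _DemoDetectionOptions(ObjectDetectionStagePresetMixin):
        pass

    _DemoDetectionOptions.register_preset(OBJECT_DETECTION_LAYOUT_HERON)
    print(_DemoDetectionOptions.list_preset_ids())  # ['layout_heron_default']
    info = _DemoDetectionOptions.get_preset_info()[0]
    # Prints the human-readable name and the default engine's enum value.
    print(info["name"], info["default_engine"])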
{ "repo_id": "docling-project/docling", "file_path": "docling/datamodel/stage_model_specs.py", "license": "MIT License", "lines": 1119, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
docling-project/docling:docling/datamodel/vlm_engine_options.py
"""Engine options for VLM inference. This module defines engine-specific configuration options that are independent of model specifications and prompts. """ import logging from typing import Any, Dict, Literal, Optional from pydantic import AnyUrl, Field from docling.datamodel.accelerator_options import AcceleratorDevice from docling.datamodel.settings import default_compile_model from docling.models.inference_engines.vlm.base import ( BaseVlmEngineOptions, VlmEngineType, ) _log = logging.getLogger(__name__) # ============================================================================= # AUTO_INLINE ENGINE OPTIONS # ============================================================================= class AutoInlineVlmEngineOptions(BaseVlmEngineOptions): """Options for auto-selecting the best local inference engine. Automatically selects the best available local engine based on: - Platform (macOS -> MLX, Linux/Windows -> Transformers/VLLM) - Available hardware (CUDA, MPS, CPU) - Model support """ engine_type: Literal[VlmEngineType.AUTO_INLINE] = VlmEngineType.AUTO_INLINE prefer_vllm: bool = Field( default=False, description="Prefer VLLM over Transformers when both are available on CUDA", ) # ============================================================================= # TRANSFORMERS ENGINE OPTIONS # ============================================================================= class TransformersVlmEngineOptions(BaseVlmEngineOptions): """Options for HuggingFace Transformers inference engine.""" engine_type: Literal[VlmEngineType.TRANSFORMERS] = VlmEngineType.TRANSFORMERS device: Optional[AcceleratorDevice] = Field( default=None, description="Device to use (auto-detected if None)" ) load_in_8bit: bool = Field( default=True, description="Load model in 8-bit precision using bitsandbytes" ) llm_int8_threshold: float = Field( default=6.0, description="Threshold for LLM.int8() quantization" ) quantized: bool = Field( default=False, description="Whether the model is pre-quantized" ) torch_dtype: Optional[str] = Field( default=None, description="PyTorch dtype (e.g., 'float16', 'bfloat16')" ) trust_remote_code: bool = Field( default=False, description="Allow execution of custom code from model repo" ) use_kv_cache: bool = Field( default=True, description="Enable key-value caching for attention" ) compile_model: bool = Field( default_factory=default_compile_model, description="Whether to compile the model with torch.compile() for better performance.", ) # ============================================================================= # MLX ENGINE OPTIONS # ============================================================================= class MlxVlmEngineOptions(BaseVlmEngineOptions): """Options for Apple MLX inference engine (Apple Silicon only).""" engine_type: Literal[VlmEngineType.MLX] = VlmEngineType.MLX trust_remote_code: bool = Field( default=False, description="Allow execution of custom code from model repo" ) # ============================================================================= # VLLM ENGINE OPTIONS # ============================================================================= class VllmVlmEngineOptions(BaseVlmEngineOptions): """Options for vLLM inference engine (high-throughput serving).""" engine_type: Literal[VlmEngineType.VLLM] = VlmEngineType.VLLM device: Optional[AcceleratorDevice] = Field( default=None, description="Device to use (auto-detected if None)" ) tensor_parallel_size: int = Field( default=1, description="Number of GPUs for tensor parallelism" ) gpu_memory_utilization: float = 
Field( default=0.9, description="Fraction of GPU memory to use" ) trust_remote_code: bool = Field( default=False, description="Allow execution of custom code from model repo" ) # ============================================================================= # API ENGINE OPTIONS # ============================================================================= class ApiVlmEngineOptions(BaseVlmEngineOptions): """Options for API-based VLM services. Supports multiple API variants: - Generic OpenAI-compatible API - Ollama - LM Studio - OpenAI """ engine_type: VlmEngineType = Field( default=VlmEngineType.API, description="API variant to use" ) url: AnyUrl = Field( default=AnyUrl("http://localhost:11434/v1/chat/completions"), description="API endpoint URL", ) headers: Dict[str, str] = Field( default_factory=dict, description="HTTP headers for authentication" ) params: Dict[str, Any] = Field( default_factory=dict, description="Additional API parameters (model, max_tokens, etc.)", ) timeout: float = Field(default=60.0, description="Request timeout in seconds") concurrency: int = Field(default=1, description="Number of concurrent requests") def __init__(self, **data): """Initialize with default URLs based on engine type.""" if "engine_type" in data and "url" not in data: engine_type = data["engine_type"] if engine_type == VlmEngineType.API_OLLAMA: data["url"] = "http://localhost:11434/v1/chat/completions" elif engine_type == VlmEngineType.API_LMSTUDIO: data["url"] = "http://localhost:1234/v1/chat/completions" elif engine_type == VlmEngineType.API_OPENAI: data["url"] = "https://api.openai.com/v1/chat/completions" super().__init__(**data)
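

# -----------------------------------------------------------------------------
# Illustrative sketch (demo only, not part of the module API). Exercises the
# engine_type-dependent URL defaulting implemented in
# ApiVlmEngineOptions.__init__: a variant-specific default is filled in only
# when no explicit url is passed. The gateway host below is hypothetical.
if __name__ == "__main__":
    defaulted = ApiVlmEngineOptions(engine_type=VlmEngineType.API_LMSTUDIO)
    print(defaulted.url)  # http://localhost:1234/v1/chat/completions

    explicit = ApiVlmEngineOptions(
        engine_type=VlmEngineType.API_LMSTUDIO,
        url="http://my-gateway:8080/v1/chat/completions",  # hypothetical host
    )
    print(explicit.url)  # an explicit value wins over the variant default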
{ "repo_id": "docling-project/docling", "file_path": "docling/datamodel/vlm_engine_options.py", "license": "MIT License", "lines": 125, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
docling-project/docling:docling/models/inference_engines/vlm/_utils.py
"""Internal utilities for VLM runtimes. This module contains shared utility functions used across different VLM runtime implementations to avoid code duplication and ensure consistency. """ import logging from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Union import numpy as np from PIL import Image from docling.datamodel.pipeline_options_vlm_model import TransformersPromptStyle from docling.models.utils.generation_utils import GenerationStopper _log = logging.getLogger(__name__) def normalize_image_to_pil(image: Union[Image.Image, np.ndarray]) -> Image.Image: """Convert any image format to RGB PIL Image. Args: image: Input image as PIL Image or numpy array Returns: RGB PIL Image Raises: ValueError: If numpy array has unsupported shape """ # Handle numpy arrays if isinstance(image, np.ndarray): if image.ndim == 3 and image.shape[2] in [3, 4]: # RGB or RGBA array image = Image.fromarray(image.astype(np.uint8)) elif image.ndim == 2: # Grayscale array image = Image.fromarray(image.astype(np.uint8), mode="L") else: raise ValueError(f"Unsupported numpy array shape: {image.shape}") # Ensure RGB mode (handles RGBA, L, P, etc.) if image.mode != "RGB": image = image.convert("RGB") return image def preprocess_image_batch( images: List[Union[Image.Image, np.ndarray]], ) -> List[Image.Image]: """Preprocess a batch of images to RGB PIL Images. Args: images: List of images as PIL Images or numpy arrays Returns: List of RGB PIL Images """ return [normalize_image_to_pil(img) for img in images] def extract_generation_stoppers( extra_config: Dict[str, Any], ) -> List[GenerationStopper]: """Extract and instantiate GenerationStopper instances from config. This handles both GenerationStopper instances and classes, instantiating classes as needed. Args: extra_config: Extra generation configuration dictionary Returns: List of GenerationStopper instances """ stoppers: List[GenerationStopper] = [] custom_criteria = extra_config.get("custom_stopping_criteria", []) for criteria in custom_criteria: if isinstance(criteria, GenerationStopper): # Already an instance stoppers.append(criteria) elif isinstance(criteria, type) and issubclass(criteria, GenerationStopper): # A class - instantiate it stoppers.append(criteria()) # Ignore other types (e.g., HF StoppingCriteria for transformers) return stoppers def resolve_model_artifacts_path( repo_id: str, revision: str, artifacts_path: Optional[Union[Path, str]], download_fn: Callable[[str, str], Path], ) -> Path: """Resolve the path to model artifacts, downloading if needed. This standardizes the logic for finding or downloading model artifacts across different runtimes. 
Args: repo_id: HuggingFace repository ID (e.g., "microsoft/Phi-3.5-vision-instruct") revision: Model revision (e.g., "main") artifacts_path: Optional path to cached artifacts directory download_fn: Function to download models, takes (repo_id, revision) and returns Path Returns: Path to the model artifacts directory Raises: FileNotFoundError: If artifacts_path is provided but model not found """ repo_cache_folder = repo_id.replace("/", "--") artifacts_path = artifacts_path if artifacts_path is None else Path(artifacts_path) if artifacts_path is None: # No cache path provided - download return download_fn(repo_id, revision) elif (artifacts_path / repo_cache_folder).exists(): # Cache path with repo-specific subfolder exists return artifacts_path / repo_cache_folder else: # Model not found in artifacts_path - raise clear error available_models = [] if artifacts_path.exists(): available_models = [p.name for p in artifacts_path.iterdir() if p.is_dir()] raise FileNotFoundError( f"Model '{repo_id}' not found in artifacts_path.\n" f"Expected location: {artifacts_path / repo_cache_folder}\n" f"Available models in {artifacts_path}: " f"{', '.join(available_models) if available_models else 'none'}\n\n" f"To fix this issue:\n" f" 1. Download the model: docling-tools models download-hf-repo {repo_id}\n" f" 2. Or remove --artifacts-path to enable auto-download\n" f" 3. Or use a different model that exists in your artifacts_path" ) def format_prompt_for_vlm( prompt: str, processor: Any, prompt_style: TransformersPromptStyle, repo_id: Optional[str] = None, ) -> Optional[str]: """Format a prompt according to the specified style. This centralizes prompt formatting logic that was previously duplicated across different model implementations. Args: prompt: User prompt text processor: Model processor with apply_chat_template method prompt_style: Style of prompt formatting to use repo_id: Optional model repository ID for model-specific formatting Returns: Formatted prompt string, or None if prompt_style is NONE """ if prompt_style == TransformersPromptStyle.RAW: return prompt elif prompt_style == TransformersPromptStyle.NONE: return None elif repo_id == "microsoft/Phi-4-multimodal-instruct": # Special handling for Phi-4 _log.debug("Using specialized prompt for Phi-4") user_prompt_prefix = "<|user|>" assistant_prompt = "<|assistant|>" prompt_suffix = "<|end|>" formatted = ( f"{user_prompt_prefix}<|image_1|>{prompt}{prompt_suffix}{assistant_prompt}" ) _log.debug(f"Formatted prompt for {repo_id}: {formatted}") return formatted elif prompt_style == TransformersPromptStyle.CHAT: # Standard chat template with image placeholder messages = [ { "role": "user", "content": [ {"type": "text", "text": "This is a page from a document."}, {"type": "image"}, {"type": "text", "text": prompt}, ], } ] return processor.apply_chat_template(messages, add_generation_prompt=True) else: raise ValueError( f"Unknown prompt style: {prompt_style}. " f"Valid values are {', '.join(s.value for s in TransformersPromptStyle)}" )
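

# -----------------------------------------------------------------------------
# Illustrative sketch (demo only). Shows normalize_image_to_pil coercing a
# 2-D grayscale numpy array into an RGB PIL image, which is the contract the
# inference engines rely on before handing images to a model.
if __name__ == "__main__":
    gray = np.zeros((32, 32), dtype=np.uint8)  # synthetic grayscale input
    pil = normalize_image_to_pil(gray)
    print(pil.mode, pil.size)  # RGB (32, 32)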
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/vlm/_utils.py", "license": "MIT License", "lines": 159, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
docling-project/docling:docling/models/inference_engines/vlm/api_openai_compatible_engine.py
"""API-based VLM inference engine for remote services.""" import asyncio import logging import time from concurrent.futures import ThreadPoolExecutor from typing import TYPE_CHECKING, List, Optional from PIL.Image import Image from docling.datamodel.vlm_engine_options import ApiVlmEngineOptions from docling.exceptions import OperationNotAllowed from docling.models.inference_engines.vlm._utils import ( extract_generation_stoppers, preprocess_image_batch, ) from docling.models.inference_engines.vlm.base import ( BaseVlmEngine, VlmEngineInput, VlmEngineOutput, ) from docling.models.utils.generation_utils import GenerationStopper from docling.utils.api_image_request import ( api_image_request, api_image_request_streaming, ) if TYPE_CHECKING: from docling.datamodel.stage_model_specs import EngineModelConfig _log = logging.getLogger(__name__) class ApiVlmEngine(BaseVlmEngine): """API engine for VLM inference via remote services. This runtime supports OpenAI-compatible API endpoints including: - Generic OpenAI-compatible APIs - Ollama - LM Studio - OpenAI """ def __init__( self, enable_remote_services: bool, options: ApiVlmEngineOptions, model_config: Optional["EngineModelConfig"] = None, ): """Initialize the API engine. Args: options: API-specific runtime options model_config: Model configuration (repo_id, revision, extra_config) """ super().__init__(options, model_config=model_config) self.enable_remote_services = enable_remote_services self.options: ApiVlmEngineOptions = options if not self.enable_remote_services: raise OperationNotAllowed( "Connections to remote services is only allowed when set explicitly. " "pipeline_options.enable_remote_services=True." ) # Merge model_config extra_config (which contains API params from model spec) # with runtime options params. Runtime options take precedence. if model_config and "api_params" in model_config.extra_config: # Model spec provides API params (e.g., model name) model_api_params = model_config.extra_config["api_params"] # Only use model spec params if user hasn't provided any params # This prevents conflicts when users provide custom params (e.g., model_id for watsonx) if not self.options.params: self.merged_params = model_api_params.copy() else: # User provided params - use them as-is (don't merge with model spec) self.merged_params = self.options.params.copy() else: self.merged_params = self.options.params.copy() def initialize(self) -> None: """Initialize the API engine. For API runtimes, initialization is minimal - just validate options. """ if self._initialized: return _log.info( f"Initializing API VLM inference engine (endpoint: {self.options.url})" ) # Validate that we have a URL if not self.options.url: raise ValueError("API runtime requires a URL") self._initialized = True _log.info("API runtime initialized") def predict_batch(self, input_batch: List[VlmEngineInput]) -> List[VlmEngineOutput]: """Run inference on a batch of inputs using concurrent API requests. This method processes multiple images concurrently using a thread pool, which can significantly improve throughput for API-based runtimes. 
Args: input_batch: List of inputs to process Returns: List of outputs, one per input """ if not self._initialized: self.initialize() if not input_batch: return [] def _process_single_input(input_data: VlmEngineInput) -> VlmEngineOutput: """Process a single input via API.""" # Prepare image using shared utility images = preprocess_image_batch([input_data.image]) image = images[0] # Prepare API parameters (use merged params which include model spec params) api_params = { **self.merged_params, "temperature": input_data.temperature, } # Add max_tokens if specified if input_data.max_new_tokens: api_params["max_tokens"] = input_data.max_new_tokens # Add stop strings if specified if input_data.stop_strings: api_params["stop"] = input_data.stop_strings # Extract custom stopping criteria using shared utility custom_stoppers = extract_generation_stoppers( input_data.extra_generation_config ) request_start_time = time.time() stop_reason = "unspecified" if custom_stoppers: # Streaming path with early abort support generated_text, num_tokens = api_image_request_streaming( url=self.options.url, # type: ignore[arg-type] image=image, prompt=input_data.prompt, headers=self.options.headers, generation_stoppers=custom_stoppers, timeout=self.options.timeout, **api_params, ) # Check if stopped by custom criteria for stopper in custom_stoppers: if stopper.should_stop(generated_text): stop_reason = "custom_criteria" break else: # Non-streaming path generated_text, num_tokens, api_stop_reason = api_image_request( url=self.options.url, # type: ignore[arg-type] image=image, prompt=input_data.prompt, headers=self.options.headers, timeout=self.options.timeout, **api_params, ) stop_reason = api_stop_reason generation_time = time.time() - request_start_time return VlmEngineOutput( text=generated_text, stop_reason=stop_reason, metadata={ "generation_time": generation_time, "num_tokens": num_tokens, }, ) # Use ThreadPoolExecutor for concurrent API requests max_workers = min(self.options.concurrency, len(input_batch)) _log.info( f"Processing batch of {len(input_batch)} images with " f"{max_workers} concurrent requests" ) start_time = time.time() with ThreadPoolExecutor(max_workers=max_workers) as executor: # Submit all requests futures = [ executor.submit(_process_single_input, input_data) for input_data in input_batch ] # Collect results in order outputs = [future.result() for future in futures] total_time = time.time() - start_time _log.info( f"Batch processed {len(input_batch)} images in {total_time:.2f}s " f"({total_time / len(input_batch):.2f}s per image, " f"{max_workers} concurrent requests)" ) return outputs def cleanup(self) -> None: """Clean up API runtime resources. For API runtimes, there's nothing to clean up. """ _log.info("API runtime cleaned up")
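

# -----------------------------------------------------------------------------
# Illustrative sketch (demo only; no HTTP request is made). Constructing the
# engine requires an explicit opt-in to remote services, and initialize() only
# validates the endpoint URL, so this sketch is safe to run offline.
if __name__ == "__main__":
    engine = ApiVlmEngine(
        enable_remote_services=True,
        options=ApiVlmEngineOptions(concurrency=4),
    )
    engine.initialize()
    # With no model_config, merged_params falls back to the (empty) options params.
    print(engine.options.url, engine.merged_params)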
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/vlm/api_openai_compatible_engine.py", "license": "MIT License", "lines": 179, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
docling-project/docling:docling/models/inference_engines/vlm/auto_inline_engine.py
"""Auto-inline VLM inference engine that selects the best local engine.""" import logging import platform from pathlib import Path from typing import TYPE_CHECKING, List, Optional, Union from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions from docling.datamodel.vlm_engine_options import ( AutoInlineVlmEngineOptions, MlxVlmEngineOptions, TransformersVlmEngineOptions, VllmVlmEngineOptions, ) from docling.models.inference_engines.vlm.base import ( BaseVlmEngine, VlmEngineInput, VlmEngineOutput, VlmEngineType, ) from docling.utils.accelerator_utils import decide_device if TYPE_CHECKING: from docling.datamodel.stage_model_specs import EngineModelConfig, VlmModelSpec _log = logging.getLogger(__name__) class AutoInlineVlmEngine(BaseVlmEngine): """Auto-selecting engine that picks the best local implementation. Selection logic: 1. On macOS with Apple Silicon (MPS available) -> MLX 2. On Linux/Windows with CUDA and prefer_vllm=True -> vLLM 3. Otherwise -> Transformers This engine delegates to the selected engine after initialization. """ def __init__( self, options: AutoInlineVlmEngineOptions, accelerator_options: AcceleratorOptions, artifacts_path: Optional[Union[Path, str]], model_spec: Optional["VlmModelSpec"] = None, ): """Initialize the auto-inline engine. Args: options: Auto-inline engine options accelerator_options: Hardware accelerator configuration artifacts_path: Path to cached model artifacts model_spec: Model specification (for generating engine-specific configs) """ super().__init__(options, model_config=None) self.options: AutoInlineVlmEngineOptions = options self.accelerator_options = accelerator_options self.artifacts_path = artifacts_path self.model_spec = model_spec # The actual engine will be set during initialization self.actual_engine: Optional[BaseVlmEngine] = None self.selected_engine_type: Optional[VlmEngineType] = None # Initialize immediately if model_spec is provided if self.model_spec is not None: self.initialize() def _select_engine(self) -> VlmEngineType: """Select the best engine based on platform and hardware. Respects model's supported_engines if model_spec is provided. Returns: The selected engine type """ system = platform.system() # Detect available device device = decide_device( self.accelerator_options.device, supported_devices=[ AcceleratorDevice.CPU, AcceleratorDevice.CUDA, AcceleratorDevice.MPS, AcceleratorDevice.XPU, ], ) _log.info(f"Auto-selecting engine for system={system}, device={device}") # macOS with Apple Silicon -> MLX (if explicitly supported) if system == "Darwin" and device == "mps": # Check if model has explicit MLX export has_mlx_export = False if self.model_spec is not None: has_mlx_export = self.model_spec.has_explicit_engine_export( VlmEngineType.MLX ) if has_mlx_export: try: import mlx_vlm _log.info( "Selected MLX engine (Apple Silicon with explicit MLX export)" ) return VlmEngineType.MLX except ImportError: _log.warning( "MLX not available on Apple Silicon, falling back to Transformers" ) else: _log.info( "MLX not selected: no explicit MLX export found for this model " "(no different repo_id in engine_overrides or not in supported_engines). " "Falling back to Transformers." 
) # CUDA with prefer_vllm -> vLLM (if supported) if device.startswith("cuda") and self.options.prefer_vllm: # For vLLM, check supported_engines if explicitly set # (vLLM typically uses the same repo_id, so we only check explicit restrictions) has_vllm_support = True if ( self.model_spec is not None and self.model_spec.supported_engines is not None ): has_vllm_support = ( VlmEngineType.VLLM in self.model_spec.supported_engines ) if has_vllm_support: try: import vllm _log.info("Selected vLLM engine (CUDA + prefer_vllm=True)") return VlmEngineType.VLLM except ImportError: _log.warning("vLLM not available, falling back to Transformers") else: _log.info( "vLLM not selected: not in model's supported_engines. " "Falling back to Transformers." ) # Default to Transformers (should always be supported) _log.info("Selected Transformers engine (default)") return VlmEngineType.TRANSFORMERS def initialize(self) -> None: """Initialize by selecting and creating the actual engine.""" if self._initialized: return _log.info("Initializing auto-inline VLM inference engine...") # Select the best engine self.selected_engine_type = self._select_engine() # Generate model_config for the selected engine model_config = None if self.model_spec is not None: model_config = self.model_spec.get_engine_config(self.selected_engine_type) _log.info( f"Generated config for {self.selected_engine_type.value}: " f"repo_id={model_config.repo_id}, extra_config={model_config.extra_config}" ) # Create the actual engine if self.selected_engine_type == VlmEngineType.MLX: from docling.models.inference_engines.vlm.mlx_engine import MlxVlmEngine mlx_options = MlxVlmEngineOptions( trust_remote_code=self.options.trust_remote_code if hasattr(self.options, "trust_remote_code") else False, ) self.actual_engine = MlxVlmEngine( options=mlx_options, artifacts_path=self.artifacts_path, model_config=model_config, ) elif self.selected_engine_type == VlmEngineType.VLLM: from docling.models.inference_engines.vlm.vllm_engine import VllmVlmEngine vllm_options = VllmVlmEngineOptions() self.actual_engine = VllmVlmEngine( options=vllm_options, accelerator_options=self.accelerator_options, artifacts_path=self.artifacts_path, model_config=model_config, ) else: # TRANSFORMERS from docling.models.inference_engines.vlm.transformers_engine import ( TransformersVlmEngine, ) transformers_options = TransformersVlmEngineOptions() self.actual_engine = TransformersVlmEngine( options=transformers_options, accelerator_options=self.accelerator_options, artifacts_path=self.artifacts_path, model_config=model_config, ) # Note: actual_engine.initialize() is called automatically in their __init__ # if model_config is provided self._initialized = True _log.info( f"Auto-inline engine initialized with {self.selected_engine_type.value}" ) def predict_batch(self, input_batch: List[VlmEngineInput]) -> List[VlmEngineOutput]: """Run inference on a batch of inputs using the selected engine. Args: input_batch: List of inputs to process Returns: List of outputs, one per input """ if not self._initialized: self.initialize() assert self.actual_engine is not None, "Engine not initialized" # Delegate to the actual engine's batch implementation return self.actual_engine.predict_batch(input_batch) def cleanup(self) -> None: """Clean up the actual engine resources.""" if self.actual_engine is not None: self.actual_engine.cleanup() self.actual_engine = None _log.info("Auto-inline engine cleaned up")
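

# -----------------------------------------------------------------------------
# Illustrative sketch (demo only). Without a model_spec the constructor defers
# initialization, which lets us look at the platform/hardware selection logic
# in isolation. _select_engine() is private and is called here purely for
# illustration; on a CPU-only machine it typically reports TRANSFORMERS.
if __name__ == "__main__":
    engine = AutoInlineVlmEngine(
        options=AutoInlineVlmEngineOptions(),
        accelerator_options=AcceleratorOptions(),
        artifacts_path=None,
    )
    print(engine._select_engine())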
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/vlm/auto_inline_engine.py", "license": "MIT License", "lines": 197, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
docling-project/docling:docling/models/inference_engines/vlm/base.py
"""Base classes for VLM inference engines.""" import logging from abc import ABC, abstractmethod from enum import Enum from typing import ( TYPE_CHECKING, Any, ClassVar, Dict, List, Literal, Optional, Type, get_args, get_origin, ) from PIL.Image import Image from pydantic import BaseModel, ConfigDict, Field, field_validator from pydantic_core import PydanticUndefined if TYPE_CHECKING: from docling.datamodel.stage_model_specs import EngineModelConfig _log = logging.getLogger(__name__) class VlmEngineType(str, Enum): """Types of VLM inference engines available.""" # Local/inline engines TRANSFORMERS = "transformers" MLX = "mlx" VLLM = "vllm" # API-based engines API = "api" API_OLLAMA = "api_ollama" API_LMSTUDIO = "api_lmstudio" API_OPENAI = "api_openai" # Auto-selection AUTO_INLINE = "auto_inline" @classmethod def is_api_variant(cls, engine_type: "VlmEngineType") -> bool: """Check if an engine type is an API variant.""" return engine_type in { cls.API, cls.API_OLLAMA, cls.API_LMSTUDIO, cls.API_OPENAI, } @classmethod def is_inline_variant(cls, engine_type: "VlmEngineType") -> bool: """Check if an engine type is an inline/local variant.""" return engine_type in { cls.TRANSFORMERS, cls.MLX, cls.VLLM, } class BaseVlmEngineOptions(BaseModel): """Base configuration for VLM inference engines. Engine options are independent of model specifications and prompts. They only control how the inference is executed. """ model_config = ConfigDict(arbitrary_types_allowed=True) engine_type: VlmEngineType = Field(description="Type of inference engine to use") # registry: engine_type → subclass _registry: ClassVar[Dict[VlmEngineType, Type["BaseVlmEngineOptions"]]] = {} @classmethod def __pydantic_init_subclass__(cls, **kwargs): super().__pydantic_init_subclass__(**kwargs) # Skip base class itself if cls is BaseVlmEngineOptions: return # only register concrete subclasses that fix engine_type via Literal field = cls.model_fields.get("engine_type") if not field: return engine_type = None # 1. Literal[...] annotation ann = field.annotation if get_origin(ann) is Literal: values = get_args(ann) if len(values) == 1: engine_type = values[0] # 2. Explicit default if engine_type is None and field.default is not PydanticUndefined: engine_type = field.default if engine_type is not None: BaseVlmEngineOptions._registry[engine_type] = cls class VlmEngineOptionsMixin(BaseModel): engine_options: BaseVlmEngineOptions = Field( description="Runtime configuration (transformers, mlx, api, etc.)" ) @field_validator("engine_options", mode="before") @classmethod def resolve_engine_options(cls, value): # already concrete if isinstance(value, BaseVlmEngineOptions): return value # dict / JSON case if isinstance(value, dict): engine_type = value.get("engine_type") model_cls = BaseVlmEngineOptions._registry.get(engine_type) if model_cls: return model_cls.model_validate(value) # Minimal fallback: API variants share one options class if VlmEngineType.is_api_variant(engine_type): from docling.datamodel.vlm_engine_options import ApiVlmEngineOptions return ApiVlmEngineOptions.model_validate(value) return value class VlmEngineInput(BaseModel): """Input to a VLM inference engine. This is the generic interface that all engines accept. 
""" model_config = ConfigDict(arbitrary_types_allowed=True) image: Image = Field(description="PIL Image to process") prompt: str = Field(description="Text prompt for the model") temperature: float = Field( default=0.0, description="Sampling temperature for generation" ) max_new_tokens: int = Field( default=4096, description="Maximum number of tokens to generate" ) stop_strings: List[str] = Field( default_factory=list, description="Strings that trigger generation stopping" ) extra_generation_config: Dict[str, Any] = Field( default_factory=dict, description="Additional generation configuration" ) class VlmEngineOutput(BaseModel): """Output from a VLM inference engine. This is the generic interface that all engines return. """ text: str = Field(description="Generated text from the model") stop_reason: Optional[str] = Field( default=None, description="Reason why generation stopped" ) metadata: Dict[str, Any] = Field( default_factory=dict, description="Additional metadata from the engine" ) class BaseVlmEngine(ABC): """Abstract base class for VLM inference engines. An engine handles the low-level model inference with generic inputs (PIL images + text prompts) and returns text predictions. Engines are independent of: - Pipeline stages (DoclingDocument, Page objects) - Response formats (doctags, markdown, etc.) But they ARE aware of: - Model specifications (repo_id, revision, model_type via EngineModelConfig) These model specs are provided at construction time for eager initialization. """ def __init__( self, options: BaseVlmEngineOptions, model_config: Optional["EngineModelConfig"] = None, ): """Initialize the engine. Args: options: Engine-specific configuration options model_config: Model configuration (repo_id, revision, extra_config) If None, model must be specified in predict() calls """ self.options = options self.model_config = model_config self._initialized = False @abstractmethod def initialize(self) -> None: """Initialize the engine (load models, setup connections, etc.). This is called once before the first inference. Implementations should set self._initialized = True when done. """ @abstractmethod def predict_batch(self, input_batch: List[VlmEngineInput]) -> List[VlmEngineOutput]: """Run inference on a batch of inputs. This is the primary method that all engines must implement. Single predictions are routed through this method. Args: input_batch: List of inputs to process Returns: List of outputs, one per input """ def predict(self, input_data: VlmEngineInput) -> VlmEngineOutput: """Run inference on a single input. This is a convenience method that wraps the input in a list and calls predict_batch(). Engines should NOT override this method - all inference logic should be in predict_batch(). Args: input_data: Generic input containing image, prompt, and config Returns: Generic output containing generated text and metadata """ if not self._initialized: self.initialize() results = self.predict_batch([input_data]) return results[0] def __call__( self, input_data: VlmEngineInput | List[VlmEngineInput] ) -> VlmEngineOutput | List[VlmEngineOutput]: """Convenience method to run inference. Args: input_data: Single input or list of inputs Returns: Single output or list of outputs """ if not self._initialized: self.initialize() if isinstance(input_data, list): return self.predict_batch(input_data) else: return self.predict(input_data) def cleanup(self) -> None: """Clean up resources (optional). Called when the engine is no longer needed. Implementations can override to release resources. """
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/vlm/base.py", "license": "MIT License", "lines": 209, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
docling-project/docling:docling/models/inference_engines/vlm/factory.py
"""Factory for creating VLM inference engines.""" import logging from pathlib import Path from typing import TYPE_CHECKING, Optional, Union from docling.datamodel.accelerator_options import AcceleratorOptions from docling.models.inference_engines.vlm.base import ( BaseVlmEngine, BaseVlmEngineOptions, VlmEngineType, ) if TYPE_CHECKING: from docling.datamodel.stage_model_specs import EngineModelConfig, VlmModelSpec from docling.datamodel.vlm_engine_options import ( ApiVlmEngineOptions, AutoInlineVlmEngineOptions, MlxVlmEngineOptions, TransformersVlmEngineOptions, VllmVlmEngineOptions, ) _log = logging.getLogger(__name__) def create_vlm_engine( *, options: BaseVlmEngineOptions, model_spec: "VlmModelSpec", enable_remote_services: bool, artifacts_path: Optional[Union[Path, str]], accelerator_options: AcceleratorOptions, ) -> BaseVlmEngine: """Create a VLM inference engine from options. Args: options: Engine configuration options model_spec: Model specification (for generating engine-specific configs) Returns: Initialized engine instance Raises: ValueError: If engine type is not supported ImportError: If required dependencies are not installed """ engine_type = options.engine_type # Generate model_config from model_spec if provided model_config: Optional[EngineModelConfig] = None if model_spec is not None and engine_type != VlmEngineType.AUTO_INLINE: # AUTO_INLINE handles model_spec internally model_config = model_spec.get_engine_config(engine_type) # For API engines, add API params to extra_config if VlmEngineType.is_api_variant(engine_type): api_params = model_spec.get_api_params(engine_type) model_config.extra_config["api_params"] = api_params if engine_type == VlmEngineType.AUTO_INLINE: from docling.datamodel.vlm_engine_options import AutoInlineVlmEngineOptions from docling.models.inference_engines.vlm.auto_inline_engine import ( AutoInlineVlmEngine, ) if not isinstance(options, AutoInlineVlmEngineOptions): raise ValueError( f"Expected AutoInlineVlmEngineOptions, got {type(options)}" ) return AutoInlineVlmEngine( options, model_spec=model_spec, artifacts_path=artifacts_path, accelerator_options=accelerator_options, ) elif engine_type == VlmEngineType.TRANSFORMERS: from docling.datamodel.vlm_engine_options import TransformersVlmEngineOptions from docling.models.inference_engines.vlm.transformers_engine import ( TransformersVlmEngine, ) if not isinstance(options, TransformersVlmEngineOptions): raise ValueError( f"Expected TransformersVlmEngineOptions, got {type(options)}" ) return TransformersVlmEngine( options, model_config=model_config, artifacts_path=artifacts_path, accelerator_options=accelerator_options, ) elif engine_type == VlmEngineType.MLX: from docling.datamodel.vlm_engine_options import MlxVlmEngineOptions from docling.models.inference_engines.vlm.mlx_engine import MlxVlmEngine if not isinstance(options, MlxVlmEngineOptions): raise ValueError(f"Expected MlxVlmEngineOptions, got {type(options)}") return MlxVlmEngine( options, model_config=model_config, artifacts_path=artifacts_path ) elif engine_type == VlmEngineType.VLLM: from docling.datamodel.vlm_engine_options import VllmVlmEngineOptions from docling.models.inference_engines.vlm.vllm_engine import VllmVlmEngine if not isinstance(options, VllmVlmEngineOptions): raise ValueError(f"Expected VllmVlmEngineOptions, got {type(options)}") return VllmVlmEngine( options, model_config=model_config, artifacts_path=artifacts_path, accelerator_options=accelerator_options, ) elif VlmEngineType.is_api_variant(engine_type): from 
docling.datamodel.vlm_engine_options import ApiVlmEngineOptions from docling.models.inference_engines.vlm.api_openai_compatible_engine import ( ApiVlmEngine, ) if not isinstance(options, ApiVlmEngineOptions): raise ValueError(f"Expected ApiVlmEngineOptions, got {type(options)}") return ApiVlmEngine( enable_remote_services=enable_remote_services, options=options, model_config=model_config, ) else: raise ValueError(f"Unsupported engine type: {engine_type}")
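

# -----------------------------------------------------------------------------
# Illustrative sketch (demo only; nothing is downloaded and no request is
# sent). Builds an API-variant engine from one of the preset model specs in
# docling.datamodel.stage_model_specs, which avoids loading any local weights.
if __name__ == "__main__":
    from docling.datamodel.stage_model_specs import VLM_CONVERT_DEEPSEEK_OCR
    from docling.datamodel.vlm_engine_options import ApiVlmEngineOptions

    engine = create_vlm_engine(
        options=ApiVlmEngineOptions(engine_type=VlmEngineType.API_OLLAMA),
        model_spec=VLM_CONVERT_DEEPSEEK_OCR.model_spec,
        enable_remote_services=True,
        artifacts_path=None,
        accelerator_options=AcceleratorOptions(),
    )
    print(type(engine).__name__)  # ApiVlmEngine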
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/vlm/factory.py", "license": "MIT License", "lines": 111, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
docling-project/docling:docling/models/inference_engines/vlm/mlx_engine.py
"""MLX-based VLM inference engine for Apple Silicon.""" import logging import threading import time from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, List, Optional, Union from PIL.Image import Image from docling.datamodel.vlm_engine_options import MlxVlmEngineOptions from docling.models.inference_engines.vlm._utils import ( extract_generation_stoppers, preprocess_image_batch, ) from docling.models.inference_engines.vlm.base import ( BaseVlmEngine, VlmEngineInput, VlmEngineOutput, ) from docling.models.utils.generation_utils import GenerationStopper from docling.models.utils.hf_model_download import HuggingFaceModelDownloadMixin if TYPE_CHECKING: from docling.datamodel.stage_model_specs import EngineModelConfig _log = logging.getLogger(__name__) # Global lock for MLX model calls - MLX models are not thread-safe # All MLX models share this lock to prevent concurrent MLX operations _MLX_GLOBAL_LOCK = threading.Lock() class MlxVlmEngine(BaseVlmEngine, HuggingFaceModelDownloadMixin): """MLX engine for VLM inference on Apple Silicon. This engine uses the mlx-vlm library to run vision-language models efficiently on Apple Silicon (M1/M2/M3) using the Metal Performance Shaders. Note: MLX models are not thread-safe and use a global lock. """ def __init__( self, options: MlxVlmEngineOptions, artifacts_path: Optional[Union[Path, str]], model_config: Optional["EngineModelConfig"] = None, ): """Initialize the MLX engine. Args: options: MLX-specific runtime options artifacts_path: Path to cached model artifacts model_config: Model configuration (repo_id, revision, extra_config) """ super().__init__(options, model_config=model_config) self.options: MlxVlmEngineOptions = options self.artifacts_path = ( artifacts_path if artifacts_path is None else Path(artifacts_path) ) # These will be set during initialization # MLX types are complex and external, using Any with type: ignore self.vlm_model: Any = None self.processor: Any = None self.config: Any = None self.apply_chat_template: Any = None self.stream_generate: Any = None # Initialize immediately if model_config is provided if self.model_config is not None: self.initialize() def initialize(self) -> None: """Initialize the MLX model and processor.""" if self._initialized: return _log.info("Initializing MLX VLM inference engine...") try: from mlx_vlm import load, stream_generate from mlx_vlm.prompt_utils import apply_chat_template from mlx_vlm.utils import load_config except ImportError: raise ImportError( "mlx-vlm is not installed. Please install it via `pip install mlx-vlm` " "to use MLX VLM models on Apple Silicon." ) self.apply_chat_template = apply_chat_template # type: ignore[assignment] self.stream_generate = stream_generate # type: ignore[assignment] # Load model if model_config is provided if self.model_config is not None and self.model_config.repo_id is not None: repo_id = self.model_config.repo_id revision = self.model_config.revision or "main" _log.info(f"Loading MLX model {repo_id} (revision: {revision})") self._load_model_for_repo(repo_id, revision=revision) self._initialized = True _log.info("MLX runtime initialized") def _load_model_for_repo(self, repo_id: str, revision: str = "main") -> None: """Load model and processor for a specific repository. 
Args: repo_id: HuggingFace repository ID revision: Model revision """ from mlx_vlm import load from mlx_vlm.utils import load_config # Download or locate model artifacts repo_cache_folder = repo_id.replace("/", "--") if self.artifacts_path is None: artifacts_path = self.download_models(repo_id, revision=revision) elif (self.artifacts_path / repo_cache_folder).exists(): artifacts_path = self.artifacts_path / repo_cache_folder else: # Model not found in artifacts_path - raise clear error available_models = [] if self.artifacts_path.exists(): available_models = [ p.name for p in self.artifacts_path.iterdir() if p.is_dir() ] raise FileNotFoundError( f"Model '{repo_id}' not found in artifacts_path.\n" f"Expected location: {self.artifacts_path / repo_cache_folder}\n" f"Available models in {self.artifacts_path}: " f"{', '.join(available_models) if available_models else 'none'}\n\n" f"To fix this issue:\n" f" 1. Download the model: docling-tools models download-hf-repo {repo_id}\n" f" 2. Or remove --artifacts-path to enable auto-download\n" f" 3. Or use a different model that exists in your artifacts_path" ) # Load the model self.vlm_model, self.processor = load(artifacts_path) self.config = load_config(artifacts_path) _log.info(f"Loaded MLX model {repo_id} (revision: {revision})") def predict_batch(self, input_batch: List[VlmEngineInput]) -> List[VlmEngineOutput]: """Run inference on a batch of inputs. Note: MLX models are not thread-safe and use a global lock, so batch processing is done sequentially. This method is provided for API consistency but does not provide performance benefits over sequential processing. Args: input_batch: List of inputs to process Returns: List of outputs, one per input """ if not self._initialized: self.initialize() if not input_batch: return [] # Model should already be loaded via initialize() if self.vlm_model is None or self.processor is None or self.config is None: raise RuntimeError( "Model not loaded. Ensure EngineModelConfig was provided during initialization." 
) _log.debug( f"MLX runtime processing batch of {len(input_batch)} images sequentially " "(MLX does not support batched inference)" ) outputs: List[VlmEngineOutput] = [] # MLX models are not thread-safe - use global lock to serialize access with _MLX_GLOBAL_LOCK: _log.debug("MLX model: Acquired global lock for thread safety") for input_data in input_batch: # Preprocess image images = preprocess_image_batch([input_data.image]) image = images[0] # Format prompt using MLX's chat template formatted_prompt = self.apply_chat_template( self.processor, self.config, input_data.prompt, num_images=1 ) # Extract custom stopping criteria custom_stoppers = extract_generation_stoppers( input_data.extra_generation_config ) # Stream generate with stop strings and custom stopping criteria support start_time = time.time() _log.debug("Starting MLX generation...") output_text = "" stop_reason = "unspecified" # Use stream_generate for proper stop string handling for token in self.stream_generate( self.vlm_model, self.processor, formatted_prompt, [image], # MLX stream_generate expects list of images max_tokens=input_data.max_new_tokens, verbose=False, temp=input_data.temperature, ): output_text += token.text # Check for configured stop strings if input_data.stop_strings: if any( stop_str in output_text for stop_str in input_data.stop_strings ): _log.debug("Stopping generation due to stop string match") stop_reason = "stop_string" break # Check for custom stopping criteria if custom_stoppers: for stopper in custom_stoppers: # Determine the text window to check based on lookback_tokens lookback_tokens = stopper.lookback_tokens() text_to_check = ( output_text[-lookback_tokens:] if len(output_text) > lookback_tokens else output_text ) try: if stopper.should_stop(text_to_check): _log.info( f"Stopping generation due to GenerationStopper: {type(stopper).__name__}" ) stop_reason = "custom_criteria" break except Exception as e: _log.warning( f"Error in GenerationStopper.should_stop: {e}" ) continue else: # for-else: only executed if inner loop didn't break continue # Break outer loop if any stopper triggered break generation_time = time.time() - start_time _log.debug( f"MLX generation completed in {generation_time:.2f}s, " f"stop_reason: {stop_reason}" ) # Create output outputs.append( VlmEngineOutput( text=output_text, stop_reason=stop_reason, metadata={ "generation_time": generation_time, "model": self.model_config.repo_id if self.model_config else "unknown", }, ) ) _log.debug("MLX model: Released global lock") return outputs def cleanup(self) -> None: """Clean up model resources.""" if self.vlm_model is not None: del self.vlm_model self.vlm_model = None if self.processor is not None: del self.processor self.processor = None _log.info("MLX runtime cleaned up")
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/vlm/mlx_engine.py", "license": "MIT License", "lines": 238, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
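A note on the streaming loop in `MlxVlmEngine.predict_batch` above: before each custom stopper is consulted, the engine slices a lookback window off the accumulated output text. The sketch below re-implements that windowing in isolation. `TagStopper` is a hypothetical stand-in that models only the two methods the engine actually calls (`lookback_tokens` and `should_stop`), not the full docling `GenerationStopper` interface.

```python
# Minimal, self-contained sketch of the lookback windowing used in
# MlxVlmEngine.predict_batch. TagStopper is hypothetical; only the two
# methods the engine calls are modeled here.
class TagStopper:
    """Stops once a closing tag shows up in the recent output."""

    def __init__(self, tag: str, lookback: int = 64):
        self.tag = tag
        self.lookback = lookback

    def lookback_tokens(self) -> int:
        return self.lookback

    def should_stop(self, text: str) -> bool:
        return self.tag in text


def any_stopper_triggered(output_text: str, stoppers: list) -> bool:
    # Mirrors the window slicing from the streaming loop above.
    for stopper in stoppers:
        window = stopper.lookback_tokens()
        text_to_check = (
            output_text[-window:] if len(output_text) > window else output_text
        )
        if stopper.should_stop(text_to_check):
            return True
    return False


assert any_stopper_triggered("x = 1\n</code>", [TagStopper("</code>")])
assert not any_stopper_triggered("x = 1", [TagStopper("</code>")])
```

Checking only the tail window keeps per-token cost bounded even as the generated text grows, which matters because the check runs once per streamed token.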
docling-project/docling:docling/models/inference_engines/vlm/transformers_engine.py
"""Transformers-based VLM inference engine.""" import importlib.metadata import logging import sys import time from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, List, Optional, Union import torch from packaging import version from PIL.Image import Image from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForImageTextToText, AutoModelForVision2Seq, AutoProcessor, BitsAndBytesConfig, GenerationConfig, PreTrainedModel, ProcessorMixin, StoppingCriteriaList, StopStringCriteria, ) from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions from docling.datamodel.pipeline_options_vlm_model import ( TransformersModelType, TransformersPromptStyle, ) from docling.datamodel.vlm_engine_options import TransformersVlmEngineOptions from docling.models.inference_engines.vlm._utils import ( extract_generation_stoppers, preprocess_image_batch, resolve_model_artifacts_path, ) from docling.models.inference_engines.vlm.base import ( BaseVlmEngine, VlmEngineInput, VlmEngineOutput, ) from docling.models.utils.generation_utils import ( GenerationStopper, HFStoppingCriteriaWrapper, ) from docling.models.utils.hf_model_download import HuggingFaceModelDownloadMixin from docling.utils.accelerator_utils import decide_device if TYPE_CHECKING: from docling.datamodel.stage_model_specs import EngineModelConfig _log = logging.getLogger(__name__) class TransformersVlmEngine(BaseVlmEngine, HuggingFaceModelDownloadMixin): """HuggingFace Transformers engine for VLM inference. This engine uses the transformers library to run vision-language models locally on CPU, CUDA, or XPU devices. """ def __init__( self, options: TransformersVlmEngineOptions, accelerator_options: AcceleratorOptions, artifacts_path: Optional[Union[Path, str]], model_config: Optional["EngineModelConfig"] = None, ): """Initialize the Transformers engine. 
Args:
            options: Transformers-specific runtime options
            accelerator_options: Hardware accelerator configuration
            artifacts_path: Path to cached model artifacts
            model_config: Model configuration (repo_id, revision, extra_config)
        """
        super().__init__(options, model_config=model_config)
        self.options: TransformersVlmEngineOptions = options
        self.accelerator_options = accelerator_options
        self.artifacts_path = artifacts_path

        # These will be set during initialization
        self.device: Optional[str] = None
        self.processor: Optional[ProcessorMixin] = None
        self.vlm_model: Optional[PreTrainedModel] = None
        self.generation_config: Optional[GenerationConfig] = None

        # Initialize immediately if model_config is provided
        if self.model_config is not None:
            self.initialize()

    def initialize(self) -> None:
        """Initialize the Transformers model and processor."""
        if self._initialized:
            return

        _log.info("Initializing Transformers VLM inference engine...")

        # Determine device
        supported_devices = [
            AcceleratorDevice.CPU,
            AcceleratorDevice.CUDA,
            AcceleratorDevice.XPU,
        ]
        self.device = decide_device(
            self.options.device or self.accelerator_options.device,
            supported_devices=supported_devices,
        )
        _log.info(f"Using device: {self.device}")

        # Load model if model_config is provided
        if self.model_config is not None and self.model_config.repo_id is not None:
            repo_id = self.model_config.repo_id
            revision = self.model_config.revision or "main"

            # Get model_type from extra_config
            model_type = self.model_config.extra_config.get(
                "transformers_model_type",
                TransformersModelType.AUTOMODEL,
            )

            _log.info(
                f"Loading model {repo_id} (revision: {revision}, "
                f"model_type: {model_type.value})"
            )
            self._load_model_for_repo(repo_id, revision=revision, model_type=model_type)

        self._initialized = True

    def _load_model_for_repo(
        self,
        repo_id: str,
        revision: str = "main",
        model_type: TransformersModelType = TransformersModelType.AUTOMODEL,
    ) -> None:
        """Load model and processor for a specific repository.

        Args:
            repo_id: HuggingFace repository ID
            revision: Model revision
            model_type: Type of model architecture
        """
        # Check for Phi-4 compatibility. Compare parsed versions rather than
        # raw strings: lexicographic comparison mis-orders versions such as
        # "4.9.0" vs "4.52.0".
        transformers_version = importlib.metadata.version("transformers")
        if (
            repo_id == "microsoft/Phi-4-multimodal-instruct"
            and version.parse(transformers_version) >= version.parse("4.52.0")
        ):
            raise NotImplementedError(
                f"Phi 4 only works with transformers<4.52.0 but you have {transformers_version=}. 
" f"Please downgrade by running: pip install -U 'transformers<4.52.0'" ) # Download or locate model artifacts using shared utility def download_wrapper(repo_id: str, revision: str) -> Path: return self.download_models(repo_id, revision=revision) artifacts_path = resolve_model_artifacts_path( repo_id=repo_id, revision=revision, artifacts_path=self.artifacts_path, download_fn=download_wrapper, ) # Setup quantization if needed quantization_config: Optional[BitsAndBytesConfig] = None if self.options.quantized: quantization_config = BitsAndBytesConfig( load_in_8bit=self.options.load_in_8bit, llm_int8_threshold=self.options.llm_int8_threshold, ) # Select model class model_cls: type[ Union[ AutoModel, AutoModelForCausalLM, AutoModelForVision2Seq, AutoModelForImageTextToText, ] ] = AutoModel if model_type == TransformersModelType.AUTOMODEL_CAUSALLM: model_cls = AutoModelForCausalLM # type: ignore[assignment] elif model_type == TransformersModelType.AUTOMODEL_VISION2SEQ: model_cls = AutoModelForVision2Seq # type: ignore[assignment] elif model_type == TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT: model_cls = AutoModelForImageTextToText # type: ignore[assignment] # Load processor self.processor = AutoProcessor.from_pretrained( artifacts_path, trust_remote_code=self.options.trust_remote_code, revision=revision, ) self.processor.tokenizer.padding_side = "left" # type: ignore[union-attr] # Load model self.vlm_model = model_cls.from_pretrained( artifacts_path, device_map=self.device, dtype=self.options.torch_dtype, _attn_implementation=( "flash_attention_2" if self.device.startswith("cuda") # type: ignore[union-attr] and self.accelerator_options.cuda_use_flash_attention2 else "sdpa" ), trust_remote_code=self.options.trust_remote_code, revision=revision, quantization_config=quantization_config, ) self.vlm_model.eval() # Optionally compile model for better performance (model must be in eval mode first) # Works for Python < 3.14 with any torch 2.x # Works for Python >= 3.14 with torch >= 2.10 if self.options.compile_model: if sys.version_info < (3, 14): self.vlm_model = torch.compile(self.vlm_model) # type: ignore[assignment] elif version.parse(torch.__version__) >= version.parse("2.10"): self.vlm_model = torch.compile(self.vlm_model) # type: ignore[assignment] else: _log.warning( "Model compilation requested but not available " "(requires Python < 3.14 or torch >= 2.10 for Python 3.14+)" ) # Load generation config self.generation_config = GenerationConfig.from_pretrained( artifacts_path, revision=revision ) _log.info(f"Loaded model {repo_id} (revision: {revision})") def predict_batch(self, input_batch: List[VlmEngineInput]) -> List[VlmEngineOutput]: """Run inference on a batch of inputs efficiently. This method processes multiple images in a single forward pass, which is much more efficient than processing them sequentially. Args: input_batch: List of inputs to process Returns: List of outputs, one per input """ if not self._initialized: self.initialize() if not input_batch: return [] # Model should already be loaded via initialize() if self.vlm_model is None or self.processor is None: raise RuntimeError( "Model not loaded. Ensure EngineModelConfig was provided during initialization." 
) # Get prompt style from first input's extra config first_input = input_batch[0] prompt_style = first_input.extra_generation_config.get( "transformers_prompt_style", TransformersPromptStyle.CHAT, ) # Prepare images using shared utility images = preprocess_image_batch([inp.image for inp in input_batch]) # Prepare prompts prompts = [] for input_data in input_batch: # Format prompt if prompt_style == TransformersPromptStyle.CHAT: # Use structured message format with image placeholder (like legacy implementation) # This is required for vision models like Granite Vision to properly tokenize # both image features and text tokens messages = [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": input_data.prompt}, ], } ] formatted_prompt = self.processor.apply_chat_template( # type: ignore[union-attr] messages, tokenize=False, add_generation_prompt=True, ) elif prompt_style == TransformersPromptStyle.RAW: formatted_prompt = input_data.prompt else: # NONE formatted_prompt = None prompts.append(formatted_prompt) # Process batch if prompt_style == TransformersPromptStyle.NONE: inputs = self.processor( # type: ignore[misc] images, return_tensors="pt", padding=True, **first_input.extra_generation_config.get("extra_processor_kwargs", {}), ) else: inputs = self.processor( # type: ignore[misc] text=prompts, images=images, return_tensors="pt", padding=True, **first_input.extra_generation_config.get("extra_processor_kwargs", {}), ) inputs = {k: v.to(self.device) for k, v in inputs.items()} # Setup stopping criteria (use first input's config) stopping_criteria_list = StoppingCriteriaList() if first_input.stop_strings: stopping_criteria_list.append( StopStringCriteria( stop_strings=first_input.stop_strings, tokenizer=self.processor.tokenizer, # type: ignore[union-attr,attr-defined] ) ) # Add custom stopping criteria using shared utility custom_stoppers = extract_generation_stoppers( first_input.extra_generation_config ) for stopper in custom_stoppers: wrapped_criteria = HFStoppingCriteriaWrapper( self.processor.tokenizer, # type: ignore[union-attr,attr-defined] stopper, ) stopping_criteria_list.append(wrapped_criteria) # Also handle any HF StoppingCriteria directly passed custom_criteria = first_input.extra_generation_config.get( "custom_stopping_criteria", [] ) for criteria in custom_criteria: # Skip GenerationStopper instances (already handled above) if not isinstance(criteria, GenerationStopper) and not ( isinstance(criteria, type) and issubclass(criteria, GenerationStopper) ): stopping_criteria_list.append(criteria) # Filter decoder-specific keys decoder_keys = { "skip_special_tokens", "clean_up_tokenization_spaces", "spaces_between_special_tokens", } generation_config = { k: v for k, v in first_input.extra_generation_config.items() if k not in decoder_keys and k not in { "transformers_model_type", "transformers_prompt_style", "extra_processor_kwargs", "custom_stopping_criteria", "revision", } } decoder_config = { k: v for k, v in first_input.extra_generation_config.items() if k in decoder_keys } # Generate gen_kwargs = { **inputs, "max_new_tokens": first_input.max_new_tokens, "use_cache": self.options.use_kv_cache, "generation_config": self.generation_config, **generation_config, } if first_input.temperature > 0: gen_kwargs["do_sample"] = True gen_kwargs["temperature"] = first_input.temperature else: gen_kwargs["do_sample"] = False if stopping_criteria_list: gen_kwargs["stopping_criteria"] = stopping_criteria_list start_time = time.time() with torch.inference_mode(): generated_ids 
= self.vlm_model.generate(**gen_kwargs)  # type: ignore[union-attr,operator]
        generation_time = time.time() - start_time

        # Decode
        input_len = inputs["input_ids"].shape[1]
        trimmed_sequences = generated_ids[:, input_len:]
        decode_fn = getattr(self.processor, "batch_decode", None)
        if decode_fn is None and hasattr(self.processor, "tokenizer"):
            decode_fn = self.processor.tokenizer.batch_decode  # type: ignore[union-attr]
        if decode_fn is None:
            raise RuntimeError(
                "Neither processor.batch_decode nor tokenizer.batch_decode is available."
            )
        decoded_texts = decode_fn(trimmed_sequences, **decoder_config)

        # Remove padding. str.rstrip(pad_token) would strip any trailing
        # characters contained in the token string, so strip whole-token
        # suffixes instead.
        pad_token = self.processor.tokenizer.pad_token  # type: ignore[union-attr,attr-defined]
        if pad_token:

            def _strip_pad_suffix(text: str) -> str:
                while text.endswith(pad_token):
                    text = text[: -len(pad_token)]
                return text

            decoded_texts = [_strip_pad_suffix(text) for text in decoded_texts]

        # Create outputs
        outputs = []
        for i, text in enumerate(decoded_texts):
            outputs.append(
                VlmEngineOutput(
                    text=text,
                    stop_reason="unspecified",
                    metadata={
                        "generation_time": generation_time / len(input_batch),
                        "num_tokens": int(generated_ids[i].shape[0])
                        if i < generated_ids.shape[0]
                        else None,
                        "batch_size": len(input_batch),
                    },
                )
            )

        _log.info(
            f"Batch processed {len(input_batch)} images in {generation_time:.2f}s "
            f"({generation_time / len(input_batch):.2f}s per image)"
        )

        return outputs

    def cleanup(self) -> None:
        """Clean up model resources."""
        if self.vlm_model is not None:
            del self.vlm_model
            self.vlm_model = None
        if self.processor is not None:
            del self.processor
            self.processor = None

        # Clear CUDA cache if using GPU
        if self.device and self.device.startswith("cuda"):
            torch.cuda.empty_cache()

        _log.info("Transformers runtime cleaned up")
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/vlm/transformers_engine.py", "license": "MIT License", "lines": 394, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
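One detail worth isolating from `TransformersVlmEngine.predict_batch` above: a single `extra_generation_config` dict feeds three destinations. Decoder-only keys go to `batch_decode`, engine-internal keys are dropped, and everything else is forwarded to `generate()`. The sketch below reproduces that routing standalone; the key sets are copied from the engine, the rest is illustrative.

```python
# Standalone sketch of the extra_generation_config routing in
# TransformersVlmEngine.predict_batch: decoder keys go to batch_decode,
# engine-internal keys are dropped, the rest is forwarded to generate().
DECODER_KEYS = {
    "skip_special_tokens",
    "clean_up_tokenization_spaces",
    "spaces_between_special_tokens",
}
INTERNAL_KEYS = {
    "transformers_model_type",
    "transformers_prompt_style",
    "extra_processor_kwargs",
    "custom_stopping_criteria",
    "revision",
}


def split_generation_config(extra: dict) -> tuple[dict, dict]:
    generation = {
        k: v
        for k, v in extra.items()
        if k not in DECODER_KEYS and k not in INTERNAL_KEYS
    }
    decoder = {k: v for k, v in extra.items() if k in DECODER_KEYS}
    return generation, decoder


gen_cfg, dec_cfg = split_generation_config(
    {"num_beams": 2, "skip_special_tokens": False, "revision": "main"}
)
assert gen_cfg == {"num_beams": 2}
assert dec_cfg == {"skip_special_tokens": False}
```

Routing by explicit key sets means an unrecognized user key flows through to `generate()` by default, so new HF generation kwargs work without engine changes.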
docling-project/docling:docling/models/inference_engines/vlm/vllm_engine.py
"""vLLM-based VLM inference engine for high-throughput serving.""" import logging import sys import time from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions from docling.datamodel.pipeline_options_vlm_model import TransformersPromptStyle from docling.datamodel.vlm_engine_options import VllmVlmEngineOptions from docling.models.inference_engines.vlm._utils import ( format_prompt_for_vlm, preprocess_image_batch, resolve_model_artifacts_path, ) from docling.models.inference_engines.vlm.base import ( BaseVlmEngine, VlmEngineInput, VlmEngineOutput, ) from docling.utils.accelerator_utils import decide_device if TYPE_CHECKING: from docling.datamodel.stage_model_specs import EngineModelConfig _log = logging.getLogger(__name__) class VllmVlmEngine(BaseVlmEngine): """vLLM engine for high-throughput VLM inference. This engine uses the vLLM library for efficient batched inference on CUDA and XPU devices. """ # Allowlist of vLLM SamplingParams arguments (runtime generation controls) _VLLM_SAMPLING_KEYS = { # Core "max_tokens", "temperature", "top_p", "top_k", # Penalties "presence_penalty", "frequency_penalty", "repetition_penalty", # Stops / outputs "stop", "stop_token_ids", "skip_special_tokens", "spaces_between_special_tokens", # Search / length "n", "best_of", "length_penalty", "early_stopping", # Misc "logprobs", "prompt_logprobs", "min_p", "seed", } # Allowlist of vLLM LLM/EngineArgs arguments (engine/load-time controls) _VLLM_ENGINE_KEYS = { # Model/tokenizer/impl "tokenizer", "tokenizer_mode", "download_dir", # Parallelism / memory / lengths "tensor_parallel_size", "pipeline_parallel_size", "gpu_memory_utilization", "max_model_len", "max_num_batched_tokens", "kv_cache_dtype", "dtype", # Quantization "quantization", # Multimodal limits "limit_mm_per_prompt", # Execution toggles "enforce_eager", } def __init__( self, options: VllmVlmEngineOptions, accelerator_options: AcceleratorOptions, artifacts_path: Optional[Union[Path, str]], model_config: Optional["EngineModelConfig"] = None, ): """Initialize the vLLM engine. Args: options: vLLM-specific runtime options accelerator_options: Hardware accelerator configuration artifacts_path: Path to cached model artifacts model_config: Model configuration (repo_id, revision, extra_config) """ super().__init__(options, model_config=model_config) self.options: VllmVlmEngineOptions = options self.accelerator_options = accelerator_options self.artifacts_path = artifacts_path # These will be set during initialization self.device: Optional[str] = None self.llm: Any = None self.sampling_params: Any = None self.processor: Any = None # Initialize immediately if model_config is provided if self.model_config is not None: self.initialize() def initialize(self) -> None: """Initialize the vLLM engine.""" if self._initialized: return _log.info("Initializing vLLM VLM inference engine...") try: from transformers import AutoProcessor from vllm import LLM, SamplingParams except ImportError: if sys.version_info < (3, 14): raise ImportError( "vLLM is not installed. Please install it via `pip install vllm` " "to use vLLM for high-throughput VLM inference." ) else: raise ImportError( "vLLM is not installed. It is not yet available on Python 3.14." 
) # Determine device supported_devices = [ AcceleratorDevice.CPU, AcceleratorDevice.CUDA, AcceleratorDevice.XPU, ] self.device = decide_device( self.options.device or self.accelerator_options.device, supported_devices=supported_devices, ) _log.info(f"Using device: {self.device}") # Load model if model_config is provided if self.model_config is not None and self.model_config.repo_id is not None: repo_id = self.model_config.repo_id revision = self.model_config.revision or "main" _log.info(f"Loading vLLM model {repo_id} (revision: {revision})") # Resolve artifacts path from docling.models.utils.hf_model_download import ( HuggingFaceModelDownloadMixin, ) # Create a temporary mixin instance for downloading downloader = type( "Downloader", (HuggingFaceModelDownloadMixin,), {}, )() # Wrapper to match expected signature def download_wrapper(repo_id: str, revision: str) -> Path: return downloader.download_models(repo_id, revision=revision) artifacts_path = resolve_model_artifacts_path( repo_id=repo_id, revision=revision, artifacts_path=self.artifacts_path, download_fn=download_wrapper, ) # Split extra_generation_config into engine and sampling kwargs extra_cfg = self.model_config.extra_config load_cfg = { k: v for k, v in extra_cfg.items() if k in self._VLLM_ENGINE_KEYS } gen_cfg = { k: v for k, v in extra_cfg.items() if k in self._VLLM_SAMPLING_KEYS } unknown = sorted( k for k in extra_cfg.keys() if k not in self._VLLM_ENGINE_KEYS and k not in self._VLLM_SAMPLING_KEYS ) if unknown: _log.warning("Ignoring unknown extra_config keys for vLLM: %s", unknown) # Construct LLM kwargs (engine/load-time) llm_kwargs: Dict[str, Any] = { "model": str(artifacts_path), "model_impl": "transformers", "limit_mm_per_prompt": {"image": 1}, "revision": revision, "trust_remote_code": self.options.trust_remote_code, **load_cfg, } if self.device == "cpu": llm_kwargs.setdefault("enforce_eager", True) else: # Use configured gpu_memory_utilization or default llm_kwargs.setdefault( "gpu_memory_utilization", self.options.gpu_memory_utilization ) # Quantization support (if specified in extra_config) if "quantization" in extra_cfg: llm_kwargs.setdefault("quantization", extra_cfg["quantization"]) # Initialize vLLM LLM self.llm = LLM(**llm_kwargs) # Initialize processor for prompt templating self.processor = AutoProcessor.from_pretrained( artifacts_path, trust_remote_code=self.options.trust_remote_code, revision=revision, ) # Create default SamplingParams (will be overridden per-batch in predict_batch) # Use reasonable defaults since these come from input data self.sampling_params = SamplingParams( temperature=0.0, max_tokens=4096, **gen_cfg, ) _log.info(f"Loaded vLLM model {repo_id} (revision: {revision})") self._initialized = True _log.info("vLLM runtime initialized") def predict_batch(self, input_batch: List[VlmEngineInput]) -> List[VlmEngineOutput]: """Run inference on a batch of inputs using vLLM. This method processes multiple images in a single batched vLLM call, which is much more efficient than processing them sequentially. Args: input_batch: List of inputs to process Returns: List of outputs, one per input """ if not self._initialized: self.initialize() if not input_batch: return [] # Model should already be loaded via initialize() if self.llm is None or self.processor is None or self.sampling_params is None: raise RuntimeError( "Model not loaded. Ensure EngineModelConfig was provided during initialization." 
)

        # Preprocess images
        images = preprocess_image_batch([inp.image for inp in input_batch])

        # Get prompt style from first input's extra config
        first_input = input_batch[0]
        prompt_style = first_input.extra_generation_config.get(
            "transformers_prompt_style",
            TransformersPromptStyle.CHAT,
        )

        # Format prompts
        prompts: List[Optional[str]] = []
        for input_data in input_batch:
            formatted_prompt = format_prompt_for_vlm(
                prompt=input_data.prompt,
                processor=self.processor,
                prompt_style=prompt_style,
                repo_id=self.model_config.repo_id if self.model_config else None,
            )
            prompts.append(formatted_prompt)

        # Build vLLM inputs
        llm_inputs = [
            {"prompt": p, "multi_modal_data": {"image": im}}
            for p, im in zip(prompts, images)
        ]

        # Update sampling params with input-specific settings
        from vllm import SamplingParams

        # Use first input's settings for the batch
        sampling_params = SamplingParams(
            temperature=first_input.temperature,
            max_tokens=first_input.max_new_tokens,
            stop=first_input.stop_strings or None,
            **{
                k: v
                for k, v in first_input.extra_generation_config.items()
                if k in self._VLLM_SAMPLING_KEYS
            },
        )

        # Generate
        start_time = time.time()
        outputs = self.llm.generate(llm_inputs, sampling_params=sampling_params)
        generation_time = time.time() - start_time

        _log.debug(
            f"vLLM generated {len(outputs)} outputs in {generation_time:.2f}s "
            f"({len(outputs) / generation_time:.1f} outputs/sec)"
        )

        # Create output objects
        results: List[VlmEngineOutput] = []
        for i, output in enumerate(outputs):
            # Guard every access to output.outputs: it can be empty, as the
            # text and num_tokens fallbacks already assume.
            text = output.outputs[0].text if output.outputs else ""
            stop_reason = (
                "end_of_sequence"
                if output.outputs and output.outputs[0].stop_reason
                else "length"
            )
            num_tokens = len(output.outputs[0].token_ids) if output.outputs else 0
            results.append(
                VlmEngineOutput(
                    text=text,
                    stop_reason=stop_reason,
                    metadata={
                        "generation_time": generation_time / len(input_batch),
                        "num_tokens": num_tokens,
                        "batch_size": len(input_batch),
                        "model": self.model_config.repo_id
                        if self.model_config
                        else "unknown",
                    },
                )
            )

        return results

    def cleanup(self) -> None:
        """Clean up vLLM resources."""
        if self.llm is not None:
            del self.llm
            self.llm = None
        if self.processor is not None:
            del self.processor
            self.processor = None

        _log.info("vLLM runtime cleaned up")
{ "repo_id": "docling-project/docling", "file_path": "docling/models/inference_engines/vlm/vllm_engine.py", "license": "MIT License", "lines": 301, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
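The vLLM engine above partitions one `extra_config` dict into load-time `LLM(...)` kwargs and per-request `SamplingParams` kwargs via two allowlists, warning on anything it does not recognize. The sketch below reproduces that partitioning; the allowlists here are abbreviated for the example, the full sets being `_VLLM_ENGINE_KEYS` and `_VLLM_SAMPLING_KEYS` on the class.

```python
# Sketch of the extra_config partitioning in VllmVlmEngine.initialize,
# with abbreviated allowlists.
ENGINE_KEYS = {"tensor_parallel_size", "max_model_len", "dtype", "quantization"}
SAMPLING_KEYS = {"max_tokens", "temperature", "top_p", "stop", "seed"}


def partition_vllm_config(extra: dict) -> tuple[dict, dict, list]:
    load_cfg = {k: v for k, v in extra.items() if k in ENGINE_KEYS}
    gen_cfg = {k: v for k, v in extra.items() if k in SAMPLING_KEYS}
    unknown = sorted(k for k in extra if k not in ENGINE_KEYS | SAMPLING_KEYS)
    return load_cfg, gen_cfg, unknown


load_cfg, gen_cfg, unknown = partition_vllm_config(
    {"max_model_len": 8192, "temperature": 0.0, "typo_key": 1}
)
assert load_cfg == {"max_model_len": 8192}
assert gen_cfg == {"temperature": 0.0}
assert unknown == ["typo_key"]  # logged as a warning and ignored by the engine
```

Unlike the Transformers engine's pass-through default, this split is deny-by-default: an unlisted key is dropped with a warning rather than forwarded, which protects the vLLM constructors from unexpected kwargs.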
docling-project/docling:docling/models/stages/code_formula/code_formula_vlm_model.py
"""Code and formula extraction stage using the new VLM runtime system. This module provides a runtime-agnostic code and formula extraction stage that can use any VLM runtime (Transformers, MLX, API, etc.) through the unified runtime interface. """ import logging import re from collections.abc import Iterable from pathlib import Path from typing import List, Optional, Tuple, Union import numpy as np from docling_core.types.doc import ( CodeItem, DocItemLabel, DoclingDocument, NodeItem, TextItem, ) from docling_core.types.doc.labels import CodeLanguageLabel from PIL import Image from docling.datamodel.accelerator_options import AcceleratorOptions from docling.datamodel.base_models import ItemAndImageEnrichmentElement from docling.datamodel.pipeline_options import CodeFormulaVlmOptions from docling.models.base_model import BaseItemAndImageEnrichmentModel from docling.models.inference_engines.vlm import ( BaseVlmEngine, VlmEngineInput, create_vlm_engine, ) _log = logging.getLogger(__name__) class CodeFormulaVlmModel(BaseItemAndImageEnrichmentModel): """Code and formula extraction stage using the new runtime system. This stage uses the unified VLM runtime interface to extract code and formulas from document elements. It supports all runtime types (Transformers, MLX, API, etc.) through the runtime factory. The stage: 1. Filters code and formula elements 2. Uses the runtime to extract text content 3. Post-processes outputs (language detection for code, cleanup) 4. Updates element text and metadata Example: ```python from docling.datamodel.pipeline_options import CodeFormulaVlmOptions # Use preset with default runtime options = CodeFormulaVlmOptions.from_preset("codeformulav2") # Create stage stage = CodeFormulaVlmModel( enabled=True, artifacts_path=None, options=options, accelerator_options=AcceleratorOptions(), ) ``` """ elements_batch_size = 5 images_scale = 1.67 # = 120 dpi, aligned with training data resolution expansion_factor = 0.18 def __init__( self, enabled: bool, enable_remote_services: bool, artifacts_path: Optional[Union[Path, str]], options: CodeFormulaVlmOptions, accelerator_options: AcceleratorOptions, ): """Initialize the code/formula extraction stage. Args: enabled: Whether this stage is enabled artifacts_path: Path to model artifacts (optional) options: Configuration options including model spec and runtime options accelerator_options: Hardware acceleration options """ self.enabled = enabled self.options = options self.engine: Optional[BaseVlmEngine] = None if self.enabled: # New runtime system path engine_type = self.options.engine_options.engine_type # Get model configuration for this engine self.repo_id = self.options.model_spec.get_repo_id(engine_type) self.revision = self.options.model_spec.get_revision(engine_type) _log.info( f"Initializing CodeFormulaVlmModel with runtime system: " f"model={self.repo_id}, " f"engine={engine_type.value}" ) # Create engine using factory self.engine = create_vlm_engine( options=self.options.engine_options, model_spec=self.options.model_spec, accelerator_options=accelerator_options, artifacts_path=artifacts_path, enable_remote_services=enable_remote_services, ) _log.info("CodeFormulaVlmModel initialized successfully") def is_processable(self, doc: DoclingDocument, element: NodeItem) -> bool: """Determine if an element can be processed by this stage. 
Args: doc: The document being processed element: The element to check Returns: True if the element is a code block or formula that should be processed """ return self.enabled and ( (isinstance(element, CodeItem) and self.options.extract_code) or ( isinstance(element, TextItem) and element.label == DocItemLabel.FORMULA and self.options.extract_formulas ) ) def _get_prompt(self, label: str) -> str: """Construct the prompt for the model based on the element type. Args: label: The type of input, either 'code' or 'formula' Returns: The prompt string Raises: NotImplementedError: If the label is not 'code' or 'formula' """ if label == "code": return "<code>" elif label == "formula": return "<formula>" else: raise NotImplementedError("Label must be either code or formula") def _extract_code_language(self, input_string: str) -> Tuple[str, Optional[str]]: """Extract programming language from the beginning of a string. Checks if the input string starts with a pattern of the form ``<_some_language_>``. If it does, extracts the language string. Args: input_string: The input string, which may start with ``<_language_>`` Returns: Tuple of (remainder, language) where: - remainder is the string after the language tag (or original if no match) - language is the extracted language if found, otherwise None """ pattern = r"^<_([^_>]+)_>\s*(.*)" match = re.match(pattern, input_string, flags=re.DOTALL) if match: language = str(match.group(1)) remainder = str(match.group(2)) return remainder, language else: return input_string, None def _get_code_language_enum(self, value: Optional[str]) -> CodeLanguageLabel: """Convert a string to a CodeLanguageLabel enum member. Args: value: The string representation of the code language or None Returns: The corresponding enum member if valid, otherwise CodeLanguageLabel.UNKNOWN """ if not isinstance(value, str): return CodeLanguageLabel.UNKNOWN try: return CodeLanguageLabel(value) except ValueError: return CodeLanguageLabel.UNKNOWN def _post_process(self, texts: list[str]) -> list[str]: """Post-process model outputs by removing unwanted tokens. Args: texts: List of strings to be post-processed Returns: List of cleaned strings with specified substrings removed """ to_remove = ["</code>", "</formula>", "<loc_0><loc_0><loc_500><loc_500>"] def clean_text(text: str) -> str: # Handle both <end_of_utterance> and <end_of_utterance (without closing >) # The tokenizer may decode it differently depending on skip_special_tokens setting idx = text.find("<end_of_utterance>") if idx == -1: idx = text.find("<end_of_utterance") if idx != -1: text = text[:idx] for token in to_remove: if token in text: text = text.replace(token, "") return text.lstrip() return [clean_text(t) for t in texts] def __call__( self, doc: DoclingDocument, element_batch: Iterable[ItemAndImageEnrichmentElement], ) -> Iterable[NodeItem]: """Process a batch of code/formula elements. 
Args: doc: The document being processed element_batch: Batch of elements to process Yields: Enriched elements with extracted text """ if not self.enabled: for element in element_batch: yield element.item return if self.engine is None: raise RuntimeError("Engine not initialized") labels: List[str] = [] images: List[Union[Image.Image, np.ndarray]] = [] elements: List[Union[CodeItem, TextItem]] = [] for el in element_batch: assert isinstance(el.item, CodeItem | TextItem) elements.append(el.item) labels.append(el.item.label) images.append(el.image) # Process batch through engine try: # Prepare batch of engine inputs engine_inputs = [ VlmEngineInput( image=image if isinstance(image, Image.Image) else Image.fromarray(image), prompt=self._get_prompt(label), temperature=0.0, max_new_tokens=2048, extra_generation_config={ "skip_special_tokens": False, # Keep special tokens for post-processing }, ) for image, label in zip(images, labels) ] # Run batch inference batch_outputs = self.engine.predict_batch(engine_inputs) outputs = [output.text for output in batch_outputs] except Exception as e: _log.error(f"Error processing code/formula batch: {e}") outputs = [""] * len(images) # Post-process outputs outputs = self._post_process(outputs) # Update elements with extracted text for item, output_text in zip(elements, outputs): if isinstance(item, CodeItem): output_text, code_language = self._extract_code_language(output_text) item.code_language = self._get_code_language_enum(code_language) item.text = output_text yield item def __del__(self): """Cleanup engine resources.""" if self.engine is not None: try: self.engine.cleanup() except Exception as e: _log.warning(f"Error cleaning up engine: {e}")
{ "repo_id": "docling-project/docling", "file_path": "docling/models/stages/code_formula/code_formula_vlm_model.py", "license": "MIT License", "lines": 243, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
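The language tag parsing in `_extract_code_language` above hinges on a single regex. Exercised standalone, under the same pattern the stage uses:

```python
import re

# The tag regex from CodeFormulaVlmModel._extract_code_language, exercised
# in isolation. Model outputs look like "<_python_>def f(): ..."; untagged
# text passes through unchanged with language None.
PATTERN = r"^<_([^_>]+)_>\s*(.*)"


def extract_code_language(text: str) -> tuple[str, str | None]:
    match = re.match(PATTERN, text, flags=re.DOTALL)
    if match:
        return match.group(2), match.group(1)
    return text, None


assert extract_code_language("<_python_>print('hi')") == ("print('hi')", "python")
assert extract_code_language("plain text") == ("plain text", None)
```

The `re.DOTALL` flag matters: without it, `.*` would stop at the first newline and multi-line code bodies would be truncated to their first line.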
docling-project/docling:docling/models/stages/picture_description/picture_description_vlm_engine_model.py
"""Picture description stage using the VLM engine system. This module provides an engine-agnostic picture description stage that can use any VLM engine (Transformers, MLX, API, etc.) through the unified engine interface. """ import logging from collections.abc import Iterable from pathlib import Path from typing import Optional, Type, Union from PIL import Image from docling.datamodel.accelerator_options import AcceleratorOptions from docling.datamodel.pipeline_options import ( PictureDescriptionBaseOptions, PictureDescriptionVlmEngineOptions, ) from docling.datamodel.stage_model_specs import EngineModelConfig from docling.models.inference_engines.vlm import ( BaseVlmEngine, VlmEngineInput, create_vlm_engine, ) from docling.models.picture_description_base_model import PictureDescriptionBaseModel _log = logging.getLogger(__name__) class PictureDescriptionVlmEngineModel(PictureDescriptionBaseModel): """Picture description stage using the VLM engine system. This stage uses the unified VLM engine interface to generate descriptions for pictures in documents. It supports all engine types (Transformers, MLX, API, etc.) through the engine factory. The stage: 1. Filters pictures based on size and classification thresholds 2. Uses the engine to generate descriptions 3. Stores descriptions in PictureItem metadata Example: ```python from docling.datamodel.pipeline_options import PictureDescriptionVlmEngineOptions # Use preset with default engine options = PictureDescriptionVlmEngineOptions.from_preset("smolvlm") # Create stage stage = PictureDescriptionVlmEngineModel( enabled=True, enable_remote_services=False, artifacts_path=None, options=options, accelerator_options=AcceleratorOptions(), ) ``` """ @classmethod def get_options_type(cls) -> Type[PictureDescriptionBaseOptions]: return PictureDescriptionVlmEngineOptions def __init__( self, enabled: bool, enable_remote_services: bool, artifacts_path: Optional[Union[Path, str]], options: PictureDescriptionVlmEngineOptions, accelerator_options: AcceleratorOptions, ): super().__init__( enabled=enabled, enable_remote_services=enable_remote_services, artifacts_path=artifacts_path, options=options, accelerator_options=accelerator_options, ) self.options: PictureDescriptionVlmEngineOptions self.engine: Optional[BaseVlmEngine] = None if self.enabled: # Get engine type from options engine_type = self.options.engine_options.engine_type # Get model configuration for this engine (for logging) self.repo_id = self.options.model_spec.get_repo_id(engine_type) self.revision = self.options.model_spec.get_revision(engine_type) _log.info( f"Initializing PictureDescriptionVlmEngineModel with engine system: " f"model={self.repo_id}, " f"engine={engine_type.value}" ) # Create engine - pass model_spec, let factory handle config generation self.engine = create_vlm_engine( options=self.options.engine_options, model_spec=self.options.model_spec, accelerator_options=accelerator_options, artifacts_path=artifacts_path, enable_remote_services=enable_remote_services, ) # Set provenance from model spec self.provenance = f"{self.repo_id} ({engine_type.value})" def _annotate_images(self, images: Iterable[Image.Image]) -> Iterable[str]: """Generate descriptions for a batch of images. 
Args: images: Iterable of PIL images to describe Yields: Description text for each image """ if self.engine is None: raise RuntimeError("Engine not initialized") # Get prompt from options prompt = self.options.prompt # Convert to list for batch processing image_list = list(images) if not image_list: return try: # Prepare batch of engine inputs engine_inputs = [ VlmEngineInput( image=image, prompt=prompt, temperature=0.0, max_new_tokens=200, # Use from options if available ) for image in image_list ] # Generate descriptions using batch prediction outputs = self.engine.predict_batch(engine_inputs) # Extract and yield descriptions for output in outputs: description = output.text.strip() _log.debug(f"Generated description: {description[:100]}...") yield description except Exception as e: _log.error(f"Error generating picture descriptions: {e}") # Yield empty strings on error to maintain batch alignment for _ in image_list: yield "" def __del__(self): """Cleanup engine resources.""" if self.engine is not None: try: self.engine.cleanup() except Exception as e: _log.warning(f"Error cleaning up engine: {e}")
{ "repo_id": "docling-project/docling", "file_path": "docling/models/stages/picture_description/picture_description_vlm_engine_model.py", "license": "MIT License", "lines": 132, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
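`_annotate_images` above keeps a strict one-output-per-image contract even when inference fails, yielding empty strings so captions stay paired with their `PictureItem`s downstream. A minimal sketch of that contract, with a hypothetical `predict` callable standing in for `engine.predict_batch` and plain strings standing in for images:

```python
from typing import Callable, Iterable, List

# Sketch of the batch-alignment contract from _annotate_images: exactly one
# string is yielded per input, success or failure.
def annotate(
    images: List[str], predict: Callable[[List[str]], List[str]]
) -> Iterable[str]:
    try:
        for out in predict(images):
            yield out.strip()
    except Exception:
        # One empty caption per image keeps positions aligned downstream.
        for _ in images:
            yield ""


assert list(annotate(["img-a", "img-b"], lambda xs: [" cat ", " dog "])) == [
    "cat",
    "dog",
]


def failing_predict(xs: List[str]) -> List[str]:
    raise RuntimeError("engine down")


assert list(annotate(["img-a", "img-b"], failing_predict)) == ["", ""]
```

Because the caller zips these outputs against the pictures it submitted, silently yielding fewer items than inputs would mis-attach captions; the empty-string fallback is what makes the error path safe.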
docling-project/docling:docling/models/stages/vlm_convert/vlm_convert_model.py
"""VLM-based document conversion stage using the new runtime system. This stage converts document pages to structured formats (DocTags, Markdown, etc.) using vision-language models through a pluggable runtime system. """ import logging from collections.abc import Iterable from pathlib import Path from typing import Optional, Union from PIL import Image as PILImage from docling.datamodel.accelerator_options import AcceleratorOptions from docling.datamodel.base_models import Page, VlmPrediction, VlmStopReason from docling.datamodel.document import ConversionResult from docling.datamodel.pipeline_options import VlmConvertOptions from docling.models.base_model import BasePageModel from docling.models.inference_engines.vlm import ( BaseVlmEngine, VlmEngineInput, create_vlm_engine, ) from docling.utils.profiling import TimeRecorder _log = logging.getLogger(__name__) class VlmConvertModel(BasePageModel): """Stage for VLM-based document conversion using the new runtime system. This stage: 1. Takes document pages with images 2. Processes them through a VLM runtime (transformers, mlx, api, etc.) 3. Returns pages with VLM predictions attached The actual model inference is delegated to the runtime layer, making this stage runtime-agnostic. """ def __init__( self, enabled: bool, enable_remote_services: bool, artifacts_path: Optional[Union[Path, str]], options: VlmConvertOptions, accelerator_options: AcceleratorOptions, ): """Initialize the VLM convert stage. Args: enabled: Whether this stage is enabled options: Configuration options including model spec and runtime options """ self.enabled = enabled self.options = options if not self.enabled: return # Get engine type from options engine_type = options.engine_options.engine_type # Get model configuration for this engine (for logging) self.repo_id = options.model_spec.get_repo_id(engine_type) self.revision = options.model_spec.get_revision(engine_type) _log.info( f"Initializing VlmConvertModel with engine={engine_type.value}, " f"model={self.repo_id}, revision={self.revision}" ) # Create the engine - pass model_spec, let factory handle config generation self.engine: BaseVlmEngine = create_vlm_engine( options=self.options.engine_options, model_spec=self.options.model_spec, accelerator_options=accelerator_options, artifacts_path=artifacts_path, enable_remote_services=enable_remote_services, ) _log.info("VlmConvertModel initialized successfully") def __call__( self, conv_res: ConversionResult, page_batch: Iterable[Page] ) -> Iterable[Page]: """Process a batch of pages through the VLM engine. 
Args: conv_res: Conversion result context page_batch: Batch of pages to process Yields: Pages with VLM predictions attached """ if not self.enabled: yield from page_batch return page_list = list(page_batch) if not page_list: return with TimeRecorder(conv_res, "vlm_convert"): # Prepare images and prompts images = [] prompts = [] valid_pages = [] for page in page_list: if page.image is None: _log.warning( f"Page {page.page_no} has no image, skipping VLM conversion" ) continue # Scale image if needed image = page.image if self.options.scale != 1.0: new_size = ( int(image.width * self.options.scale), int(image.height * self.options.scale), ) image = image.resize(new_size, PILImage.Resampling.LANCZOS) # Apply max_size constraint if specified if self.options.max_size is not None: max_dim = max(image.width, image.height) if max_dim > self.options.max_size: scale_factor = self.options.max_size / max_dim new_size = ( int(image.width * scale_factor), int(image.height * scale_factor), ) image = image.resize(new_size, PILImage.Resampling.LANCZOS) images.append(image) prompts.append(self.options.model_spec.prompt) valid_pages.append(page) if not images: _log.warning("No valid images to process") return # Process through runtime using batch prediction _log.debug(f"Processing {len(images)} pages through VLM engine (batched)") try: # Create batch of runtime inputs engine_inputs = [ VlmEngineInput( image=img, prompt=prompt, temperature=0.0, # Use from options if needed max_new_tokens=4096, # Use from options if needed ) for img, prompt in zip(images, prompts) ] # Run batch inference outputs = self.engine.predict_batch(engine_inputs) # Attach predictions to pages for page, output in zip(valid_pages, outputs): # Convert string stop_reason to VlmStopReason enum stop_reason = VlmStopReason.UNSPECIFIED if output.stop_reason: try: stop_reason = VlmStopReason(output.stop_reason) except ValueError: stop_reason = VlmStopReason.UNSPECIFIED page.predictions.vlm_response = VlmPrediction( text=output.text, stop_reason=stop_reason, ) _log.debug( f"Page {page.page_no}: Generated {len(output.text)} chars, " f"stop_reason={output.stop_reason}" ) except Exception as e: _log.error(f"Error processing pages through VLM engine: {e}") raise # Yield all pages (including those that were skipped) yield from page_list def process_images( self, image_batch: Iterable[PILImage.Image], prompt: str | list[str], ) -> Iterable[VlmPrediction]: """Process raw images without page metadata. This method provides a simpler interface for processing images directly, useful for testing or when page metadata is not available. 
Args: image_batch: Iterable of PIL Images prompt: Either a single prompt string or list of prompts (one per image) Yields: VLM predictions for each image Raises: ValueError: If prompt list length doesn't match image count """ if not self.enabled: return images = list(image_batch) if not images: return # Handle prompt if isinstance(prompt, str): prompts = [prompt] * len(images) else: if len(prompt) != len(images): raise ValueError( f"Prompt list length ({len(prompt)}) must match " f"image count ({len(images)})" ) prompts = prompt # Process batch of images engine_inputs = [ VlmEngineInput( image=img, prompt=p, temperature=0.0, max_new_tokens=4096, ) for img, p in zip(images, prompts) ] # Run batch inference outputs = self.engine.predict_batch(engine_inputs) # Convert outputs to VlmPredictions for output in outputs: # Convert string stop_reason to VlmStopReason enum stop_reason = VlmStopReason.UNSPECIFIED if output.stop_reason: try: stop_reason = VlmStopReason(output.stop_reason) except ValueError: stop_reason = VlmStopReason.UNSPECIFIED # Convert to VlmPrediction yield VlmPrediction( text=output.text, stop_reason=stop_reason, ) def __del__(self): """Cleanup engine resources.""" if hasattr(self, "engine"): try: self.engine.cleanup() except Exception as e: _log.warning(f"Error cleaning up engine: {e}")
{ "repo_id": "docling-project/docling", "file_path": "docling/models/stages/vlm_convert/vlm_convert_model.py", "license": "MIT License", "lines": 217, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
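Page images in `VlmConvertModel.__call__` above go through two geometry steps before inference: an optional uniform `scale`, then a `max_size` clamp on the longer edge. The arithmetic, isolated from PIL in a small sketch:

```python
# Standalone sketch of the resize arithmetic in VlmConvertModel.__call__:
# an optional uniform scale, then a max_size clamp on the longest edge.
def target_size(
    width: int, height: int, scale: float, max_size: int | None
) -> tuple[int, int]:
    if scale != 1.0:
        width, height = int(width * scale), int(height * scale)
    if max_size is not None:
        longest = max(width, height)
        if longest > max_size:
            factor = max_size / longest
            width, height = int(width * factor), int(height * factor)
    return width, height


assert target_size(1000, 800, 2.0, None) == (2000, 1600)
# The clamp undoes an over-aggressive scale while preserving aspect ratio.
assert target_size(1000, 800, 2.0, 1000) == (1000, 800)
```

Applying the clamp after the scale means `max_size` acts as a hard ceiling regardless of the configured scale factor, which keeps prompt image sizes bounded for the downstream engine.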
docling-project/docling:docs/examples/code_formula_granite_docling.py
"""Example: Comparing CodeFormula models for code and formula extraction. This example demonstrates how to use both the CodeFormulaV2 model and the Granite Docling model for extracting code blocks and mathematical formulas from PDF documents, allowing you to compare their outputs. """ from pathlib import Path from docling_core.types.doc import CodeItem, FormulaItem from docling.datamodel.base_models import InputFormat from docling.datamodel.pipeline_options import ( CodeFormulaVlmOptions, PdfPipelineOptions, ) from docling.document_converter import DocumentConverter, PdfFormatOption def extract_with_preset(preset_name: str, input_doc: Path): """Extract code and formulas using a specific preset. Args: preset_name: Name of the preset to use ('codeformulav2' or 'granite_docling') input_doc: Path to the input PDF document Returns: The converted document """ print(f"\n{'=' * 60}") print(f"Processing with preset: {preset_name}") print(f"{'=' * 60}\n") # Create options with the specified preset code_formula_options = CodeFormulaVlmOptions.from_preset(preset_name) # Display preset information print(f"Model: {code_formula_options.model_spec.name}") print(f"Repo ID: {code_formula_options.model_spec.default_repo_id}") print(f"Scale: {code_formula_options.scale}") print(f"Max tokens: {code_formula_options.model_spec.max_new_tokens}") print() # Configure the PDF pipeline to use code/formula enrichment pipeline_options = PdfPipelineOptions( do_code_enrichment=True, do_formula_enrichment=True, code_formula_options=code_formula_options, ) # Create converter with the configured options converter = DocumentConverter( format_options={ InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options) } ) # Convert the document result = converter.convert(input_doc) doc = result.document # Print extracted code blocks code_blocks = [ item for item, _ in doc.iterate_items() if isinstance(item, CodeItem) ] print(f"Code blocks found: {len(code_blocks)}") for i, item in enumerate(code_blocks, 1): print(f"\n Code block {i}:") print(f" Language: {item.code_language}") print(f" Text: {item.text[:100]}{'...' if len(item.text) > 100 else ''}") # Print extracted formulas formulas = [ item for item, _ in doc.iterate_items() if isinstance(item, FormulaItem) ] print(f"\nFormulas found: {len(formulas)}") for i, item in enumerate(formulas, 1): print(f"\n Formula {i}:") print(f" Text: {item.text[:100]}{'...' if len(item.text) > 100 else ''}") return doc def main(): """Main function to compare both presets.""" input_doc = Path("tests/data/pdf/code_and_formula.pdf") if not input_doc.exists(): print(f"Error: Input file not found: {input_doc}") print("Please provide a valid PDF file with code and formulas.") return print("Comparing CodeFormula presets for code and formula extraction") print(f"Input document: {input_doc}") # Extract with CodeFormulaV2 model extract_with_preset("codeformulav2", input_doc) # Extract with Granite Docling model extract_with_preset("granite_docling", input_doc) print(f"\n{'=' * 60}") print("Comparison complete!") print(f"{'=' * 60}") print("\nBoth presets have been tested. You can compare the outputs above.") print("\nKey differences:") print("- CodeFormulaV2: Uses specialized CodeFormulaV2 model") print( "- Granite Docling: Uses IBM Granite-Docling-258M with extended context (8192 tokens)" ) if __name__ == "__main__": main()
{ "repo_id": "docling-project/docling", "file_path": "docs/examples/code_formula_granite_docling.py", "license": "MIT License", "lines": 89, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
docling-project/docling:docs/examples/legacy/minimal_vlm_pipeline_legacy.py
# %% [markdown]
# Minimal VLM pipeline example (LEGACY VERSION - for backward compatibility testing)
#
# **NOTE:** This is the legacy version using `vlm_model_specs` directly.
# For the new preset-based approach, see `minimal_vlm_pipeline.py`.
# This file is kept to validate backward compatibility with the old API.
#
# What this example does
# - Runs the VLM-powered pipeline on a PDF (by URL) and prints Markdown output.
# - Shows two setups: default (Transformers/GraniteDocling) and macOS MPS/MLX.
# - Uses the LEGACY vlm_model_specs approach (still supported for backward compatibility)
#
# Prerequisites
# - Install Docling with VLM extras and the appropriate backend (Transformers or MLX).
# - Ensure your environment can download model weights (e.g., from Hugging Face).
#
# How to run
# - From the repository root, run: `python docs/examples/legacy/minimal_vlm_pipeline_legacy.py`.
# - The script prints the converted Markdown to stdout.
#
# Notes
# - `source` may be a local path or a URL to a PDF.
# - The second section demonstrates macOS MPS acceleration via MLX (`vlm_model_specs.GRANITEDOCLING_MLX`).
# - For the NEW preset-based approach, see `docs/examples/minimal_vlm_pipeline.py`.
# %%
from docling.datamodel import vlm_model_specs
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import (
    VlmPipelineOptions,
)
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.vlm_pipeline import VlmPipeline

# Convert a public arXiv PDF; replace with a local path if preferred.
source = "https://arxiv.org/pdf/2501.17887"

###### USING SIMPLE DEFAULT VALUES
# - GraniteDocling model
# - Using the transformers framework

converter = DocumentConverter(
    format_options={
        InputFormat.PDF: PdfFormatOption(
            pipeline_cls=VlmPipeline,
        ),
    }
)

doc = converter.convert(source=source).document

print(doc.export_to_markdown())

###### USING MACOS MPS ACCELERATOR
# Demonstrates using MLX on macOS with MPS acceleration (macOS only).
# For more options see the `compare_vlm_models.py` example.

pipeline_options = VlmPipelineOptions(
    vlm_options=vlm_model_specs.GRANITEDOCLING_MLX,
)

converter = DocumentConverter(
    format_options={
        InputFormat.PDF: PdfFormatOption(
            pipeline_cls=VlmPipeline,
            pipeline_options=pipeline_options,
        ),
    }
)

doc = converter.convert(source=source).document

print(doc.export_to_markdown())
{ "repo_id": "docling-project/docling", "file_path": "docs/examples/legacy/minimal_vlm_pipeline_legacy.py", "license": "MIT License", "lines": 62, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
docling-project/docling:docs/examples/legacy/picture_description_inline_legacy.py
# %% [markdown] # Picture Description with Legacy VLM Options # # This example demonstrates the LEGACY approach using PictureDescriptionVlmOptions # with direct repo_id specification (no preset system). # # For the NEW approach with preset support, see: picture_description_inline.py # # What this example does: # - Uses the legacy PictureDescriptionVlmOptions with direct repo_id # - Shows backward compatibility with the old implementation # - Demonstrates the PictureDescriptionVlmModel (not the runtime-based version) # # Prerequisites: # - Install Docling with VLM extras: `pip install docling[vlm]` # # How to run: # - From the repository root: `python docs/examples/legacy/picture_description_inline_legacy.py` # %% from pathlib import Path from docling_core.types.doc import PictureItem from docling.datamodel.base_models import InputFormat from docling.datamodel.pipeline_options import ( PdfPipelineOptions, PictureDescriptionVlmOptions, ) from docling.document_converter import DocumentConverter, PdfFormatOption # %% # Example 1: Legacy approach with direct repo_id specification IMAGE_RESOLUTION_SCALE = 2.0 input_doc_path = Path("./tests/data/pdf/2206.01062.pdf") # Configure pipeline with legacy VLM options pipeline_options = PdfPipelineOptions() pipeline_options.do_ocr = False pipeline_options.do_table_structure = True # Legacy: Direct repo_id specification (no preset system) pipeline_options.do_picture_description = True pipeline_options.picture_description_options = PictureDescriptionVlmOptions( repo_id="HuggingFaceTB/SmolVLM-256M-Instruct", prompt="Describe this image in a few sentences.", scale=IMAGE_RESOLUTION_SCALE, ) doc_converter = DocumentConverter( format_options={ InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options), } ) result = doc_converter.convert(input_doc_path) # Print picture descriptions print("\n" + "=" * 80) print("PICTURE DESCRIPTIONS (Legacy Approach)") print("=" * 80) for item, _ in result.document.iterate_items(): if isinstance(item, PictureItem): print( f"Picture {item.self_ref}\n" f"Caption: {item.caption_text(doc=result.document)}\n" f"Meta: {item.meta}" ) # %% # Example 2: Legacy approach with custom prompt pipeline_options = PdfPipelineOptions() pipeline_options.do_ocr = False pipeline_options.do_table_structure = True # Legacy: Custom prompt with direct repo_id pipeline_options.do_picture_description = True pipeline_options.picture_description_options = PictureDescriptionVlmOptions( repo_id="HuggingFaceTB/SmolVLM-256M-Instruct", prompt="What is shown in this image? Provide a detailed technical description.", scale=IMAGE_RESOLUTION_SCALE, generation_config={ "max_new_tokens": 300, "do_sample": False, }, ) doc_converter = DocumentConverter( format_options={ InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options), } ) result = doc_converter.convert(input_doc_path) print("\n" + "=" * 80) print("PICTURE DESCRIPTIONS (Legacy with Custom Prompt)") print("=" * 80) for item, _level in result.document.iterate_items(): if isinstance(item, PictureItem): print( f"Picture {item.self_ref}\n" f"Caption: {item.caption_text(doc=result.document)}\n" f"Meta: {item.meta}" ) print("\n" + "=" * 80) print("NOTE: This is the LEGACY approach.") print("For the NEW preset-based approach, see: picture_description_inline.py") print("=" * 80)
{ "repo_id": "docling-project/docling", "file_path": "docs/examples/legacy/picture_description_inline_legacy.py", "license": "MIT License", "lines": 95, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
docling-project/docling:docs/examples/legacy/pictures_description_api_legacy.py
# %% [markdown]
# Describe pictures using a remote VLM API (vLLM, LM Studio, or watsonx.ai).
#
# What this example does
# - Configures `PictureDescriptionApiOptions` for local or cloud providers.
# - Converts a PDF, then prints each picture's caption and annotations.
#
# Prerequisites
# - Install Docling and `python-dotenv` if loading env vars from a `.env` file.
# - For local providers: ensure vLLM or LM Studio is running.
# - For watsonx.ai: set `WX_API_KEY` and `WX_PROJECT_ID` in the environment.
#
# How to run
# - From the repo root: `python docs/examples/legacy/pictures_description_api_legacy.py`.
# - Uncomment exactly one provider config and set `enable_remote_services=True` (already set).
#
# Notes
# - vLLM default endpoint: `http://localhost:8000/v1/chat/completions`.
# - LM Studio default endpoint: `http://localhost:1234/v1/chat/completions`.
# - Calling remote APIs sends page images/text to the provider; review privacy and
#   costs. For local testing, LM Studio runs everything on your machine.
# %%
import logging
import os
from pathlib import Path

import requests
from docling_core.types.doc import PictureItem
from dotenv import load_dotenv

from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import (
    PdfPipelineOptions,
    PictureDescriptionApiOptions,
)
from docling.document_converter import DocumentConverter, PdfFormatOption

### Example of PictureDescriptionApiOptions definitions


#### Using vLLM
# Models can be launched via:
# $ vllm serve MODEL_NAME
def vllm_local_options(model: str):
    options = PictureDescriptionApiOptions(
        url="http://localhost:8000/v1/chat/completions",
        params=dict(
            model=model,
            seed=42,
            max_completion_tokens=200,
        ),
        prompt="Describe the image in three sentences. Be concise and accurate.",
        timeout=90,
    )
    return options


#### Using LM Studio
def lms_local_options(model: str):
    options = PictureDescriptionApiOptions(
        url="http://localhost:1234/v1/chat/completions",
        params=dict(
            model=model,
            seed=42,
            max_completion_tokens=200,
        ),
        prompt="Describe the image in three sentences. Be concise and accurate.",
        timeout=90,
    )
    return options


#### Using a cloud service like IBM watsonx.ai
def watsonx_vlm_options():
    load_dotenv()
    api_key = os.environ.get("WX_API_KEY")
    project_id = os.environ.get("WX_PROJECT_ID")

    def _get_iam_access_token(api_key: str) -> str:
        res = requests.post(
            url="https://iam.cloud.ibm.com/identity/token",
            headers={
                "Content-Type": "application/x-www-form-urlencoded",
            },
            data=f"grant_type=urn:ibm:params:oauth:grant-type:apikey&apikey={api_key}",
        )
        res.raise_for_status()
        api_out = res.json()
        print(f"{api_out=}")
        return api_out["access_token"]

    # Background information in case the model_id is updated:
    # [1] Official list of models: https://dataplatform.cloud.ibm.com/docs/content/wsj/analyze-data/fm-models.html?context=wx
    # [2] Info on granite vision 3.3: https://dataplatform.cloud.ibm.com/docs/content/wsj/analyze-data/fm-models-ibm.html?context=wx#granite-vision-3-3-2b
    options = PictureDescriptionApiOptions(
        url="https://us-south.ml.cloud.ibm.com/ml/v1/text/chat?version=2023-05-29",
        params=dict(
            model_id="ibm/granite-vision-3-3-2b",
            project_id=project_id,
            parameters=dict(
                max_new_tokens=400,
            ),
        ),
        headers={
            "Authorization": "Bearer " + _get_iam_access_token(api_key=api_key),
        },
        prompt="Describe the image in three sentences. Be concise and accurate.",
        timeout=60,
    )
    return options


### Usage and conversion
def main():
    logging.basicConfig(level=logging.INFO)

    data_folder = Path(__file__).parent / "../../../tests/data"
    input_doc_path = data_folder / "pdf/2206.01062.pdf"

    pipeline_options = PdfPipelineOptions(
        enable_remote_services=True  # <-- this is required!
    )
    pipeline_options.do_picture_description = True

    # The PictureDescriptionApiOptions() allows interfacing with APIs that support
    # the multi-modal chat interface. Here follow a few examples of how to
    # configure them.
    #
    # One possibility is self-hosting a model, e.g. via vLLM.
    # $ vllm serve MODEL_NAME
    # Then PictureDescriptionApiOptions can point to the localhost endpoint.

    # Example for the Granite Vision model:
    # (uncomment the following lines)
    # pipeline_options.picture_description_options = vllm_local_options(
    #     model="ibm-granite/granite-vision-3.3-2b"
    # )

    # Example for the SmolVLM model:
    # (uncomment the following lines)
    # pipeline_options.picture_description_options = vllm_local_options(
    #     model="HuggingFaceTB/SmolVLM-256M-Instruct"
    # )

    # For using models on LM Studio with the built-in GGUF or MLX runtimes, e.g. the SmolVLM model:
    pipeline_options.picture_description_options = lms_local_options(
        model="smolvlm-256m-instruct"
    )

    # Another possibility is using online services, e.g. watsonx.ai.
    # Using it requires setting the env variables WX_API_KEY and WX_PROJECT_ID.
    # (uncomment the following lines)
    # pipeline_options.picture_description_options = watsonx_vlm_options()

    doc_converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_options=pipeline_options,
            )
        }
    )
    result = doc_converter.convert(input_doc_path)

    for element, _level in result.document.iterate_items():
        if isinstance(element, PictureItem):
            print(
                f"Picture {element.self_ref}\n"
                f"Caption: {element.caption_text(doc=result.document)}\n"
                f"Meta: {element.meta}"
            )


if __name__ == "__main__":
    main()
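

# %% [markdown]
# A minimal sketch of one more provider following the same pattern: Ollama also
# exposes an OpenAI-compatible chat endpoint, by default at
# `http://localhost:11434/v1/chat/completions` (the same endpoint used by the
# Ollama example elsewhere in these docs). The helper name and model tag below
# are illustrative assumptions, not part of the example above; parameter support
# (e.g. `seed`) can vary by server.
# %%
def ollama_local_options(model: str):
    # Assumes a local Ollama server with the model already pulled, e.g.:
    # $ ollama pull granite3.2-vision:2b
    options = PictureDescriptionApiOptions(
        url="http://localhost:11434/v1/chat/completions",
        params=dict(
            model=model,
        ),
        prompt="Describe the image in three sentences. Be concise and accurate.",
        timeout=90,
    )
    return options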
{ "repo_id": "docling-project/docling", "file_path": "docs/examples/legacy/pictures_description_api_legacy.py", "license": "MIT License", "lines": 150, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
docling-project/docling:docs/examples/legacy/vlm_pipeline_api_model_legacy.py
# %% [markdown]
# Use the VLM pipeline with remote API models (LM Studio, Ollama, watsonx.ai).
#
# What this example does
# - Shows how to configure `ApiVlmOptions` for different VLM providers.
# - Converts a single PDF page using the VLM pipeline and prints Markdown.
#
# Prerequisites
# - Install Docling with VLM extras and `python-dotenv` if using environment files.
# - For local APIs: run LM Studio (HTTP server) or Ollama locally.
# - For cloud APIs: set required environment variables (see below).
# - Requires `requests` for HTTP calls and `python-dotenv` if loading env vars from `.env`.
#
# How to run
# - From the repo root: `python docs/examples/legacy/vlm_pipeline_api_model_legacy.py`.
# - The script prints the converted Markdown to stdout.
#
# Choosing a provider
# - Uncomment exactly one `pipeline_options.vlm_options = ...` block below.
# - Keep `enable_remote_services=True` to permit calling remote APIs.
#
# Notes
# - LM Studio default endpoint: `http://localhost:1234/v1/chat/completions`.
# - Ollama default endpoint: `http://localhost:11434/v1/chat/completions`.
# - watsonx.ai requires `WX_API_KEY` and `WX_PROJECT_ID` in env/`.env`.
# %%
import json
import logging
import os
from pathlib import Path
from typing import Optional

import requests
from docling_core.types.doc.page import SegmentedPage
from dotenv import load_dotenv

from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import (
    VlmPipelineOptions,
)
from docling.datamodel.pipeline_options_vlm_model import ApiVlmOptions, ResponseFormat
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.vlm_pipeline import VlmPipeline

### Example of ApiVlmOptions definitions


#### Using LM Studio or VLLM (OpenAI-compatible APIs)
def openai_compatible_vlm_options(
    model: str,
    prompt: str,
    format: ResponseFormat,
    hostname_and_port,
    temperature: float = 0.7,
    max_tokens: int = 4096,
    api_key: str = "",
    skip_special_tokens=False,
):
    headers = {}
    if api_key:
        headers["Authorization"] = f"Bearer {api_key}"
    options = ApiVlmOptions(
        url=f"http://{hostname_and_port}/v1/chat/completions",  # LM Studio defaults to port 1234, VLLM to 8000
        params=dict(
            model=model,
            max_tokens=max_tokens,
            skip_special_tokens=skip_special_tokens,  # needed for VLLM
        ),
        headers=headers,
        prompt=prompt,
        timeout=90,
        scale=2.0,
        temperature=temperature,
        response_format=format,
    )
    return options


#### Using LM Studio with OlmOcr model
def lms_olmocr_vlm_options(model: str):
    class OlmocrVlmOptions(ApiVlmOptions):
        def build_prompt(self, page: Optional[SegmentedPage]) -> str:
            if page is None:
                return self.prompt.replace("#RAW_TEXT#", "")

            anchor = [
                f"Page dimensions: {int(page.dimension.width)}x{int(page.dimension.height)}"
            ]

            for text_cell in page.textline_cells:
                if not text_cell.text.strip():
                    continue
                bbox = text_cell.rect.to_bounding_box().to_bottom_left_origin(
                    page.dimension.height
                )
                anchor.append(f"[{int(bbox.l)}x{int(bbox.b)}] {text_cell.text}")

            for image_cell in page.bitmap_resources:
                bbox = image_cell.rect.to_bounding_box().to_bottom_left_origin(
                    page.dimension.height
                )
                anchor.append(
                    f"[Image {int(bbox.l)}x{int(bbox.b)} to {int(bbox.r)}x{int(bbox.t)}]"
                )

            if len(anchor) == 1:
                anchor.append(
                    f"[Image 0x0 to {int(page.dimension.width)}x{int(page.dimension.height)}]"
                )

            # The original prompt sorts the cells; we skip that here for simplicity.
            raw_text = "\n".join(anchor)

            return self.prompt.replace("#RAW_TEXT#", raw_text)

        def decode_response(self, text: str) -> str:
            # OlmOcr is trained to generate a JSON response with language,
            # rotation, and other info.
            try:
                generated_json = json.loads(text)
            except json.decoder.JSONDecodeError:
                return ""
            return generated_json["natural_text"]

    options = OlmocrVlmOptions(
        url="http://localhost:1234/v1/chat/completions",
        params=dict(
            model=model,
        ),
        prompt=(
            "Below is the image of one page of a document, as well as some raw textual"
            " content that was previously extracted for it. Just return the plain text"
            " representation of this document as if you were reading it naturally.\n"
            "Do not hallucinate.\n"
            "RAW_TEXT_START\n#RAW_TEXT#\nRAW_TEXT_END"
        ),
        timeout=90,
        scale=1.0,
        max_size=1024,  # from OlmOcr pipeline
        response_format=ResponseFormat.MARKDOWN,
    )
    return options


#### Using Ollama
def ollama_vlm_options(model: str, prompt: str):
    options = ApiVlmOptions(
        url="http://localhost:11434/v1/chat/completions",  # the default Ollama endpoint
        params=dict(
            model=model,
        ),
        prompt=prompt,
        timeout=90,
        scale=1.0,
        response_format=ResponseFormat.MARKDOWN,
    )
    return options


#### Using a cloud service like IBM watsonx.ai
def watsonx_vlm_options(model: str, prompt: str):
    load_dotenv()
    api_key = os.environ.get("WX_API_KEY")
    project_id = os.environ.get("WX_PROJECT_ID")

    def _get_iam_access_token(api_key: str) -> str:
        res = requests.post(
            url="https://iam.cloud.ibm.com/identity/token",
            headers={
                "Content-Type": "application/x-www-form-urlencoded",
            },
            data=f"grant_type=urn:ibm:params:oauth:grant-type:apikey&apikey={api_key}",
        )
        res.raise_for_status()
        api_out = res.json()
        print(f"{api_out=}")
        return api_out["access_token"]

    options = ApiVlmOptions(
        url="https://us-south.ml.cloud.ibm.com/ml/v1/text/chat?version=2023-05-29",
        params=dict(
            model_id=model,
            project_id=project_id,
            parameters=dict(
                max_new_tokens=400,
            ),
        ),
        headers={
            "Authorization": "Bearer " + _get_iam_access_token(api_key=api_key),
        },
        prompt=prompt,
        timeout=60,
        response_format=ResponseFormat.MARKDOWN,
    )
    return options


### Usage and conversion
def main():
    logging.basicConfig(level=logging.INFO)

    data_folder = Path(__file__).parent / "../../../tests/data"
    input_doc_path = data_folder / "pdf/2305.03393v1-pg9.pdf"

    # Configure the VLM pipeline. Enabling remote services allows HTTP calls to
    # locally hosted APIs (LM Studio, Ollama) or cloud services.
    pipeline_options = VlmPipelineOptions(
        enable_remote_services=True  # required when calling remote VLM endpoints
    )

    # The ApiVlmOptions() allows interfacing with APIs that support the
    # multi-modal chat interface. Here follow a few examples of how to configure them.

    # One possibility is self-hosting the model, e.g., via LM Studio, Ollama or VLLM.
    #
    # e.g. with VLLM, serve granite-docling with these commands:
    # > vllm serve ibm-granite/granite-docling-258M --revision untied
    #
    # with LM Studio, serve granite-docling with these commands:
    # > lms server start
    # > lms load ibm-granite/granite-docling-258M-mlx

    # Example using the Granite-Docling model with LM Studio or VLLM:
    pipeline_options.vlm_options = openai_compatible_vlm_options(
        model="granite-docling-258m-mlx",  # For VLLM use "ibm-granite/granite-docling-258M"
        hostname_and_port="localhost:1234",  # LM Studio defaults to port 1234, VLLM to 8000
        prompt="Convert this page to docling.",
        format=ResponseFormat.DOCTAGS,
        api_key="",
    )

    # Example using the OlmOcr (dynamic prompt) model with LM Studio:
    # (uncomment the following lines)
    # pipeline_options.vlm_options = lms_olmocr_vlm_options(
    #     model="hf.co/lmstudio-community/olmOCR-7B-0225-preview-GGUF",
    # )

    # Example using the Granite Vision model with Ollama:
    # (uncomment the following lines)
    # pipeline_options.vlm_options = ollama_vlm_options(
    #     model="granite3.2-vision:2b",
    #     prompt="OCR the full page to markdown.",
    # )

    # Another possibility is using online services, e.g., watsonx.ai.
    # Using watsonx.ai requires setting env variables WX_API_KEY and WX_PROJECT_ID
    # (see the top-level docstring for details). You can use a .env file as well.
    # (uncomment the following lines)
    # pipeline_options.vlm_options = watsonx_vlm_options(
    #     model="ibm/granite-vision-3-2-2b", prompt="OCR the full page to markdown."
    # )

    # Create the DocumentConverter and launch the conversion.
    doc_converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_options=pipeline_options,
                pipeline_cls=VlmPipeline,
            )
        }
    )
    result = doc_converter.convert(input_doc_path)

    print(result.document.export_to_markdown())


if __name__ == "__main__":
    main()

# %%
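

# %% [markdown]
# A minimal follow-up sketch using the same customization hook as `OlmocrVlmOptions`
# above: subclass `ApiVlmOptions` and override `decode_response` to post-process raw
# model output. The class name and the fence-stripping behavior are illustrative
# assumptions, not part of Docling's API.
# %%
class FenceStrippingVlmOptions(ApiVlmOptions):
    def decode_response(self, text: str) -> str:
        # Some chat models wrap Markdown answers in ``` fences; strip them off.
        stripped = text.strip()
        if stripped.startswith("```"):
            lines = stripped.splitlines()[1:]  # drop the opening fence line
            if lines and lines[-1].strip().startswith("```"):
                lines = lines[:-1]  # drop a trailing closing fence, if present
            stripped = "\n".join(lines)
        return stripped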
{ "repo_id": "docling-project/docling", "file_path": "docs/examples/legacy/vlm_pipeline_api_model_legacy.py", "license": "MIT License", "lines": 233, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
docling-project/docling:docs/examples/picture_description_inline.py
# %% [markdown] # Picture Description with Inline VLM Models # # What this example does # - Demonstrates picture description in standard PDF pipeline # - Shows default preset, changing presets, and manual configuration without presets # - Enriches documents with AI-generated image captions # # Prerequisites # - Install Docling with VLM extras: `pip install docling[vlm]` # - Ensure your environment can download model weights # # How to run # - From the repository root: `python docs/examples/picture_description_inline.py` # # Notes # - This uses the standard PDF pipeline (not VlmPipeline) # - For API-based picture description, see `pictures_description_api.py` # - For legacy PictureDescriptionVlmOptions approach, see `picture_description_inline_legacy.py` # %% import logging import os from pathlib import Path from docling_core.types.doc import PictureItem from docling.datamodel.base_models import InputFormat from docling.datamodel.pipeline_options import ( PdfPipelineOptions, PictureDescriptionVlmEngineOptions, PictureDescriptionVlmOptions, ) from docling.datamodel.pipeline_options_vlm_model import ResponseFormat from docling.datamodel.stage_model_specs import VlmModelSpec from docling.datamodel.vlm_engine_options import AutoInlineVlmEngineOptions from docling.document_converter import DocumentConverter, PdfFormatOption logging.basicConfig(level=logging.INFO) # Test document with images input_doc_path = Path("tests/data/pdf/2206.01062.pdf") # Check if running in CI IS_CI = os.environ.get("CI", "").lower() in ("true", "1", "yes") ###### EXAMPLE 1: Using default VLM for picture description (SmolVLM) print("=" * 60) print("Example 1: Default picture description (SmolVLM preset)") print("=" * 60) pipeline_options = PdfPipelineOptions() pipeline_options.do_picture_description = True # When no picture_description_options is set, it uses the default (SmolVLM) converter = DocumentConverter( format_options={ InputFormat.PDF: PdfFormatOption( pipeline_options=pipeline_options, ) } ) result = converter.convert(input_doc_path) # Print picture descriptions for element, _level in result.document.iterate_items(): if isinstance(element, PictureItem): print( f"Picture {element.self_ref}\n" f"Caption: {element.caption_text(doc=result.document)}\n" f"Meta: {element.meta}" ) ###### EXAMPLE 2: Change to Granite Vision preset (skipped in CI) if not IS_CI: print("\n" + "=" * 60) print("Example 2: Using Granite Vision preset") print("=" * 60) pipeline_options = PdfPipelineOptions() pipeline_options.do_picture_description = True pipeline_options.picture_description_options = ( PictureDescriptionVlmEngineOptions.from_preset("granite_vision") ) converter = DocumentConverter( format_options={ InputFormat.PDF: PdfFormatOption( pipeline_options=pipeline_options, ) } ) result = converter.convert(input_doc_path) for element, _level in result.document.iterate_items(): if isinstance(element, PictureItem): print( f"Picture {element.self_ref}\n" f"Caption: {element.caption_text(doc=result.document)}\n" f"Meta: {element.meta}" ) else: print("\n" + "=" * 60) print("Example 2: Skipped (running in CI environment)") print("=" * 60) ###### EXAMPLE 3: Without presets - manually configuring model and runtime print("\n" + "=" * 60) print("Example 3: Manual configuration without presets") print("=" * 60) # You can manually configure the model spec and runtime options without using presets pipeline_options = PdfPipelineOptions() pipeline_options.do_picture_description = True pipeline_options.picture_description_options = 
PictureDescriptionVlmEngineOptions( model_spec=VlmModelSpec( name="SmolVLM-256M-Custom", default_repo_id="HuggingFaceTB/SmolVLM-256M-Instruct", prompt="Provide a detailed technical description of this image, focusing on any diagrams, charts, or technical content.", response_format=ResponseFormat.PLAINTEXT, ), engine_options=AutoInlineVlmEngineOptions(), prompt="Provide a detailed technical description of this image, focusing on any diagrams, charts, or technical content.", ) converter = DocumentConverter( format_options={ InputFormat.PDF: PdfFormatOption( pipeline_options=pipeline_options, ) } ) result = converter.convert(input_doc_path) for element, _level in result.document.iterate_items(): if isinstance(element, PictureItem): print( f"Picture {element.self_ref}\n" f"Caption: {element.caption_text(doc=result.document)}\n" f"Meta: {element.meta}" ) # %% [markdown] # ## Summary # # This example shows three approaches: # 1. **Default**: No configuration needed, uses SmolVLM preset automatically # 2. **Preset-based**: Use `from_preset()` to select a different model (e.g., granite_vision) # 3. **Manual configuration**: Manually create VlmModelSpec and runtime options without presets # # Available presets: smolvlm, granite_vision, pixtral, qwen # # For API-based picture description (vLLM, LM Studio, watsonx.ai), see `pictures_description_api.py` # For the legacy approach using PictureDescriptionVlmOptions, see `picture_description_inline_legacy.py`
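
# %%
# A small discovery sketch (based on the preset registry exercised in this repo's
# preset tests): `list_preset_ids()` and `get_preset()` can be used to enumerate
# the registered picture-description presets at runtime before picking one.
for preset_id in PictureDescriptionVlmEngineOptions.list_preset_ids():
    preset = PictureDescriptionVlmEngineOptions.get_preset(preset_id)
    print(f"{preset_id}: {preset.name}")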
{ "repo_id": "docling-project/docling", "file_path": "docs/examples/picture_description_inline.py", "license": "MIT License", "lines": 135, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
docling-project/docling:tests/test_vlm_presets_and_runtime_options.py
"""Tests for VLM preset system and runtime options management. This test suite validates: 1. Preset registration and retrieval 2. Runtime options creation and validation 3. Preset-based options creation with runtime overrides 4. Model spec runtime-specific configurations 5. All three stage types (VlmConvert, PictureDescription, CodeFormula) """ import pytest from pydantic import ValidationError from docling.datamodel.pipeline_options import ( CodeFormulaVlmOptions, PictureDescriptionVlmEngineOptions, VlmConvertOptions, ) from docling.datamodel.pipeline_options_vlm_model import ResponseFormat from docling.datamodel.stage_model_specs import ( ApiModelConfig, EngineModelConfig, StageModelPreset, VlmModelSpec, ) from docling.datamodel.vlm_engine_options import ( ApiVlmEngineOptions, AutoInlineVlmEngineOptions, MlxVlmEngineOptions, TransformersVlmEngineOptions, VllmVlmEngineOptions, ) from docling.models.inference_engines.vlm import VlmEngineType # ============================================================================= # RUNTIME OPTIONS TESTS # ============================================================================= class TestRuntimeOptions: """Test runtime options creation and validation.""" def test_auto_inline_engine_options(self): """Test AutoInlineVlmEngineOptions creation.""" options = AutoInlineVlmEngineOptions() assert options.engine_type == VlmEngineType.AUTO_INLINE assert options.prefer_vllm is False options_with_vllm = AutoInlineVlmEngineOptions(prefer_vllm=True) assert options_with_vllm.prefer_vllm is True def test_transformers_engine_options(self): """Test TransformersVlmEngineOptions creation and defaults.""" options = TransformersVlmEngineOptions() assert options.engine_type == VlmEngineType.TRANSFORMERS assert options.load_in_8bit is True assert options.llm_int8_threshold == 6.0 assert options.quantized is False assert options.trust_remote_code is False assert options.use_kv_cache is True # Test custom values custom_options = TransformersVlmEngineOptions( load_in_8bit=False, trust_remote_code=True, torch_dtype="float16", compile_model=True, ) assert custom_options.load_in_8bit is False assert custom_options.trust_remote_code is True assert custom_options.torch_dtype == "float16" assert custom_options.compile_model is True def test_mlx_engine_options(self): """Test MlxVlmEngineOptions creation.""" options = MlxVlmEngineOptions() assert options.engine_type == VlmEngineType.MLX assert options.trust_remote_code is False options_with_trust = MlxVlmEngineOptions(trust_remote_code=True) assert options_with_trust.trust_remote_code is True def test_api_engine_options(self): """Test ApiVlmEngineOptions for different API types.""" # Test Ollama ollama_options = ApiVlmEngineOptions(engine_type=VlmEngineType.API_OLLAMA) assert ollama_options.engine_type == VlmEngineType.API_OLLAMA assert ollama_options.timeout == 60.0 # Default timeout assert ollama_options.concurrency == 1 # Test OpenAI openai_options = ApiVlmEngineOptions( engine_type=VlmEngineType.API_OPENAI, timeout=60.0, concurrency=5, ) assert openai_options.engine_type == VlmEngineType.API_OPENAI assert openai_options.timeout == 60.0 assert openai_options.concurrency == 5 # Test LM Studio lmstudio_options = ApiVlmEngineOptions(engine_type=VlmEngineType.API_LMSTUDIO) assert lmstudio_options.engine_type == VlmEngineType.API_LMSTUDIO # Test Generic API generic_options = ApiVlmEngineOptions(engine_type=VlmEngineType.API) assert generic_options.engine_type == VlmEngineType.API def test_vllm_engine_options(self): """Test 
VllmVlmEngineOptions creation.""" options = VllmVlmEngineOptions() assert options.engine_type == VlmEngineType.VLLM # ============================================================================= # MODEL SPEC TESTS # ============================================================================= class TestVlmModelSpec: """Test VlmModelSpec functionality.""" def test_basic_model_spec(self): """Test basic model spec creation.""" spec = VlmModelSpec( name="Test Model", default_repo_id="test/model", prompt="Test prompt", response_format=ResponseFormat.DOCTAGS, ) assert spec.name == "Test Model" assert spec.default_repo_id == "test/model" assert spec.revision == "main" assert spec.prompt == "Test prompt" assert spec.response_format == ResponseFormat.DOCTAGS def test_model_spec_with_engine_overrides(self): """Test model spec with engine-specific overrides.""" spec = VlmModelSpec( name="Test Model", default_repo_id="test/model", prompt="Test prompt", response_format=ResponseFormat.DOCTAGS, engine_overrides={ VlmEngineType.MLX: EngineModelConfig( repo_id="test/model-mlx", revision="v1.0" ), VlmEngineType.TRANSFORMERS: EngineModelConfig(revision="v2.0"), }, ) # Test default repo_id assert spec.get_repo_id(VlmEngineType.AUTO_INLINE) == "test/model" # Test MLX override assert spec.get_repo_id(VlmEngineType.MLX) == "test/model-mlx" assert spec.get_revision(VlmEngineType.MLX) == "v1.0" # Test Transformers override (only revision) assert spec.get_repo_id(VlmEngineType.TRANSFORMERS) == "test/model" assert spec.get_revision(VlmEngineType.TRANSFORMERS) == "v2.0" def test_model_spec_with_api_overrides(self): """Test model spec with API-specific overrides.""" spec = VlmModelSpec( name="Test Model", default_repo_id="test/model", prompt="Test prompt", response_format=ResponseFormat.MARKDOWN, api_overrides={ VlmEngineType.API_OLLAMA: ApiModelConfig( params={"model": "test-model:latest", "max_tokens": 4096} ), }, ) # Test default API params default_params = spec.get_api_params(VlmEngineType.API_OPENAI) assert default_params == {"model": "test/model"} # Test Ollama override ollama_params = spec.get_api_params(VlmEngineType.API_OLLAMA) assert ollama_params["model"] == "test-model:latest" assert ollama_params["max_tokens"] == 4096 def test_model_spec_supported_engines(self): """Test model spec with supported engines restriction.""" spec = VlmModelSpec( name="API-Only Model", default_repo_id="test/model", prompt="Test prompt", response_format=ResponseFormat.MARKDOWN, supported_engines={VlmEngineType.API_OLLAMA, VlmEngineType.API_OPENAI}, ) assert spec.is_engine_supported(VlmEngineType.API_OLLAMA) is True assert spec.is_engine_supported(VlmEngineType.API_OPENAI) is True assert spec.is_engine_supported(VlmEngineType.TRANSFORMERS) is False assert spec.is_engine_supported(VlmEngineType.MLX) is False # Test spec with no restrictions unrestricted_spec = VlmModelSpec( name="Universal Model", default_repo_id="test/model", prompt="Test prompt", response_format=ResponseFormat.DOCTAGS, ) assert unrestricted_spec.is_engine_supported(VlmEngineType.TRANSFORMERS) is True assert unrestricted_spec.is_engine_supported(VlmEngineType.MLX) is True # ============================================================================= # PRESET SYSTEM TESTS # ============================================================================= class TestPresetSystem: """Test preset registration and retrieval.""" def test_vlm_convert_presets_exist(self): """Test that VlmConvert presets are registered.""" preset_ids = VlmConvertOptions.list_preset_ids() # 
Check that key presets exist assert "smoldocling" in preset_ids assert "granite_docling" in preset_ids assert "deepseek_ocr" in preset_ids assert "granite_vision" in preset_ids assert "pixtral" in preset_ids assert "got_ocr" in preset_ids # Verify we can retrieve them smoldocling = VlmConvertOptions.get_preset("smoldocling") assert smoldocling.preset_id == "smoldocling" assert smoldocling.name == "SmolDocling" assert smoldocling.model_spec.response_format == ResponseFormat.DOCTAGS def test_picture_description_presets_exist(self): """Test that PictureDescription presets are registered.""" preset_ids = PictureDescriptionVlmEngineOptions.list_preset_ids() # Check that key presets exist assert "smolvlm" in preset_ids assert "granite_vision" in preset_ids assert "pixtral" in preset_ids assert "qwen" in preset_ids # Verify we can retrieve them smolvlm = PictureDescriptionVlmEngineOptions.get_preset("smolvlm") assert smolvlm.preset_id == "smolvlm" assert smolvlm.name == "SmolVLM-256M" # Full model name def test_code_formula_presets_exist(self): """Test that CodeFormula presets are registered.""" preset_ids = CodeFormulaVlmOptions.list_preset_ids() # Check that key presets exist assert "codeformulav2" in preset_ids assert "granite_docling" in preset_ids # Verify we can retrieve them codeformulav2 = CodeFormulaVlmOptions.get_preset("codeformulav2") assert codeformulav2.preset_id == "codeformulav2" assert codeformulav2.name == "CodeFormulaV2" granite_docling = CodeFormulaVlmOptions.get_preset("granite_docling") assert granite_docling.preset_id == "granite_docling" assert granite_docling.name == "Granite-Docling-CodeFormula" def test_preset_not_found_error(self): """Test that requesting non-existent preset raises KeyError.""" with pytest.raises(KeyError) as exc_info: VlmConvertOptions.get_preset("nonexistent_preset") assert "nonexistent_preset" in str(exc_info.value) assert "Available presets:" in str(exc_info.value) def test_list_presets(self): """Test listing all presets for a stage.""" vlm_convert_presets = VlmConvertOptions.list_presets() assert len(vlm_convert_presets) >= 6 # At least 6 VlmConvert presets assert all(isinstance(p, StageModelPreset) for p in vlm_convert_presets) picture_desc_presets = PictureDescriptionVlmEngineOptions.list_presets() assert len(picture_desc_presets) >= 4 # At least 4 PictureDescription presets code_formula_presets = CodeFormulaVlmOptions.list_presets() assert len(code_formula_presets) >= 1 # At least 1 CodeFormula preset def test_get_preset_info(self): """Test getting preset summary information.""" info = VlmConvertOptions.get_preset_info() assert len(info) >= 6 # Check structure of info for preset_info in info: assert "preset_id" in preset_info assert "name" in preset_info assert "description" in preset_info assert "model" in preset_info assert "default_engine" in preset_info # ============================================================================= # PRESET-BASED OPTIONS CREATION TESTS # ============================================================================= class TestPresetBasedOptionsCreation: """Test creating options from presets.""" def test_create_vlm_convert_from_preset_default_runtime(self): """Test creating VlmConvertOptions from preset with default runtime.""" options = VlmConvertOptions.from_preset("smoldocling") assert options.model_spec is not None assert options.model_spec.name == "SmolDocling-256M" assert options.model_spec.response_format == ResponseFormat.DOCTAGS assert options.engine_options is not None assert 
options.engine_options.engine_type == VlmEngineType.AUTO_INLINE assert options.scale == 2.0 def test_create_vlm_convert_from_preset_with_engine_override(self): """Test creating VlmConvertOptions with engine override.""" # Override with Transformers engine transformers_engine = TransformersVlmEngineOptions(load_in_8bit=False) options = VlmConvertOptions.from_preset( "smoldocling", engine_options=transformers_engine ) assert options.engine_options.engine_type == VlmEngineType.TRANSFORMERS assert isinstance(options.engine_options, TransformersVlmEngineOptions) assert options.engine_options.load_in_8bit is False assert options.model_spec.name == "SmolDocling-256M" # Override with MLX engine mlx_engine = MlxVlmEngineOptions() options_mlx = VlmConvertOptions.from_preset( "granite_docling", engine_options=mlx_engine ) assert options_mlx.engine_options.engine_type == VlmEngineType.MLX assert options_mlx.model_spec.name == "Granite-Docling-258M" # Override with API engine api_engine = ApiVlmEngineOptions( engine_type=VlmEngineType.API_OLLAMA, timeout=60.0 ) options_api = VlmConvertOptions.from_preset( "deepseek_ocr", engine_options=api_engine ) assert options_api.engine_options.engine_type == VlmEngineType.API_OLLAMA assert isinstance(options_api.engine_options, ApiVlmEngineOptions) assert options_api.engine_options.timeout == 60.0 def test_create_picture_description_from_preset(self): """Test creating PictureDescriptionVlmOptions from preset.""" # PictureDescriptionVlmOptions has legacy fields that need to be provided # Skip this test as it requires backward compatibility handling # The preset system works for VlmConvert and CodeFormula which don't have legacy fields pytest.skip( "PictureDescriptionVlmOptions requires legacy repo_id field - backward compatibility issue" ) def test_create_code_formula_from_preset(self): """Test creating CodeFormulaVlmOptions from preset.""" options = CodeFormulaVlmOptions.from_preset("codeformulav2") assert options.model_spec is not None assert options.engine_options is not None assert options.scale == 2.0 def test_preset_with_parameter_overrides(self): """Test creating options from preset with additional parameter overrides.""" options = VlmConvertOptions.from_preset( "smoldocling", scale=3.0, max_size=2048, ) assert options.scale == 3.0 assert options.max_size == 2048 assert options.model_spec.name == "SmolDocling-256M" def test_preset_mlx_engine_override_uses_mlx_repo(self): """Test that MLX engine uses MLX-specific repo_id from model spec.""" preset = VlmConvertOptions.get_preset("smoldocling") # Check that MLX override exists assert VlmEngineType.MLX in preset.model_spec.engine_overrides # Get repo_id for different engines default_repo = preset.model_spec.get_repo_id(VlmEngineType.TRANSFORMERS) mlx_repo = preset.model_spec.get_repo_id(VlmEngineType.MLX) assert default_repo == "docling-project/SmolDocling-256M-preview" assert mlx_repo == "docling-project/SmolDocling-256M-preview-mlx-bf16" assert default_repo != mlx_repo def test_preset_api_override_uses_api_params(self): """Test that API engine uses API-specific params from model spec.""" preset = VlmConvertOptions.get_preset("granite_docling") # Check that API override exists for Ollama assert VlmEngineType.API_OLLAMA in preset.model_spec.api_overrides # Get API params default_params = preset.model_spec.get_api_params(VlmEngineType.API_OPENAI) ollama_params = preset.model_spec.get_api_params(VlmEngineType.API_OLLAMA) assert default_params["model"] == "ibm-granite/granite-docling-258M" assert 
ollama_params["model"] == "ibm/granite-docling:258m" # ============================================================================= # INTEGRATION TESTS # ============================================================================= class TestPresetEngineIntegration: """Test integration between presets and engine options.""" def test_all_vlm_convert_presets_can_be_instantiated(self): """Test that all VlmConvert presets can be instantiated.""" preset_ids = VlmConvertOptions.list_preset_ids() for preset_id in preset_ids: options = VlmConvertOptions.from_preset(preset_id) assert options.model_spec is not None assert options.engine_options is not None assert options.scale > 0 def test_all_picture_description_presets_can_be_instantiated(self): """Test that all PictureDescription presets can be instantiated.""" # Now fully supported with the new runtime options class preset_ids = PictureDescriptionVlmEngineOptions.list_preset_ids() for preset_id in preset_ids: options = PictureDescriptionVlmEngineOptions.from_preset(preset_id) assert options.model_spec is not None assert options.engine_options is not None def test_all_code_formula_presets_can_be_instantiated(self): """Test that all CodeFormula presets can be instantiated.""" preset_ids = CodeFormulaVlmOptions.list_preset_ids() for preset_id in preset_ids: options = CodeFormulaVlmOptions.from_preset(preset_id) assert options.model_spec is not None assert options.engine_options is not None def test_preset_with_all_engine_types(self): """Test that a preset can be used with all engine types.""" preset_id = "smoldocling" # Test with each engine type engine_options_list = [ AutoInlineVlmEngineOptions(), TransformersVlmEngineOptions(), MlxVlmEngineOptions(), ApiVlmEngineOptions(engine_type=VlmEngineType.API_OLLAMA), ApiVlmEngineOptions(engine_type=VlmEngineType.API_OPENAI), VllmVlmEngineOptions(), ] for engine_options in engine_options_list: options = VlmConvertOptions.from_preset( preset_id, engine_options=engine_options ) assert options.engine_options.engine_type == engine_options.engine_type def test_deepseek_ocr_preset_api_only(self): """Test that DeepSeek OCR preset is API-only.""" preset = VlmConvertOptions.get_preset("deepseek_ocr") # Should only support API engines assert preset.model_spec.supported_engines is not None assert VlmEngineType.API_OLLAMA in preset.model_spec.supported_engines assert VlmEngineType.TRANSFORMERS not in preset.model_spec.supported_engines assert VlmEngineType.MLX not in preset.model_spec.supported_engines def test_response_format_consistency(self): """Test that response formats are valid across all presets.""" # All presets should have valid response formats # Note: Presets may be shared across different stage types all_valid_formats = [ ResponseFormat.DOCTAGS, ResponseFormat.MARKDOWN, ResponseFormat.DEEPSEEKOCR_MARKDOWN, ResponseFormat.PLAINTEXT, ] # Check VlmConvert presets vlm_convert_presets = VlmConvertOptions.list_presets() for preset in vlm_convert_presets: assert preset.model_spec.response_format in all_valid_formats # Check PictureDescription presets picture_desc_presets = PictureDescriptionVlmEngineOptions.list_presets() for preset in picture_desc_presets: assert preset.model_spec.response_format in all_valid_formats # Check CodeFormula presets code_formula_presets = CodeFormulaVlmOptions.list_presets() for preset in code_formula_presets: assert preset.model_spec.response_format in all_valid_formats # ============================================================================= # EDGE CASES AND ERROR HANDLING 
# ============================================================================= class TestEdgeCases: """Test edge cases and error handling.""" def test_preset_registration_idempotent(self): """Test that registering the same preset twice doesn't cause issues.""" # Get current count initial_count = len(VlmConvertOptions.list_preset_ids()) # Try to register an existing preset again existing_preset = VlmConvertOptions.get_preset("smoldocling") VlmConvertOptions.register_preset(existing_preset) # Count should remain the same final_count = len(VlmConvertOptions.list_preset_ids()) assert initial_count == final_count def test_engine_options_validation(self): """Test that engine options are validated properly.""" # Valid options should work valid_options = TransformersVlmEngineOptions( load_in_8bit=True, llm_int8_threshold=6.0, ) assert valid_options.load_in_8bit is True # Invalid engine_type should fail with pytest.raises(ValidationError): ApiVlmEngineOptions(engine_type="invalid_engine") # type: ignore def test_model_spec_with_empty_overrides(self): """Test model spec with empty override dictionaries.""" spec = VlmModelSpec( name="Test Model", default_repo_id="test/model", prompt="Test prompt", response_format=ResponseFormat.DOCTAGS, engine_overrides={}, api_overrides={}, ) # Should use defaults assert spec.get_repo_id(VlmEngineType.TRANSFORMERS) == "test/model" assert spec.get_revision(VlmEngineType.MLX) == "main" assert spec.get_api_params(VlmEngineType.API_OLLAMA) == {"model": "test/model"} def test_preset_with_none_max_size(self): """Test that presets can have None for max_size.""" options = VlmConvertOptions.from_preset("smoldocling") # max_size can be None (no limit) assert options.max_size is None or isinstance(options.max_size, int)
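

# A further sketch in the same style (an addition, not an existing test in the
# suite): engine options are plain pydantic models, so they should round-trip
# unchanged through model_dump()/model_validate() -- useful when persisting
# configurations to disk.
class TestEngineOptionsRoundTripSketch:
    def test_transformers_options_roundtrip(self):
        original = TransformersVlmEngineOptions(
            torch_dtype="float16", compile_model=True
        )
        restored = TransformersVlmEngineOptions.model_validate(original.model_dump())
        assert restored == original
        assert restored.torch_dtype == "float16"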
{ "repo_id": "docling-project/docling", "file_path": "tests/test_vlm_presets_and_runtime_options.py", "license": "MIT License", "lines": 455, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
docling-project/docling:docs/examples/chart_extraction.py
# %% [markdown] # Extract chart data from a PDF and export the result as split-page HTML with layout. # # What this example does # - Converts a PDF with chart extraction enrichment enabled. # - Iterates detected pictures and prints extracted chart data as CSV to stdout. # - Saves the converted document as split-page HTML with layout to `scratch/`. # # Prerequisites # - Install Docling with the `granite_vision` extra (for chart extraction model). # - Install `pandas`. # # How to run # - From the repo root: `python docs/examples/chart_extraction.py`. # - Outputs are written to `scratch/`. # # Input document # - Defaults to `docs/examples/data/chart_document.pdf`. Change `input_doc_path` # as needed. # # Notes # - Enabling `do_chart_extraction` automatically enables picture classification. # - Supported chart types: bar chart, pie chart, line chart. # %% import logging import time from pathlib import Path import pandas as pd from docling_core.transforms.serializer.html import ( HTMLDocSerializer, HTMLOutputStyle, HTMLParams, ) from docling_core.transforms.visualizer.layout_visualizer import LayoutVisualizer from docling_core.types.doc import ImageRefMode, PictureItem from docling.datamodel.base_models import InputFormat from docling.datamodel.pipeline_options import PdfPipelineOptions from docling.document_converter import DocumentConverter, PdfFormatOption _log = logging.getLogger(__name__) def main(): logging.basicConfig(level=logging.INFO) input_doc_path = Path(__file__).parent / "data/chart_document.pdf" output_dir = Path("scratch") output_dir.mkdir(parents=True, exist_ok=True) # Configure the PDF pipeline with chart extraction enabled. # This automatically enables picture classification as well. pipeline_options = PdfPipelineOptions() pipeline_options.do_chart_extraction = True pipeline_options.generate_page_images = True pipeline_options.generate_picture_images = True doc_converter = DocumentConverter( format_options={ InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options) } ) start_time = time.time() conv_res = doc_converter.convert(input_doc_path) doc_filename = conv_res.input.file.stem # Iterate over document items and print extracted chart data. for item, _level in conv_res.document.iterate_items(): if not isinstance(item, PictureItem): continue if item.meta is None: continue # Check if the picture was classified as a chart. if item.meta.classification is not None: chart_type = item.meta.classification.get_main_prediction().class_name else: continue # Check if chart data was extracted. if item.meta.tabular_chart is None: continue table_data = item.meta.tabular_chart.chart_data print(f"## Chart type: {chart_type}") print(f" Size: {table_data.num_rows} rows x {table_data.num_cols} cols") # Build a DataFrame from the extracted table cells for display. grid: list[list[str]] = [ [""] * table_data.num_cols for _ in range(table_data.num_rows) ] for cell in table_data.table_cells: grid[cell.start_row_offset_idx][cell.start_col_offset_idx] = cell.text chart_df = pd.DataFrame(grid) print(chart_df.to_csv(index=False, header=False)) # Export the full document as split-page HTML with layout. 
html_filename = output_dir / f"{doc_filename}.html" ser = HTMLDocSerializer( doc=conv_res.document, params=HTMLParams( image_mode=ImageRefMode.EMBEDDED, output_style=HTMLOutputStyle.SPLIT_PAGE, ), ) visualizer = LayoutVisualizer() visualizer.params.show_label = False ser_res = ser.serialize( visualizer=visualizer, ) with open(html_filename, "w") as fw: fw.write(ser_res.text) _log.info(f"Saved split-page HTML to {html_filename}") elapsed = time.time() - start_time _log.info(f"Document converted and exported in {elapsed:.2f} seconds.") if __name__ == "__main__": main()
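

# A hedged follow-up sketch: the same cell-grid logic used for printing above can
# write each chart to disk instead. `save_chart_csv` is an illustrative helper
# (not part of the example) and assumes single-span cells, as produced by the
# chart extraction model.
def save_chart_csv(table_data, out_path: Path) -> None:
    # Rebuild the dense grid from the sparse cell list, then dump it as CSV.
    grid = [[""] * table_data.num_cols for _ in range(table_data.num_rows)]
    for cell in table_data.table_cells:
        grid[cell.start_row_offset_idx][cell.start_col_offset_idx] = cell.text
    pd.DataFrame(grid).to_csv(out_path, index=False, header=False)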
{ "repo_id": "docling-project/docling", "file_path": "docs/examples/chart_extraction.py", "license": "MIT License", "lines": 104, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
docling-project/docling:docling/models/stages/chart_extraction/granite_vision.py
import logging import re from collections.abc import Iterable from io import StringIO from pathlib import Path from typing import List, Literal, Optional import pandas as pd from docling_core.types.doc import ( DoclingDocument, NodeItem, PictureClassificationMetaField, PictureItem, PictureMeta, TableCell, TableData, TabularChartMetaField, ) from PIL import Image from pydantic import BaseModel from transformers import AutoModelForImageTextToText, AutoProcessor from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions from docling.datamodel.base_models import ItemAndImageEnrichmentElement from docling.models.base_model import BaseItemAndImageEnrichmentModel from docling.models.utils.hf_model_download import download_hf_model from docling.utils.accelerator_utils import decide_device _log = logging.getLogger(__name__) class ChartExtractionModelOptions(BaseModel): kind: Literal["chart_extraction"] = "chart_extraction" class ChartExtractionModelGraniteVision(BaseItemAndImageEnrichmentModel): SUPPORTED_CHART_TYPES = ["bar_chart", "pie_chart", "line_chart"] _model_repo_folder = "ibm-granite--granite-vision-3.3-2b-chart2csv-preview" _model_repo_id = "ibm-granite/granite-vision-3.3-2b-chart2csv-preview" _model_repo_revision = "6e1fbaae4604ecc85f4f371416d82154ca49ad67" def __init__( self, enabled: bool, artifacts_path: Optional[Path], options: ChartExtractionModelOptions, accelerator_options: AcceleratorOptions, ): """ Initializes the ChartExtractionModelGraniteVision with the given configuration. Parameters ---------- enabled : bool True if the model is enabled, False otherwise. artifacts_path : Optional[Path] Path to the directory containing the model artifacts. options : ChartExtractionModelOptions Configuration options for the model. accelerator_options : AcceleratorOptions Options specifying the device and number of threads for acceleration. """ self.enabled = enabled self.options = options if self.enabled: self.device = decide_device( accelerator_options.device, supported_devices=[AcceleratorDevice.CPU, AcceleratorDevice.CUDA], ) if artifacts_path is None: artifacts_path = self.download_models() elif (artifacts_path / self._model_repo_folder).exists(): artifacts_path = artifacts_path / self._model_repo_folder else: _log.warning( f"Model artifacts not found at {artifacts_path / self._model_repo_folder}, they will be downloaded." ) self._processor = AutoProcessor.from_pretrained( artifacts_path, ) self._model_max_length = self._processor.tokenizer.model_max_length self._model = AutoModelForImageTextToText.from_pretrained( artifacts_path, device_map=self.device ) self._model.eval() @staticmethod def download_models( local_dir: Optional[Path] = None, force: bool = False, progress: bool = False, ) -> Path: return download_hf_model( repo_id=ChartExtractionModelGraniteVision._model_repo_id, # Let's pin it to a specific commit to reduce potential regression errors revision=ChartExtractionModelGraniteVision._model_repo_revision, local_dir=local_dir, force=force, progress=progress, ) def is_processable(self, doc: DoclingDocument, element: NodeItem) -> bool: """ Determines if a given element in a document can be processed by the model. Parameters ---------- doc : DoclingDocument The document being processed. element : NodeItem The element within the document to check. Returns ------- bool True if the element can be processed, False otherwise.
""" if not self.enabled: return False if not isinstance(element, PictureItem): return False if element.meta is None or not isinstance(element.meta, PictureMeta): return False if element.meta.classification is None or not isinstance( element.meta.classification, PictureClassificationMetaField ): return False main_pred = element.meta.classification.get_main_prediction() return main_pred.class_name in self.SUPPORTED_CHART_TYPES def __call__( self, doc: DoclingDocument, element_batch: Iterable[ItemAndImageEnrichmentElement], ) -> Iterable[NodeItem]: """ Processes the given batch of elements and enriches them with predictions. Parameters ---------- doc : DoclingDocument The document being processed. element_batch : Iterable[ItemAndImageEnrichmentElement] A batch of elements to be processed. Returns ------- Iterable[NodeItem] An iterable of enriched elements. """ if not self.enabled: for element in element_batch: yield element.item return images: List[Image.Image] = [] elements: List[PictureItem] = [] for el in element_batch: elements.append(el.item) # type: ignore[arg-type] images.append(el.image) # Create a batch of conversations conversations = [] for image in images: conversations.append( [ { "role": "user", "content": [ {"type": "image", "image": image}, # <-- PIL Image here { "type": "text", "text": "Convert the information in this chart into a data table in CSV format.", }, ], }, ] ) # Process batch in a single call inputs = self._processor.apply_chat_template( conversations, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", padding=True, padding_side="left", ).to(self.device) eos_ids = [ self._processor.tokenizer.eos_token_id, self._processor.tokenizer.convert_tokens_to_ids("<|end_of_text|>"), ] # autoregressively complete the prompt for the whole batch output_ids = self._model.generate( **inputs, max_new_tokens=self._model_max_length, eos_token_id=eos_ids, ) output_texts = self._processor.batch_decode( output_ids, skip_special_tokens=True ) chart_data: list[Optional[TabularChartMetaField]] = self._post_process( outputs=output_texts ) for item, tabular_chart in zip(elements, chart_data): if (tabular_chart is not None) and isinstance(item, PictureItem): if (item.meta is not None) and isinstance(item.meta, PictureMeta): item.meta.tabular_chart = tabular_chart else: meta = PictureMeta(tabular_chart=tabular_chart) item.meta = meta yield item def _post_process( self, outputs: list[str] ) -> list[Optional[TabularChartMetaField]]: chart_data: list[Optional[TabularChartMetaField]] = [] for i, text in enumerate(outputs): # Post-process to extract DataFrame try: dataframe = self._extract_csv_to_dataframe(text) # After extracting the DataFrame, convert it to TableData table_data = self._dataframe_to_tabledata(dataframe) chart_data.append(TabularChartMetaField(chart_data=table_data)) except Exception as e: _log.error(f"Failed to extract DataFrame for image {i}: {e}") chart_data.append(None) return chart_data def _extract_csv_to_dataframe(self, decoded_text: str) -> pd.DataFrame: """ Extract CSV content from decoded text and convert to DataFrame.
Handles: - Chat format with <|assistant|> tags - Nested code blocks (```csv ``` inside ```) - Various CSV formatting issues Args: decoded_text: The decoded output from the model Returns: pandas DataFrame containing the CSV data """ # Extract the assistant's response assistant_match = re.search(r"<\|assistant\|>\s*(.*)", decoded_text, re.DOTALL) if not assistant_match: raise ValueError("Could not find assistant response in decoded text") assistant_response = assistant_match.group(1).strip() # Extract the first CSV code block (```csv ... ```) # This handles <|end_of_text|> tokens and multiple blocks in the output csv_match = re.search(r"```csv\s*\n(.*?)\n```", assistant_response, re.DOTALL) if csv_match: csv_content = csv_match.group(1).strip() else: # Fallback: take content up to first <|end_of_text|> and strip # code block markers csv_content = assistant_response.split("<|end_of_text|>")[0].strip() csv_content = re.sub(r"^```+(?:csv)?\s*", "", csv_content) csv_content = re.sub(r"```+\s*$", "", csv_content) csv_content = csv_content.strip() # Convert to DataFrame try: dataframe = pd.read_csv(StringIO(csv_content), header=None) return dataframe except Exception as e: _log.error(f"Error parsing CSV: {e}") _log.error(f"CSV content:\n{csv_content}") raise def _is_numeric(self, value) -> bool: """Check if a value is numeric (int or float).""" if pd.isna(value): return False try: float(value) return True except (ValueError, TypeError): return False def _dataframe_to_tabledata(self, df: pd.DataFrame) -> TableData: """ Transform a pandas DataFrame into a TableData object. Automatically infers if the first row is a header by checking if all values in the first row are non-numeric. Args: df: The pandas DataFrame to convert Returns: TableData object containing the table structure """ table_cells = [] # Infer if first row is header: check if all values in first row are non-numeric first_row_is_header = False if len(df) > 0: first_row = df.iloc[0] first_row_is_header = all(not self._is_numeric(val) for val in first_row) # Add header row cells if inferred if first_row_is_header: for col_idx, value in enumerate(df.iloc[0]): cell = TableCell( text=str(value), start_row_offset_idx=0, end_row_offset_idx=1, start_col_offset_idx=col_idx, end_col_offset_idx=col_idx + 1, row_span=1, col_span=1, column_header=True, row_header=False, row_section=False, fillable=False, ) table_cells.append(cell) # Add data cells (skip the first row if it was used as header) data_df = df.iloc[1:] if first_row_is_header else df row_offset = 1 if first_row_is_header else 0 for row_idx, (_idx, row) in enumerate(data_df.iterrows()): for col_idx, value in enumerate(row): # Convert value to string, handling NaN and None if pd.isna(value): text = "" else: text = str(value) # Check if the value is numeric - non-numeric cells are row headers is_row_header = not self._is_numeric(value) cell = TableCell( text=text, start_row_offset_idx=row_idx + row_offset, end_row_offset_idx=row_idx + row_offset + 1, start_col_offset_idx=col_idx, end_col_offset_idx=col_idx + 1, row_span=1, col_span=1, column_header=False, row_header=is_row_header, row_section=False, fillable=False, ) table_cells.append(cell) # Total rows equals DataFrame length in both cases: # with header: 1 header + (len(df) - 1) data rows = len(df) # without header: len(df) data rows num_rows = len(df) num_cols = len(df.columns) return TableData(table_cells=table_cells, num_rows=num_rows, num_cols=num_cols)
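

# A minimal usage sketch (illustrative, not part of the module): the header
# inference in _dataframe_to_tabledata marks an all-non-numeric first row as
# column headers. Constructing the instance via __new__ skips __init__ (and thus
# model loading), which is enough to exercise this pure post-processing method.
if __name__ == "__main__":
    demo = ChartExtractionModelGraniteVision.__new__(ChartExtractionModelGraniteVision)
    df = pd.DataFrame([["Year", "Sales"], ["2021", 10], ["2022", 12]])
    table = demo._dataframe_to_tabledata(df)
    print(table.num_rows, table.num_cols)  # -> 3 2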
{ "repo_id": "docling-project/docling", "file_path": "docling/models/stages/chart_extraction/granite_vision.py", "license": "MIT License", "lines": 320, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
docling-project/docling:tests/test_picture_description_filters.py
from docling_core.types.doc import ( PictureClassificationLabel, PictureClassificationMetaField, PictureMeta, ) from docling.models.picture_description_base_model import _passes_classification def _meta_with_predictions(predictions): return PictureMeta( classification=PictureClassificationMetaField(predictions=predictions) ) def test_passes_with_no_filters(): assert _passes_classification(None, None, None, 0.5) def test_allow_without_predictions_fails(): assert not _passes_classification( None, [PictureClassificationLabel.BAR_CHART], None, 0.0, ) def test_deny_without_predictions_passes(): assert _passes_classification( None, None, [PictureClassificationLabel.BAR_CHART], 0.0, ) def test_deny_blocks_matching_prediction(): meta = _meta_with_predictions([{"class_name": "bar_chart", "confidence": 0.9}]) assert not _passes_classification( meta, None, [PictureClassificationLabel.BAR_CHART], 0.0, ) def test_allow_accepts_matching_prediction(): meta = _meta_with_predictions([{"class_name": "bar_chart", "confidence": 0.9}]) assert _passes_classification( meta, [PictureClassificationLabel.BAR_CHART], None, 0.0, ) def test_allow_respects_min_confidence(): meta = _meta_with_predictions([{"class_name": "bar_chart", "confidence": 0.1}]) assert not _passes_classification( meta, [PictureClassificationLabel.BAR_CHART], None, 0.5, )
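

# A further sketch in the same style (an assumption about the intended semantics,
# not an existing test): with several predictions, one allowed label above the
# confidence threshold should be enough to pass.
def test_allow_passes_with_mixed_predictions_sketch():
    meta = _meta_with_predictions(
        [
            {"class_name": "pie_chart", "confidence": 0.2},
            {"class_name": "bar_chart", "confidence": 0.9},
        ]
    )
    assert _passes_classification(
        meta,
        [PictureClassificationLabel.BAR_CHART],
        None,
        0.5,
    )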
{ "repo_id": "docling-project/docling", "file_path": "tests/test_picture_description_filters.py", "license": "MIT License", "lines": 50, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
docling-project/docling:docling/utils/deepseekocr_utils.py
"""Utilities for parsing DeepSeek OCR annotated markdown format.""" import logging import re from typing import Optional, Union from docling_core.types.doc import ( BoundingBox, CoordOrigin, DocItemLabel, DoclingDocument, DocumentOrigin, ImageRef, ProvenanceItem, RefItem, Size, TableCell, TableData, TextItem, ) from lxml import etree from PIL import Image as PILImage _log = logging.getLogger(__name__) def _parse_table_html(html_content: str) -> TableData: """Parse HTML table content and create TableData structure. Args: html_content: HTML string containing <table> element Returns: TableData with parsed table structure """ # Extract table HTML if wrapped in other content table_match = re.search( r"<table[^>]*>.*?</table>", html_content, re.DOTALL | re.IGNORECASE ) if not table_match: # No table found, return empty table return TableData(num_rows=0, num_cols=0, table_cells=[]) table_html = table_match.group(0) try: # Parse HTML with lxml parser = etree.HTMLParser() tree = etree.fromstring(table_html, parser) # Find all rows rows = tree.xpath(".//tr") if not rows: return TableData(num_rows=0, num_cols=0, table_cells=[]) # Calculate grid dimensions num_rows = len(rows) num_cols = 0 # First pass: determine number of columns for row in rows: cells = row.xpath("./td | ./th") col_count = 0 for cell in cells: colspan = int(cell.get("colspan", "1")) col_count += colspan num_cols = max(num_cols, col_count) # Create grid to track cell positions grid: list[list[Union[None, str]]] = [ [None for _ in range(num_cols)] for _ in range(num_rows) ] table_data = TableData(num_rows=num_rows, num_cols=num_cols, table_cells=[]) # Second pass: populate cells for row_idx, row in enumerate(rows): cells = row.xpath("./td | ./th") col_idx = 0 for cell in cells: # Find next available column while col_idx < num_cols and grid[row_idx][col_idx] is not None: col_idx += 1 if col_idx >= num_cols: break # Get cell properties text = "".join(cell.itertext()).strip() colspan = int(cell.get("colspan", "1")) rowspan = int(cell.get("rowspan", "1")) is_header = cell.tag.lower() == "th" # Mark grid cells as occupied for r in range(row_idx, min(row_idx + rowspan, num_rows)): for c in range(col_idx, min(col_idx + colspan, num_cols)): grid[r][c] = text # Create table cell table_cell = TableCell( text=text, row_span=rowspan, col_span=colspan, start_row_offset_idx=row_idx, end_row_offset_idx=row_idx + rowspan, start_col_offset_idx=col_idx, end_col_offset_idx=col_idx + colspan, column_header=is_header and row_idx == 0, row_header=is_header and col_idx == 0, ) table_data.table_cells.append(table_cell) col_idx += colspan return table_data except Exception as e: _log.warning(f"Failed to parse table HTML: {e}") return TableData(num_rows=0, num_cols=0, table_cells=[]) def _collect_annotation_content( lines: list[str], i: int, label_str: str, annotation_pattern: str, visited_lines: set[int], ) -> tuple[str, int]: """Collect content for an annotation. 
Args: lines: All lines from the document i: Current line index (after annotation line) label_str: The annotation label (e.g., 'table', 'text') annotation_pattern: Regex pattern to match annotations visited_lines: Set of already visited line indices Returns: Tuple of (content string, next line index) """ content_lines = [] # Special handling for table: extract only <table>...</table> if label_str == "table": table_started = False ii = i while ii < len(lines): line = lines[ii] if "<table" in line.lower(): table_started = True if table_started: visited_lines.add(ii) content_lines.append(line.rstrip()) if table_started and "</table>" in line.lower(): break ii += 1 else: # Original logic for other labels while i < len(lines): content_line = lines[i].strip() if content_line: if re.match(annotation_pattern, content_line): break visited_lines.add(i) content_lines.append(lines[i].rstrip()) i += 1 if label_str not in ["figure", "image"]: break else: i += 1 if content_lines: break return "\n".join(content_lines), i def _process_annotation_item( label_str: str, content: str, prov: ProvenanceItem, caption_item: Optional[Union[TextItem, RefItem]], page_doc: DoclingDocument, label_map: dict[str, DocItemLabel], ) -> None: """Process and add a single annotation item to the document. Args: label_str: The annotation label content: The content text prov: Provenance information caption_item: Optional caption item to link page_doc: Document to add item to label_map: Mapping of label strings to DocItemLabel """ doc_label = label_map.get(label_str, DocItemLabel.TEXT) if label_str in ["figure", "image"]: page_doc.add_picture(caption=caption_item, prov=prov) elif label_str == "table": table_data = _parse_table_html(content) page_doc.add_table(data=table_data, caption=caption_item, prov=prov) elif label_str == "title": clean_content = content if content.startswith("#"): hash_count = 0 for char in content: if char == "#": hash_count += 1 else: break clean_content = content[hash_count:].strip() page_doc.add_title(text=clean_content, prov=prov) elif label_str == "sub_title": heading_level = 1 clean_content = content if content.startswith("#"): hash_count = 0 for char in content: if char == "#": hash_count += 1 else: break if hash_count > 1: heading_level = hash_count - 1 clean_content = content[hash_count:].strip() page_doc.add_heading(text=clean_content, level=heading_level, prov=prov) else: page_doc.add_text(label=doc_label, text=content, prov=prov) def parse_deepseekocr_markdown( content: str, original_page_size: Size, page_no: int, filename: str = "file", page_image: Optional[PILImage.Image] = None, ) -> DoclingDocument: """Parse DeepSeek OCR markdown with label[[x1, y1, x2, y2]] format. This function parses markdown content that has been annotated with bounding box coordinates for different document elements. 
    Labels supported:
    - text: Standard body text
    - title: Main document or section titles
    - sub_title: Secondary headings or sub-headers
    - table: Tabular data
    - table_caption: Descriptive text for tables
    - figure: Image-based elements or diagrams
    - figure_caption: Titles or descriptions for figures/images
    - header / footer: Content at top or bottom margins of pages

    Args:
        content: The annotated markdown content string
        original_page_size: Size of the page in document coordinates
        page_no: Page number to assign to the parsed page
        filename: Source filename (default: "file")
        page_image: Optional PIL Image of the page

    Returns:
        DoclingDocument with parsed content
    """
    # Label mapping
    label_map = {
        "text": DocItemLabel.TEXT,
        "title": DocItemLabel.TITLE,
        "sub_title": DocItemLabel.SECTION_HEADER,
        "table": DocItemLabel.TABLE,
        "table_caption": DocItemLabel.CAPTION,
        "figure": DocItemLabel.PICTURE,
        "figure_caption": DocItemLabel.CAPTION,
        "image": DocItemLabel.PICTURE,
        "image_caption": DocItemLabel.CAPTION,
        "header": DocItemLabel.PAGE_HEADER,
        "footer": DocItemLabel.PAGE_FOOTER,
    }

    # Pattern to match: <|ref|>label<|/ref|><|det|>[[x1, y1, x2, y2]]<|/det|> or label[[x1, y1, x2, y2]]
    annotation_pattern = r"^(?:<\|ref\|>)?(\w+)(?:<\|/ref\|>)?(?:<\|det\|>)?\[\[([0-9., ]+)\]\](?:<\|/det\|>)?\s*$"

    # Create a new document
    origin = DocumentOrigin(
        filename=filename,
        mimetype="text/markdown",
        binary_hash=0,
    )
    page_doc = DoclingDocument(name=filename.rsplit(".", 1)[0], origin=origin)

    # Page dimensions in document coordinates
    pg_width = original_page_size.width
    pg_height = original_page_size.height

    # Calculate scale factor for bbox conversion
    # The VLM emits bounding boxes on a fixed 1000x1000 grid
    scale_x = pg_width / 1000
    scale_y = pg_height / 1000

    # Calculate DPI for the image
    image_dpi = 72
    if page_image is not None:
        image_dpi = int(72 * page_image.width / pg_width)

    # Add page metadata
    page_doc.add_page(
        page_no=page_no,
        size=Size(width=pg_width, height=pg_height),
        image=ImageRef.from_pil(image=page_image, dpi=image_dpi)
        if page_image
        else None,
    )

    # Split into lines and parse - collect all annotations first
    lines = content.split("\n")
    annotations = []
    i = 0
    visited_lines: set[int] = set()

    while i < len(lines):
        if i in visited_lines:
            i += 1
            continue

        line = lines[i].strip()
        match = re.match(annotation_pattern, line)

        if match:
            label_str = match.group(1)
            coords_str = match.group(2)

            try:
                coords = [float(x.strip()) for x in coords_str.split(",")]
                if len(coords) == 4:
                    # Scale bounding box from the 1000-unit VLM grid to page coordinates
                    bbox = BoundingBox(
                        l=coords[0] * scale_x,
                        t=coords[1] * scale_y,
                        r=coords[2] * scale_x,
                        b=coords[3] * scale_y,
                        coord_origin=CoordOrigin.TOPLEFT,
                    )
                    prov = ProvenanceItem(page_no=page_no, bbox=bbox, charspan=[0, 0])

                    # Get the content (next non-empty line)
                    i += 1
                    content_text, i = _collect_annotation_content(
                        lines, i, label_str, annotation_pattern, visited_lines
                    )
                    annotations.append((label_str, content_text, prov))
                    continue
            except (ValueError, IndexError):
                pass

        i += 1

    # Process annotations and link captions that appear AFTER tables/figures
    for idx, (label_str, content_text, prov) in enumerate(annotations):
        # Check if NEXT annotation is a caption for this table/figure/image
        # (caption appears AFTER table in the file: table[[...]] then table_caption[[...]])
        caption_item = None
        if label_str in ["table", "figure", "image"] and idx + 1 < len(annotations):
            next_label, next_content, next_prov = annotations[idx + 1]
            if (
                (label_str == "table" and next_label == "table_caption")
                or (label_str == "figure" and next_label == "figure_caption")
                or (label_str == "image" and next_label == "image_caption")
            ):
                # Create caption item
                caption_label = label_map.get(next_label, DocItemLabel.CAPTION)
                caption_item = page_doc.add_text(
                    label=caption_label,
                    text=next_content,
                    prov=next_prov,
                )

        # Skip if this is a caption that was already processed
        if label_str in ["figure_caption", "table_caption", "image_caption"]:
            if idx > 0:
                prev_label = annotations[idx - 1][0]
                if (
                    (label_str == "table_caption" and prev_label == "table")
                    or (label_str == "figure_caption" and prev_label == "figure")
                    or (label_str == "image_caption" and prev_label == "image")
                ):
                    continue

        # Add the item
        _process_annotation_item(
            label_str, content_text, prov, caption_item, page_doc, label_map
        )

    return page_doc
{ "repo_id": "docling-project/docling", "file_path": "docling/utils/deepseekocr_utils.py", "license": "MIT License", "lines": 335, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
docling-project/docling:tests/test_deepseekocr_vlm.py
"""Test DeepSeek OCR markdown parsing in VLM pipeline.""" import json import os import sys from pathlib import Path import pytest from docling_core.types.doc import DoclingDocument, Size from PIL import Image as PILImage from docling.datamodel import vlm_model_specs from docling.datamodel.base_models import ( InputFormat, Page, PagePredictions, VlmPrediction, ) from docling.datamodel.document import ConversionResult, InputDocument from docling.datamodel.pipeline_options import VlmPipelineOptions from docling.document_converter import DocumentConverter, PdfFormatOption from docling.pipeline.vlm_pipeline import VlmPipeline from docling.utils.deepseekocr_utils import parse_deepseekocr_markdown from .test_data_gen_flag import GEN_TEST_DATA from .verify_utils import verify_document, verify_export GENERATE = GEN_TEST_DATA def get_md_deepseek_paths(): """Get all DeepSeek markdown test files.""" directory = Path("./tests/data/md_deepseek/") md_files = sorted(directory.glob("*.md")) return md_files def mock_parsing(content: str, filename: str) -> DoclingDocument: """Create a mock conversion result with the DeepSeek OCR markdown as VLM response.""" # Create a page with the DeepSeek OCR markdown as VLM response page = Page(page_no=1) page._image_cache[1.0] = PILImage.new("RGB", (612, 792), color="white") page.predictions = PagePredictions() page.predictions.vlm_response = VlmPrediction(text=content) # Parse the DeepSeek OCR markdown using the utility function doc = parse_deepseekocr_markdown( content=content, original_page_size=Size(width=612, height=792), page_image=page.image, page_no=1, filename=filename, ) return doc def test_e2e_deepseekocr_parsing(): """Test DeepSeek OCR markdown parsing for all test files.""" md_paths = get_md_deepseek_paths() for md_path in md_paths: # Read the annotated markdown content with open(md_path, encoding="utf-8") as f: annotated_content = f.read() # Define groundtruth path gt_path = md_path.parent.parent / "groundtruth" / "docling_v2" / md_path.name # Parse the markdown using mock_parsing doc: DoclingDocument = mock_parsing(annotated_content, md_path.name) # Export to markdown pred_md: str = doc.export_to_markdown() assert verify_export(pred_md, str(gt_path) + ".md", GENERATE), "export to md" # Export to indented text pred_itxt: str = doc._export_to_indented_text( max_text_len=70, explicit_tables=False ) assert verify_export(pred_itxt, str(gt_path) + ".itxt", GENERATE), ( "export to indented-text" ) # Verify document structure assert verify_document(doc, str(gt_path) + ".json", GENERATE), ( "document document" ) def test_e2e_deepseekocr_conversion(): """Test DeepSeek OCR VLM conversion on a PDF file.""" # Skip in CI or if ollama is not available if os.getenv("CI"): pytest.skip("Skipping in CI environment") # Check if ollama is available try: import requests response = requests.get("http://localhost:11434/v1/models", timeout=2) if response.status_code != 200: pytest.skip("Ollama is not available") except Exception: pytest.skip("Ollama is not available") # Setup the converter with DeepSeek OCR VLM pipeline_options = VlmPipelineOptions( vlm_options=vlm_model_specs.DEEPSEEKOCR_OLLAMA, enable_remote_services=True, ) converter = DocumentConverter( format_options={ InputFormat.PDF: PdfFormatOption( pipeline_cls=VlmPipeline, pipeline_options=pipeline_options, ), } ) # Convert the PDF pdf_path = Path("./tests/data/pdf/2206.01062.pdf") conv_result = converter.convert(pdf_path) # Load reference document ref_path = 
Path("./tests/data/groundtruth/docling_v2/deepseek_title.md.json") ref_doc = DoclingDocument.load_from_json(ref_path) # Validate conversion result doc = conv_result.document # Check number of pages assert len(doc.pages) == 9, f"Number of pages mismatch: {len(doc.pages)}" # Compare features of the first page (excluding bbox which can vary) # Check that we have similar structure assert len(doc.texts) > 0, "Document should have text elements" assert len(doc.pictures) > 0, "Document should have picture elements" # Check that the title is present title_texts = [t for t in doc.texts if t.label == "title"] assert len(title_texts) > 0, "Document should have a title" # Check that we have section headers section_headers = [t for t in doc.texts if t.label == "section_header"] assert len(section_headers) > 0, "Document should have section headers" # Compare with reference document structure (not exact bbox) ref_title_texts = [t for t in ref_doc.texts if t.label == "title"] assert len(title_texts) == len(ref_title_texts), ( f"Title count mismatch: {len(title_texts)} vs {len(ref_title_texts)}" ) print( f"✓ Conversion successful with {len(doc.texts)} text elements and {len(doc.pictures)} pictures" ) if __name__ == "__main__": test_e2e_deepseekocr_parsing() test_e2e_deepseekocr_conversion()
{ "repo_id": "docling-project/docling", "file_path": "tests/test_deepseekocr_vlm.py", "license": "MIT License", "lines": 126, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
docling-project/docling:docs/examples/post_process_ocr_with_vlm.py
import argparse
import logging
import os
import re
from collections.abc import Iterable
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Any, Optional, Union

import numpy as np
from docling_core.types.doc import (
    DoclingDocument,
    ImageRefMode,
    NodeItem,
    TextItem,
)
from docling_core.types.doc.document import (
    ContentLayer,
    DocItem,
    FormItem,
    GraphCell,
    KeyValueItem,
    PictureItem,
    RichTableCell,
    TableCell,
    TableItem,
)
from PIL import Image, ImageFilter
from pydantic import BaseModel, ConfigDict
from tqdm import tqdm

from docling.backend.json.docling_json_backend import DoclingJSONBackend
from docling.datamodel.accelerator_options import AcceleratorOptions
from docling.datamodel.base_models import InputFormat, ItemAndImageEnrichmentElement
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import (
    ConvertPipelineOptions,
    PdfPipelineOptions,
    PictureDescriptionApiOptions,
)
from docling.document_converter import DocumentConverter, FormatOption, PdfFormatOption
from docling.exceptions import OperationNotAllowed
from docling.models.base_model import BaseModelWithOptions, GenericEnrichmentModel
from docling.pipeline.simple_pipeline import SimplePipeline
from docling.pipeline.standard_pdf_pipeline import StandardPdfPipeline
from docling.utils.api_image_request import api_image_request
from docling.utils.profiling import ProfilingScope, TimeRecorder
from docling.utils.utils import chunkify

# Example of applying OCR to a Docling document as a post-processing step,
# per element bounding box, with "nanonets-ocr2-3b" via LM Studio.
# Requires LM Studio running an inference server with the "nanonets-ocr2-3b" model pre-loaded.
# To run:
# uv run python docs/examples/post_process_ocr_with_vlm.py

LM_STUDIO_URL = "http://localhost:1234/v1/chat/completions"
LM_STUDIO_MODEL = "nanonets-ocr2-3b"
DEFAULT_PROMPT = "Extract the text from the above document as if you were reading it naturally. Output pure text, no html and no markdown. Pay attention on line breaks and don't miss text after line break. Put all text in one line."
VERBOSE = True
SHOW_IMAGE = False
SHOW_EMPTY_CROPS = False
SHOW_NONEMPTY_CROPS = False
PRINT_RESULT_MARKDOWN = False


def is_empty_fast_with_lines_pil(
    pil_img: Image.Image,
    downscale_max_side: int = 48,
    grad_threshold: float = 15.0,  # how strong a gradient must be to count as edge
    min_line_coverage: float = 0.6,  # line must cover 60% of height/width
    max_allowed_lines: int = 10,  # allow up to this many strong lines
    edge_fraction_threshold: float = 0.0035,
):
    """
    Fast 'empty' detector using only PIL + NumPy.

    Treats an image as empty if:
      - It has very few edges overall, OR
      - Edges can be explained by at most `max_allowed_lines` long vertical/horizontal lines.

    Returns:
        (is_empty: bool, remaining_edge_fraction: float, debug: dict)
    """
    # 1) Convert to grayscale
    gray = pil_img.convert("L")

    # 2) Aggressive downscale to a small square; the aspect ratio is
    #    intentionally discarded, since only edge statistics are needed
    w0, h0 = gray.size
    max_side = max(w0, h0)
    if max_side > downscale_max_side:
        new_w = downscale_max_side
        new_h = downscale_max_side
        gray = gray.resize((new_w, new_h), resample=Image.BILINEAR)

    w, h = gray.size
    if w == 0 or h == 0:
        return True, 0.0, {"reason": "zero_size"}

    # 3) Small blur to reduce noise
    gray = gray.filter(ImageFilter.BoxBlur(1))

    # 4) Convert to NumPy; the array shape is (h, w), while PIL reports size as (w, h)
    arr = np.asarray(gray, dtype=np.float32)
    H, W = arr.shape

    # 5) Compute simple gradients (forward differences)
    gx = np.zeros_like(arr)
    gy = np.zeros_like(arr)
    gx[:, :-1] = arr[:, 1:] - arr[:, :-1]  # horizontal differences
    gy[:-1, :] = arr[1:, :] - arr[:-1, :]  # vertical differences
    mag = np.hypot(gx, gy)  # gradient magnitude

    # 6) Threshold gradients to get edges (boolean mask)
    edges = mag > grad_threshold
    edge_fraction = edges.mean()

    # Quick early-exit: almost no edges => empty
    if edge_fraction < edge_fraction_threshold:
        return True, float(edge_fraction), {"reason": "few_edges"}

    # 7) Detect strong vertical & horizontal lines via edge sums
    col_sum = edges.sum(axis=0)  # per column
    row_sum = edges.sum(axis=1)  # per row

    # Line must have edge pixels in at least `min_line_coverage` of the dimension
    vert_line_cols = np.where(col_sum >= min_line_coverage * H)[0]
    horiz_line_rows = np.where(row_sum >= min_line_coverage * W)[0]

    num_lines = len(vert_line_cols) + len(horiz_line_rows)

    # If we have more long lines than allowed => non-empty
    if num_lines > max_allowed_lines:
        return (
            False,
            float(edge_fraction),
            {
                "reason": "too_many_lines",
                "num_lines": int(num_lines),
                "edge_fraction": float(edge_fraction),
            },
        )

    # 8) Mask out those lines and recompute remaining edges
    line_mask = np.zeros_like(edges, dtype=bool)
    if len(vert_line_cols) > 0:
        line_mask[:, vert_line_cols] = True
    if len(horiz_line_rows) > 0:
        line_mask[horiz_line_rows, :] = True

    remaining_edges = edges & ~line_mask
    remaining_edge_fraction = remaining_edges.mean()

    is_empty = remaining_edge_fraction < edge_fraction_threshold

    debug = {
        "original_edge_fraction": float(edge_fraction),
        "remaining_edge_fraction": float(remaining_edge_fraction),
        "num_vert_lines": len(vert_line_cols),
        "num_horiz_lines": len(horiz_line_rows),
    }

    return is_empty, float(remaining_edge_fraction), debug


def remove_break_lines(text: str) -> str:
    # Replace any newline types with a single space
    cleaned = re.sub(r"[\r\n]+", " ", text)
    # Collapse multiple spaces into one
    cleaned = re.sub(r"\s+", " ", cleaned)
    return cleaned.strip()


def safe_crop(img: Image.Image, bbox):
    left, top, right, bottom = bbox
    # Clamp to image boundaries
    left = max(0, min(left, img.width))
    top = max(0, min(top, img.height))
    right = max(0, min(right, img.width))
    bottom = max(0, min(bottom, img.height))
    return img.crop((left, top, right, bottom))


def no_long_repeats(s: str, threshold: int) -> bool:
    """
    Returns False if the string `s` contains more than `threshold`
    identical characters in a row, otherwise True.
    """
    pattern = r"(.)\1{" + str(threshold) + ",}"
    return re.search(pattern, s) is None


class PostOcrEnrichmentElement(BaseModel):
    model_config = ConfigDict(arbitrary_types_allowed=True)
    item: Union[DocItem, TableCell, RichTableCell, GraphCell]
    image: list[
        Image.Image
    ]  # Needs to be a list of images for multi-provenance elements


class PostOcrEnrichmentPipelineOptions(ConvertPipelineOptions):
    api_options: PictureDescriptionApiOptions


class PostOcrEnrichmentPipeline(SimplePipeline):
    def __init__(self, pipeline_options: PostOcrEnrichmentPipelineOptions):
        super().__init__(pipeline_options)
        self.pipeline_options: PostOcrEnrichmentPipelineOptions

        self.enrichment_pipe = [
            PostOcrApiEnrichmentModel(
                enabled=True,
                enable_remote_services=True,
                artifacts_path=None,
                options=self.pipeline_options.api_options,
                accelerator_options=AcceleratorOptions(),
            )
        ]

    @classmethod
    def get_default_options(cls) -> PostOcrEnrichmentPipelineOptions:
        return PostOcrEnrichmentPipelineOptions()

    def _enrich_document(self, conv_res: ConversionResult) -> ConversionResult:
        def _prepare_elements(
            conv_res: ConversionResult, model: GenericEnrichmentModel[Any]
        ) -> Iterable[NodeItem]:
            # Iterate over all content layers, with traverse_pictures=True
            for doc_element, _level in conv_res.document.iterate_items(
                traverse_pictures=True,
                included_content_layers={
                    ContentLayer.BODY,
                    ContentLayer.FURNITURE,
                },
            ):
                # prepare_element may yield multiple items per document element
                prepared_elements = model.prepare_element(
                    conv_res=conv_res, element=doc_element
                )
                if prepared_elements is not None:
                    yield prepared_elements

        with TimeRecorder(conv_res, "doc_enrich", scope=ProfilingScope.DOCUMENT):
            for model in self.enrichment_pipe:
                for element_batch in chunkify(
                    _prepare_elements(conv_res, model),
                    model.elements_batch_size,
                ):
                    for element in model(
                        doc=conv_res.document, element_batch=element_batch
                    ):  # Must exhaust!
                        pass

        return conv_res


class PostOcrApiEnrichmentModel(
    GenericEnrichmentModel[PostOcrEnrichmentElement], BaseModelWithOptions
):
    expansion_factor: float = 0.001  # overridden in __init__

    def prepare_element(
        self, conv_res: ConversionResult, element: NodeItem
    ) -> Optional[list[PostOcrEnrichmentElement]]:
        if not self.is_processable(doc=conv_res.document, element=element):
            return None

        allowed = (DocItem, TableItem, GraphCell)
        assert isinstance(element, allowed)

        if isinstance(element, KeyValueItem | FormItem):
            # Yield from the graph cells inside here.
            result = []
            for c in element.graph.cells:
                element_prov = c.prov  # Key / Value have only one provenance!
bbox = element_prov.bbox page_ix = element_prov.page_no bbox = bbox.scale_to_size( old_size=conv_res.document.pages[page_ix].size, new_size=conv_res.document.pages[page_ix].image.size, ) expanded_bbox = bbox.expand_by_scale( x_scale=self.expansion_factor, y_scale=self.expansion_factor ).to_top_left_origin( page_height=conv_res.document.pages[page_ix].image.size.height ) good_bbox = True if ( expanded_bbox.l > expanded_bbox.r or expanded_bbox.t > expanded_bbox.b ): good_bbox = False if good_bbox: cropped_image = conv_res.document.pages[ page_ix ].image.pil_image.crop(expanded_bbox.as_tuple()) is_empty, rem_frac, debug = is_empty_fast_with_lines_pil( cropped_image ) if is_empty: if SHOW_EMPTY_CROPS: try: cropped_image.show() except Exception as e: print(f"Error with image: {e}") print( f"Detected empty form item image crop: {rem_frac} - {debug}" ) else: result.append( PostOcrEnrichmentElement(item=c, image=[cropped_image]) ) return result elif isinstance(element, TableItem): element_prov = element.prov[0] page_ix = element_prov.page_no result = [] for i, row in enumerate(element.data.grid): for j, cell in enumerate(row): if hasattr(cell, "bbox"): if cell.bbox: bbox = cell.bbox bbox = bbox.scale_to_size( old_size=conv_res.document.pages[page_ix].size, new_size=conv_res.document.pages[page_ix].image.size, ) expanded_bbox = bbox.expand_by_scale( x_scale=self.table_cell_expansion_factor, y_scale=self.table_cell_expansion_factor, ).to_top_left_origin( page_height=conv_res.document.pages[ page_ix ].image.size.height ) good_bbox = True if ( expanded_bbox.l > expanded_bbox.r or expanded_bbox.t > expanded_bbox.b ): good_bbox = False if good_bbox: cropped_image = conv_res.document.pages[ page_ix ].image.pil_image.crop(expanded_bbox.as_tuple()) is_empty, rem_frac, debug = ( is_empty_fast_with_lines_pil(cropped_image) ) if is_empty: if SHOW_EMPTY_CROPS: try: cropped_image.show() except Exception as e: print(f"Error with image: {e}") print( f"Detected empty table cell image crop: {rem_frac} - {debug}" ) else: if SHOW_NONEMPTY_CROPS: cropped_image.show() result.append( PostOcrEnrichmentElement( item=cell, image=[cropped_image] ) ) return result else: multiple_crops = [] # Crop the image form the page for element_prov in element.prov: # Iterate over provenances bbox = element_prov.bbox page_ix = element_prov.page_no bbox = bbox.scale_to_size( old_size=conv_res.document.pages[page_ix].size, new_size=conv_res.document.pages[page_ix].image.size, ) expanded_bbox = bbox.expand_by_scale( x_scale=self.expansion_factor, y_scale=self.expansion_factor ).to_top_left_origin( page_height=conv_res.document.pages[page_ix].image.size.height ) good_bbox = True if ( expanded_bbox.l > expanded_bbox.r or expanded_bbox.t > expanded_bbox.b ): good_bbox = False if hasattr(element, "text"): if good_bbox: cropped_image = conv_res.document.pages[ page_ix ].image.pil_image.crop(expanded_bbox.as_tuple()) is_empty, rem_frac, debug = is_empty_fast_with_lines_pil( cropped_image ) if is_empty: if SHOW_EMPTY_CROPS: try: cropped_image.show() except Exception as e: print(f"Error with image: {e}") print(f"Detected empty text crop: {rem_frac} - {debug}") else: multiple_crops.append(cropped_image) if hasattr(element, "text"): print(f"\nOLD TEXT: {element.text}") else: print("Not a text element") if len(multiple_crops) > 0: # good crops return [PostOcrEnrichmentElement(item=element, image=multiple_crops)] else: # nothing return [] @classmethod def get_options_type(cls) -> type[PictureDescriptionApiOptions]: return PictureDescriptionApiOptions 
    def __init__(
        self,
        *,
        enabled: bool,
        enable_remote_services: bool,
        artifacts_path: Optional[Union[Path, str]],
        options: PictureDescriptionApiOptions,
        accelerator_options: AcceleratorOptions,
    ):
        self.enabled = enabled
        self.options = options
        self.concurrency = 2
        self.expansion_factor = 0.05
        self.table_cell_expansion_factor = 0.0  # do not modify table cell size
        self.elements_batch_size = 4
        self._accelerator_options = accelerator_options
        self._artifacts_path = (
            Path(artifacts_path) if isinstance(artifacts_path, str) else artifacts_path
        )

        if self.enabled and not enable_remote_services:
            raise OperationNotAllowed(
                "Enable remote services by setting pipeline_options.enable_remote_services=True."
            )

    def is_processable(self, doc: DoclingDocument, element: NodeItem) -> bool:
        return self.enabled

    def _annotate_images(self, images: Iterable[Image.Image]) -> Iterable[str]:
        def _api_request(image: Image.Image) -> str:
            res = api_image_request(
                image=image,
                prompt=self.options.prompt,
                url=self.options.url,
                timeout=30,  # use a short fixed timeout instead of self.options.timeout
                headers=self.options.headers,
                **self.options.params,
            )
            return res[0]

        with ThreadPoolExecutor(max_workers=self.concurrency) as executor:
            yield from executor.map(_api_request, images)

    def __call__(
        self,
        doc: DoclingDocument,
        element_batch: Iterable[ItemAndImageEnrichmentElement],
    ) -> Iterable[NodeItem]:
        if not self.enabled:
            for element in element_batch:
                yield element.item
            return

        elements: list[TextItem] = []
        images: list[Image.Image] = []
        img_ind_per_element: list[int] = []
        for element_stack in element_batch:
            for element in element_stack:
                allowed = (DocItem, TableCell, RichTableCell, GraphCell)
                assert isinstance(element.item, allowed)
                for ind, img in enumerate(element.image):
                    elements.append(element.item)
                    images.append(img)
                    img_ind_per_element.append(ind)

        if not images:
            return

        outputs = list(self._annotate_images(images))

        for item, output, img_ind in zip(elements, outputs, img_ind_per_element):
            # Sometimes the model returns HTML tags, which are not needed in our
            # output, so it's better to strip them
            def clean_html_tags(text):
                for tag in [
                    "<table>",
                    "<tr>",
                    "<td>",
                    "<strong>",
                    "</table>",
                    "</tr>",
                    "</td>",
                    "</strong>",
                    "<th>",
                    "</th>",
                    "<tbody>",
                    "</tbody>",
                    "<thead>",
                    "</thead>",
                ]:
                    text = text.replace(tag, "")
                return text

            output = clean_html_tags(output).strip()
            output = remove_break_lines(output)

            # The last measure against hallucinations
            # Detect hallucinated string...
if output.startswith("The first of these"): output = "" if no_long_repeats(output, 50): if VERBOSE: if isinstance(item, (TextItem)): print(f"\nOLD TEXT: {item.text}") # Re-populate text if isinstance(item, TextItem | GraphCell): if img_ind > 0: # Concat texts across several provenances item.text += " " + output # item.orig += " " + output else: item.text = output # item.orig = output elif isinstance(item, TableCell | RichTableCell): item.text = output elif isinstance(item, PictureItem): pass else: raise ValueError(f"Unknown item type: {type(item)}") if VERBOSE: if isinstance(item, (TextItem)): print(f"NEW TEXT: {item.text}") # Take care of charspans for relevant types if isinstance(item, GraphCell): item.prov.charspan = (0, len(item.text)) elif isinstance(item, TextItem): item.prov[0].charspan = (0, len(item.text)) yield item def convert_pdf(pdf_path: Path, out_intermediate_json: Path): # Let's prepare a Docling document json with embedded page images pipeline_options = PdfPipelineOptions() pipeline_options.generate_page_images = True pipeline_options.generate_picture_images = True # pipeline_options.images_scale = 4.0 pipeline_options.images_scale = 2.0 doc_converter = ( DocumentConverter( # all of the below is optional, has internal defaults. allowed_formats=[InputFormat.PDF], format_options={ InputFormat.PDF: PdfFormatOption( pipeline_cls=StandardPdfPipeline, pipeline_options=pipeline_options ) }, ) ) if VERBOSE: print( "Converting PDF to get a Docling document json with embedded page images..." ) conv_result = doc_converter.convert(pdf_path) conv_result.document.save_as_json( filename=out_intermediate_json, image_mode=ImageRefMode.EMBEDDED ) if PRINT_RESULT_MARKDOWN: md1 = conv_result.document.export_to_markdown() print("*** ORIGINAL MARKDOWN ***") print(md1) def post_process_json(in_json: Path, out_final_json: Path): # Post-Process OCR on top of existing Docling document, per element's bounding box: print(f"Post-process all bounding boxes with OCR... 
{os.path.basename(in_json)}") pipeline_options = PostOcrEnrichmentPipelineOptions( api_options=PictureDescriptionApiOptions( url=LM_STUDIO_URL, prompt=DEFAULT_PROMPT, provenance="lm-studio-ocr", batch_size=4, concurrency=2, scale=2.0, params={"model": LM_STUDIO_MODEL}, ) ) doc_converter = DocumentConverter( format_options={ InputFormat.JSON_DOCLING: FormatOption( pipeline_cls=PostOcrEnrichmentPipeline, pipeline_options=pipeline_options, backend=DoclingJSONBackend, ) } ) result = doc_converter.convert(in_json) if SHOW_IMAGE: result.document.pages[1].image.pil_image.show() result.document.save_as_json(out_final_json) if PRINT_RESULT_MARKDOWN: md = result.document.export_to_markdown() print("*** MARKDOWN ***") print(md) def process_pdf(pdf_path: Path, scratch_dir: Path, out_dir: Path): inter_json = scratch_dir / (pdf_path.stem + ".json") final_json = out_dir / (pdf_path.stem + ".json") inter_json.parent.mkdir(parents=True, exist_ok=True) final_json.parent.mkdir(parents=True, exist_ok=True) if final_json.exists() and final_json.stat().st_size > 0: print(f"Result already found here: '{final_json}', aborting...") return # already done convert_pdf(pdf_path, inter_json) post_process_json(inter_json, final_json) def process_json(json_path: Path, out_dir: Path): final_json = out_dir / (json_path.stem + ".json") final_json.parent.mkdir(parents=True, exist_ok=True) if final_json.exists() and final_json.stat().st_size > 0: return # already done post_process_json(json_path, final_json) def filter_jsons_by_ocr_list(jsons, folder): """ jsons: list[Path] - JSON files folder: Path - folder containing ocr_documents.txt """ ocr_file = folder / "ocr_documents.txt" # If the file doesn't exist, return the list unchanged if not ocr_file.exists(): return jsons # Read file names (strip whitespace, ignore empty lines) with ocr_file.open("r", encoding="utf-8") as f: allowed = {line.strip() for line in f if line.strip()} # Keep only JSONs whose stem is in allowed list filtered = [p for p in jsons if p.stem in allowed] return filtered def run_jsons(in_path: Path, out_dir: Path): if in_path.is_dir(): jsons = sorted(in_path.glob("*.json")) if not jsons: raise SystemExit("Folder mode expects one or more .json files") # Look for ocr_documents.txt, in case found, respect only the jsons filtered_jsons = filter_jsons_by_ocr_list(jsons, in_path) for j in tqdm(filtered_jsons): print("") print("Processing file...") print(j) process_json(j, out_dir) else: raise SystemExit("Invalid --in path") def main(): logging.getLogger().setLevel(logging.ERROR) p = argparse.ArgumentParser(description="PDF/JSON -> final JSON pipeline") p.add_argument( "--in", dest="in_path", default="tests/data/pdf/2305.03393v1-pg9.pdf", required=False, help="Path to a PDF/JSON file or a folder of JSONs", ) p.add_argument( "--out", dest="out_dir", default="scratch/", required=False, help="Folder for final JSONs (scratch goes inside)", ) args = p.parse_args() in_path = Path(args.in_path).expanduser().resolve() out_dir = Path(args.out_dir).expanduser().resolve() print(f"in_path: {in_path}") print(f"out_dir: {out_dir}") scratch_dir = out_dir / "temp" if not in_path.exists(): raise SystemExit(f"Input not found: {in_path}") if in_path.is_file(): if in_path.suffix.lower() == ".pdf": process_pdf(in_path, scratch_dir, out_dir) elif in_path.suffix.lower() == ".json": process_json(in_path, out_dir) else: raise SystemExit("Single-file mode expects a .pdf or .json") else: run_jsons(in_path, out_dir) if __name__ == "__main__": main()
{ "repo_id": "docling-project/docling", "file_path": "docs/examples/post_process_ocr_with_vlm.py", "license": "MIT License", "lines": 634, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
docling-project/docling:docling/experimental/datamodel/table_crops_layout_options.py
"""Internal options for the experimental TableCrops layout model.""" from typing import ClassVar from docling.datamodel.pipeline_options import BaseLayoutOptions __all__ = ["TableCropsLayoutOptions"] class TableCropsLayoutOptions(BaseLayoutOptions): """Options for TableCropsLayoutModel (internal-only).""" kind: ClassVar[str] = "docling_experimental_table_crops_layout"
{ "repo_id": "docling-project/docling", "file_path": "docling/experimental/datamodel/table_crops_layout_options.py", "license": "MIT License", "lines": 7, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
docling-project/docling:docling/experimental/models/table_crops_layout_model.py
"""Internal TableCrops layout model that marks full pages as table clusters.""" from __future__ import annotations import warnings from collections.abc import Sequence from pathlib import Path from typing import Optional import numpy as np from docling_core.types.doc import DocItemLabel from docling.datamodel.accelerator_options import AcceleratorOptions from docling.datamodel.base_models import BoundingBox, Cluster, LayoutPrediction, Page from docling.datamodel.document import ConversionResult from docling.experimental.datamodel.table_crops_layout_options import ( TableCropsLayoutOptions, ) from docling.models.base_layout_model import BaseLayoutModel __all__ = ["TableCropsLayoutModel"] class TableCropsLayoutModel(BaseLayoutModel): """Experimental layout model that treats the full page as a table cluster. This is useful in cases where a Docling pipeline is applied to images of table crops only. This model is internal and not part of the stable public interface. """ def __init__( self, artifacts_path: Optional[Path], accelerator_options: AcceleratorOptions, options: TableCropsLayoutOptions, enable_remote_services: bool = False, ): _ = enable_remote_services self.options = options self.artifacts_path = artifacts_path self.accelerator_options = accelerator_options @classmethod def get_options_type(cls) -> type[TableCropsLayoutOptions]: return TableCropsLayoutOptions def predict_layout( self, conv_res: ConversionResult, pages: Sequence[Page], ) -> Sequence[LayoutPrediction]: layout_predictions: list[LayoutPrediction] = [] for page in pages: if page._backend is None or not page._backend.is_valid(): existing_prediction = page.predictions.layout or LayoutPrediction() layout_predictions.append(existing_prediction) continue clusters = self._build_page_clusters(page) prediction = LayoutPrediction(clusters=clusters) self._update_confidence(conv_res, page, clusters) layout_predictions.append(prediction) return layout_predictions def _build_page_clusters(self, page: Page) -> list[Cluster]: page_size = page.size if page_size is None: return [] bbox = BoundingBox( l=0.0, t=0.0, r=page_size.width, b=page_size.height, ) cluster = Cluster( id=0, label=DocItemLabel.TABLE, bbox=bbox, confidence=1.0, cells=[], ) clusters = [cluster] if not self.options.skip_cell_assignment: page_cells = list(page.cells) cluster.cells = page_cells if not page_cells and not self.options.keep_empty_clusters: clusters = [] return clusters def _update_confidence( self, conv_res: ConversionResult, page: Page, clusters: list[Cluster] ) -> None: """Populate layout and OCR confidence scores for the page.""" with warnings.catch_warnings(): warnings.filterwarnings( "ignore", "Mean of empty slice|invalid value encountered in scalar divide", RuntimeWarning, "numpy", ) conv_res.confidence.pages[page.page_no].layout_score = 1.0 ocr_cells = [cell for cell in page.cells if cell.from_ocr] ocr_confidence = float(np.mean([cell.confidence for cell in ocr_cells])) conv_res.confidence.pages[page.page_no].ocr_score = ocr_confidence
{ "repo_id": "docling-project/docling", "file_path": "docling/experimental/models/table_crops_layout_model.py", "license": "MIT License", "lines": 90, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
docling-project/docling:docs/examples/experimental/process_table_crops.py
"""Run Docling on an image using the experimental TableCrops layout model.""" from __future__ import annotations from pathlib import Path import docling from docling.datamodel.document import InputFormat from docling.datamodel.pipeline_options import ThreadedPdfPipelineOptions from docling.document_converter import DocumentConverter, ImageFormatOption from docling.experimental.datamodel.table_crops_layout_options import ( TableCropsLayoutOptions, ) from docling.experimental.models.table_crops_layout_model import TableCropsLayoutModel from docling.models.factories import get_layout_factory def main() -> None: # Go up 3 levels to escape 'docs' and reach the repo root sample_image = ( Path(__file__).parent / "../../../tests/data/2305.03393v1-table_crop.png" ) pipeline_options = ThreadedPdfPipelineOptions( layout_options=TableCropsLayoutOptions(), do_table_structure=True, generate_page_images=True, ) converter = DocumentConverter( allowed_formats=[InputFormat.IMAGE], format_options={ InputFormat.IMAGE: ImageFormatOption(pipeline_options=pipeline_options) }, ) conv_res = converter.convert(sample_image) print(conv_res.document.tables[0].export_to_markdown()) if __name__ == "__main__": main()
{ "repo_id": "docling-project/docling", "file_path": "docs/examples/experimental/process_table_crops.py", "license": "MIT License", "lines": 32, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
docling-project/docling:docling/models/base_layout_model.py
from __future__ import annotations from abc import ABC, abstractmethod from collections.abc import Iterable, Sequence from typing import Type from docling.datamodel.base_models import LayoutPrediction, Page from docling.datamodel.document import ConversionResult from docling.datamodel.pipeline_options import BaseLayoutOptions from docling.models.base_model import BaseModelWithOptions, BasePageModel class BaseLayoutModel(BasePageModel, BaseModelWithOptions, ABC): """Shared interface for layout models.""" @classmethod @abstractmethod def get_options_type(cls) -> Type[BaseLayoutOptions]: """Return the options type supported by this layout model.""" @abstractmethod def predict_layout( self, conv_res: ConversionResult, pages: Sequence[Page], ) -> Sequence[LayoutPrediction]: """Produce layout predictions for the provided pages.""" def __call__( self, conv_res: ConversionResult, page_batch: Iterable[Page], ) -> Iterable[Page]: pages = list(page_batch) predictions = self.predict_layout(conv_res, pages) for page, prediction in zip(pages, predictions): page.predictions.layout = prediction yield page
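

# --- Implementation sketch (illustrative only) ---
# A hypothetical minimal subclass showing the two members a concrete layout
# model must provide; "MyLayoutOptions" is an assumed BaseLayoutOptions
# subclass and not part of this module.
#
# class MyLayoutModel(BaseLayoutModel):
#     @classmethod
#     def get_options_type(cls) -> Type[BaseLayoutOptions]:
#         return MyLayoutOptions
#
#     def predict_layout(
#         self, conv_res: ConversionResult, pages: Sequence[Page]
#     ) -> Sequence[LayoutPrediction]:
#         # Must return exactly one prediction per input page, in order,
#         # since __call__ zips pages with predictions.
#         return [LayoutPrediction(clusters=[]) for _ in pages]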
{ "repo_id": "docling-project/docling", "file_path": "docling/models/base_layout_model.py", "license": "MIT License", "lines": 31, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
docling-project/docling:docling/models/base_table_model.py
from __future__ import annotations from abc import ABC, abstractmethod from collections.abc import Iterable, Sequence from typing import Type from docling.datamodel.base_models import Page, TableStructurePrediction from docling.datamodel.document import ConversionResult from docling.datamodel.pipeline_options import BaseTableStructureOptions from docling.models.base_model import BaseModelWithOptions, BasePageModel class BaseTableStructureModel(BasePageModel, BaseModelWithOptions, ABC): """Shared interface for table structure models.""" enabled: bool @classmethod @abstractmethod def get_options_type(cls) -> Type[BaseTableStructureOptions]: """Return the options type supported by this table model.""" @abstractmethod def predict_tables( self, conv_res: ConversionResult, pages: Sequence[Page], ) -> Sequence[TableStructurePrediction]: """Produce table structure predictions for the provided pages.""" def __call__( self, conv_res: ConversionResult, page_batch: Iterable[Page], ) -> Iterable[Page]: if not getattr(self, "enabled", True): yield from page_batch return pages = list(page_batch) predictions = self.predict_tables(conv_res, pages) for page, prediction in zip(pages, predictions): page.predictions.tablestructure = prediction yield page
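

# --- Note on the `enabled` gate (illustrative sketch) ---
# Unlike BaseLayoutModel, __call__ consults an `enabled` attribute and passes
# pages through untouched when it is False. A hypothetical subclass therefore
# sets `enabled` before predictions are requested:
#
# class MyTableModel(BaseTableStructureModel):
#     def __init__(self, options: BaseTableStructureOptions, enabled: bool = True):
#         self.enabled = enabled
#         self.options = options
#
#     @classmethod
#     def get_options_type(cls) -> Type[BaseTableStructureOptions]:
#         return BaseTableStructureOptions
#
#     def predict_tables(
#         self, conv_res: ConversionResult, pages: Sequence[Page]
#     ) -> Sequence[TableStructurePrediction]:
#         return [TableStructurePrediction() for _ in pages]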
{ "repo_id": "docling-project/docling", "file_path": "docling/models/base_table_model.py", "license": "MIT License", "lines": 35, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
docling-project/docling:docling/models/factories/layout_factory.py
from docling.models.base_layout_model import BaseLayoutModel from docling.models.factories.base_factory import BaseFactory class LayoutFactory(BaseFactory[BaseLayoutModel]): def __init__(self, *args, **kwargs): super().__init__("layout_engines", *args, **kwargs)
{ "repo_id": "docling-project/docling", "file_path": "docling/models/factories/layout_factory.py", "license": "MIT License", "lines": 5, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
docling-project/docling:docling/models/factories/table_factory.py
from docling.models.base_table_model import BaseTableStructureModel from docling.models.factories.base_factory import BaseFactory class TableStructureFactory(BaseFactory[BaseTableStructureModel]): def __init__(self, *args, **kwargs): super().__init__("table_structure_engines", *args, **kwargs)
{ "repo_id": "docling-project/docling", "file_path": "docling/models/factories/table_factory.py", "license": "MIT License", "lines": 5, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
docling-project/docling:tests/test_conversion_result_json.py
from pathlib import Path

from docling.backend.pypdfium2_backend import (
    PyPdfiumDocumentBackend,
)
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import ConversionAssets
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption


def test_conversion_result_json_roundtrip():
    pdf_doc = Path("./tests/data/pdf/redp5110_sampled.pdf")

    pipeline_options = PdfPipelineOptions()
    pipeline_options.do_ocr = False
    pipeline_options.images_scale = 1.0
    pipeline_options.generate_page_images = False
    pipeline_options.do_table_structure = False
    pipeline_options.table_structure_options.do_cell_matching = True
    pipeline_options.generate_parsed_pages = True

    doc_converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_options=pipeline_options, backend=PyPdfiumDocumentBackend
            )
        }
    )

    conv_res = doc_converter.convert(pdf_doc)

    # Save the conversion assets to a zip archive and load them back
    fpath: Path = Path("./test-conversion.zip")
    conv_res.save(filename=fpath)

    loaded = ConversionAssets.load(filename=fpath)

    assert loaded.status == conv_res.status
    assert loaded.document.name == conv_res.document.name
{ "repo_id": "docling-project/docling", "file_path": "tests/test_conversion_result_json.py", "license": "MIT License", "lines": 34, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
docling-project/docling:docs/examples/suryaocr_with_custom_models.py
# Example: Integrating SuryaOCR with Docling for PDF OCR and Markdown Export # # Overview: # - Configures SuryaOCR options for OCR. # - Executes PDF pipeline with SuryaOCR integration. # - Models auto-download from Hugging Face on first run. # # Prerequisites: # - Install: `pip install docling-surya` # - Ensure `docling` imports successfully. # # Execution: # - Run from repo root: `python docs/examples/suryaocr_with_custom_models.py` # - Outputs Markdown to stdout. # # Notes: # - Default source: EPA PDF URL; substitute with local path as needed. # - Models cached in `~/.cache/huggingface`; override with HF_HOME env var. # - Use proxy config for restricted networks. # - **Important Licensing Note**: The `docling-surya` package integrates SuryaOCR, which is licensed under the GNU General Public License (GPL). # Using this integration may impose GPL obligations on your project. Review the license terms carefully. # Requires `pip install docling-surya` # See https://pypi.org/project/docling-surya/ from docling_surya import SuryaOcrOptions from docling.datamodel.base_models import InputFormat from docling.datamodel.pipeline_options import PdfPipelineOptions from docling.document_converter import DocumentConverter, PdfFormatOption def main(): source = "https://19january2021snapshot.epa.gov/sites/static/files/2016-02/documents/epa_sample_letter_sent_to_commissioners_dated_february_29_2015.pdf" pipeline_options = PdfPipelineOptions( do_ocr=True, ocr_model="suryaocr", allow_external_plugins=True, ocr_options=SuryaOcrOptions(lang=["en"]), ) converter = DocumentConverter( format_options={ InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options), InputFormat.IMAGE: PdfFormatOption(pipeline_options=pipeline_options), } ) result = converter.convert(source) print(result.document.export_to_markdown()) if __name__ == "__main__": main()
{ "repo_id": "docling-project/docling", "file_path": "docs/examples/suryaocr_with_custom_models.py", "license": "MIT License", "lines": 45, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
docling-project/docling:docs/examples/parquet_images.py
# %% [markdown]
# What this example does
# - Run a batch conversion on a parquet file with an image column.
#
# Requirements
# - Python 3.9+
# - Install Docling: `pip install docling`
#
# How to run
# - `python docs/examples/parquet_images.py FILE`
#
# The parquet file should be in a format similar to the ViDoRe V3 dataset.
# https://huggingface.co/collections/vidore/vidore-benchmark-v3
#
# For example:
# - https://huggingface.co/datasets/vidore/vidore_v3_hr/blob/main/corpus/test-00000-of-00001.parquet
#
# ### Start models with vllm
# ```console
# vllm serve ibm-granite/granite-docling-258M \
#     --host 127.0.0.1 --port 8000 \
#     --max-num-seqs 512 \
#     --max-num-batched-tokens 8192 \
#     --enable-chunked-prefill \
#     --gpu-memory-utilization 0.9
# ```

# %%
import io
import sys
import time
from pathlib import Path
from typing import Annotated, Literal

import pyarrow.parquet as pq
import typer
from PIL import Image

from docling.datamodel import vlm_model_specs
from docling.datamodel.accelerator_options import AcceleratorOptions
from docling.datamodel.base_models import ConversionStatus, DocumentStream, InputFormat
from docling.datamodel.pipeline_options import (
    PdfPipelineOptions,
    PipelineOptions,
    RapidOcrOptions,
    VlmPipelineOptions,
)
from docling.datamodel.settings import settings
from docling.document_converter import DocumentConverter, ImageFormatOption
from docling.pipeline.base_pipeline import ConvertPipeline
from docling.pipeline.legacy_standard_pdf_pipeline import LegacyStandardPdfPipeline
from docling.pipeline.standard_pdf_pipeline import StandardPdfPipeline
from docling.pipeline.vlm_pipeline import VlmPipeline
from docling.utils.accelerator_utils import decide_device


def process_document(
    images: list[Image.Image], chunk_idx: int, doc_converter: DocumentConverter
):
    """Builds a multi-page TIFF from the images and sends it through Docling."""
    print(f"\n--- Processing chunk {chunk_idx} with {len(images)} images ---")

    # Convert images to mode RGB (TIFF pages must match)
    rgb_images = [im.convert("RGB") for im in images]

    # First image is the base frame
    first = rgb_images[0]
    rest = rgb_images[1:]

    # Create multi-page TIFF using PIL frames
    buf = io.BytesIO()
    first.save(
        buf,
        format="TIFF",
        save_all=True,
        append_images=rest,
        compression="tiff_deflate",  # good compression, optional
    )
    buf.seek(0)

    # Docling conversion
    doc_stream = DocumentStream(name=f"doc_{chunk_idx}.tiff", stream=buf)

    start_time = time.time()
    conv_result = doc_converter.convert(doc_stream)
    runtime = time.time() - start_time

    assert conv_result.status == ConversionStatus.SUCCESS
    pages = len(conv_result.pages)

    print(
        f"Chunk {chunk_idx} converted in {runtime:.2f} sec ({pages / runtime:.2f} pages/s)."
) def run( filename: Annotated[Path, typer.Argument()] = Path( "docs/examples/data/vidore_v3_hr-slice.parquet" ), doc_size: int = 192, batch_size: int = 64, pipeline: Literal["standard", "vlm", "legacy"] = "standard", ): acc_opts = AcceleratorOptions() device = decide_device(acc_opts.device) ocr_options = RapidOcrOptions() if "cuda" in device: ocr_options = RapidOcrOptions(backend="torch") # On Python 3.14 we only have torch if sys.version_info >= (3, 14): ocr_options = RapidOcrOptions(backend="torch") if pipeline == "standard": pipeline_cls: type[ConvertPipeline] = StandardPdfPipeline pipeline_options: PipelineOptions = PdfPipelineOptions( ocr_options=ocr_options, ocr_batch_size=batch_size, layout_batch_size=batch_size, table_batch_size=4, ) elif pipeline == "legacy": settings.perf.page_batch_size = batch_size pipeline_cls: type[ConvertPipeline] = LegacyStandardPdfPipeline pipeline_options: PipelineOptions = PdfPipelineOptions( ocr_options=ocr_options, ocr_batch_size=batch_size, layout_batch_size=batch_size, table_batch_size=4, ) elif pipeline == "vlm": settings.perf.page_batch_size = batch_size pipeline_cls = VlmPipeline vlm_options = vlm_model_specs.GRANITEDOCLING_VLLM_API vlm_options.concurrency = batch_size vlm_options.scale = 1.0 # avoid rescaling image inputs pipeline_options = VlmPipelineOptions( vlm_options=vlm_options, enable_remote_services=True, # required when using a remote inference service. ) else: raise RuntimeError(f"Pipeline {pipeline} not available.") doc_converter = DocumentConverter( format_options={ InputFormat.IMAGE: ImageFormatOption( pipeline_cls=pipeline_cls, pipeline_options=pipeline_options, ) } ) start_time = time.time() doc_converter.initialize_pipeline(InputFormat.IMAGE) init_runtime = time.time() - start_time print(f"Pipeline initialized in {init_runtime:.2f} seconds.") # ------------------------------------------------------------ # Open parquet file in streaming mode # ------------------------------------------------------------ pf = pq.ParquetFile(filename) image_buffer = [] # holds up to doc_size images chunk_idx = 0 # ------------------------------------------------------------ # Stream batches from parquet # ------------------------------------------------------------ for batch in pf.iter_batches(batch_size=batch_size, columns=["image"]): col = batch.column("image") # Extract Python objects (PIL images) # Arrow stores them as Python objects inside an ObjectArray for i in range(len(col)): img_dict = col[i].as_py() # {"bytes": ..., "path": ...} pil_image = Image.open(io.BytesIO(img_dict["bytes"])) image_buffer.append(pil_image) # If enough images gathered → process one doc if len(image_buffer) == doc_size: process_document(image_buffer, chunk_idx, doc_converter) image_buffer.clear() chunk_idx += 1 # ------------------------------------------------------------ # Process trailing images (last partial chunk) # ------------------------------------------------------------ if image_buffer: process_document(image_buffer, chunk_idx, doc_converter) if __name__ == "__main__": typer.run(run)
{ "repo_id": "docling-project/docling", "file_path": "docs/examples/parquet_images.py", "license": "MIT License", "lines": 168, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
docling-project/docling:tests/test_backend_image_native.py
from io import BytesIO
from pathlib import Path

import pytest
from docling_core.types.doc import BoundingBox, CoordOrigin
from PIL import Image

from docling.backend.image_backend import ImageDocumentBackend, _ImagePageBackend
from docling.datamodel.base_models import DocumentStream, InputFormat
from docling.datamodel.document import InputDocument, _DocumentConversionInput
from docling.document_converter import DocumentConverter, ImageFormatOption
from docling.document_extractor import DocumentExtractor


def _make_png_stream(
    width: int = 64, height: int = 48, color=(123, 45, 67)
) -> DocumentStream:
    img = Image.new("RGB", (width, height), color)
    buf = BytesIO()
    img.save(buf, format="PNG")
    buf.seek(0)
    return DocumentStream(name="test.png", stream=buf)


def _make_multipage_tiff_stream(num_pages: int = 3, size=(32, 32)) -> DocumentStream:
    frames = [
        Image.new("RGB", size, (i * 10 % 255, i * 20 % 255, i * 30 % 255))
        for i in range(num_pages)
    ]
    buf = BytesIO()
    frames[0].save(buf, format="TIFF", save_all=True, append_images=frames[1:])
    buf.seek(0)
    return DocumentStream(name="test.tiff", stream=buf)


def test_docs_builder_uses_image_backend_for_image_stream():
    stream = _make_png_stream()
    conv_input = _DocumentConversionInput(path_or_stream_iterator=[stream])
    # Provide format options mapping that includes IMAGE -> ImageFormatOption
    # (which carries ImageDocumentBackend)
    format_options = {InputFormat.IMAGE: ImageFormatOption()}
    docs = list(conv_input.docs(format_options))
    assert len(docs) == 1
    in_doc = docs[0]
    assert in_doc.format == InputFormat.IMAGE
    assert isinstance(in_doc._backend, ImageDocumentBackend)
    assert in_doc.page_count == 1


def test_docs_builder_multipage_tiff_counts_frames():
    stream = _make_multipage_tiff_stream(num_pages=4)
    conv_input = _DocumentConversionInput(path_or_stream_iterator=[stream])
    format_options = {InputFormat.IMAGE: ImageFormatOption()}
    in_doc = next(conv_input.docs(format_options))
    assert isinstance(in_doc._backend, ImageDocumentBackend)
    assert in_doc.page_count == 4


def test_converter_default_maps_image_to_image_backend():
    converter = DocumentConverter(allowed_formats=[InputFormat.IMAGE])
    backend_cls = converter.format_to_options[InputFormat.IMAGE].backend
    assert backend_cls is ImageDocumentBackend


def test_extractor_default_maps_image_to_image_backend():
    extractor = DocumentExtractor(allowed_formats=[InputFormat.IMAGE])
    backend_cls = extractor.extraction_format_to_options[InputFormat.IMAGE].backend
    assert backend_cls is ImageDocumentBackend


def _get_backend_from_stream(stream: DocumentStream):
    """Helper to create InputDocument with ImageDocumentBackend from a stream."""
    in_doc = InputDocument(
        path_or_stream=stream.stream,
        format=InputFormat.IMAGE,
        backend=ImageDocumentBackend,
        filename=stream.name,
    )
    return in_doc._backend


def test_num_pages_single():
    """Test page count for single-page image."""
    stream = _make_png_stream(width=100, height=80)
    doc_backend = _get_backend_from_stream(stream)
    assert doc_backend.page_count() == 1


def test_num_pages_multipage():
    """Test page count for multi-page TIFF."""
    stream = _make_multipage_tiff_stream(num_pages=5, size=(64, 64))
    doc_backend = _get_backend_from_stream(stream)
    assert doc_backend.page_count() == 5


def test_get_size():
    """Test getting page size."""
    width, height = 120, 90
    stream = _make_png_stream(width=width, height=height)
    doc_backend = _get_backend_from_stream(stream)
    page_backend: _ImagePageBackend = doc_backend.load_page(0)
    size = page_backend.get_size()
    assert size.width == width
    assert size.height == height


def test_get_page_image_full():
    """Test getting full page image."""
    width, height = 100, 80
    stream = _make_png_stream(width=width, height=height)
    doc_backend = _get_backend_from_stream(stream)
    page_backend: _ImagePageBackend = doc_backend.load_page(0)
    img = page_backend.get_page_image()
    assert img.width == width
    assert img.height == height


def test_get_page_image_scaled():
    """Test getting scaled page image."""
    width, height = 100, 80
    scale = 2.0
    stream = _make_png_stream(width=width, height=height)
    doc_backend = _get_backend_from_stream(stream)
    page_backend: _ImagePageBackend = doc_backend.load_page(0)
    img = page_backend.get_page_image(scale=scale)
    assert img.width == round(width * scale)
    assert img.height == round(height * scale)


def test_crop_page_image():
    """Test cropping page image."""
    width, height = 200, 150
    stream = _make_png_stream(width=width, height=height)
    doc_backend = _get_backend_from_stream(stream)
    page_backend: _ImagePageBackend = doc_backend.load_page(0)
    # Crop a region from the center
    cropbox = BoundingBox(l=50, t=30, r=150, b=120, coord_origin=CoordOrigin.TOPLEFT)
    img = page_backend.get_page_image(cropbox=cropbox)
    assert img.width == 100  # 150 - 50
    assert img.height == 90  # 120 - 30


def test_crop_page_image_scaled():
    """Test cropping and scaling page image."""
    width, height = 200, 150
    scale = 0.5
    stream = _make_png_stream(width=width, height=height)
    doc_backend = _get_backend_from_stream(stream)
    page_backend: _ImagePageBackend = doc_backend.load_page(0)
    cropbox = BoundingBox(l=50, t=30, r=150, b=120, coord_origin=CoordOrigin.TOPLEFT)
    img = page_backend.get_page_image(scale=scale, cropbox=cropbox)
    assert img.width == round(100 * scale)  # cropped width * scale
    assert img.height == round(90 * scale)  # cropped height * scale


def test_get_bitmap_rects():
    """Test getting bitmap rects - should return full page rectangle."""
    width, height = 100, 80
    stream = _make_png_stream(width=width, height=height)
    doc_backend = _get_backend_from_stream(stream)
    page_backend: _ImagePageBackend = doc_backend.load_page(0)
    rects = list(page_backend.get_bitmap_rects())
    assert len(rects) == 1
    bbox = rects[0]
    assert bbox.l == 0.0
    assert bbox.t == 0.0
    assert bbox.r == float(width)
    assert bbox.b == float(height)
    assert bbox.coord_origin == CoordOrigin.TOPLEFT


def test_get_bitmap_rects_scaled():
    """Test getting bitmap rects with scaling."""
    width, height = 100, 80
    scale = 2.0
    stream = _make_png_stream(width=width, height=height)
    doc_backend = _get_backend_from_stream(stream)
    page_backend: _ImagePageBackend = doc_backend.load_page(0)
    rects = list(page_backend.get_bitmap_rects(scale=scale))
    assert len(rects) == 1
    bbox = rects[0]
    assert bbox.l == 0.0
    assert bbox.t == 0.0
    assert bbox.r == float(width * scale)
    assert bbox.b == float(height * scale)
    assert bbox.coord_origin == CoordOrigin.TOPLEFT


def test_get_text_in_rect():
    """Test that get_text_in_rect returns empty string for images (no OCR)."""
    stream = _make_png_stream()
    doc_backend = _get_backend_from_stream(stream)
    page_backend: _ImagePageBackend = doc_backend.load_page(0)
    bbox = BoundingBox(l=10, t=10, r=50, b=50, coord_origin=CoordOrigin.TOPLEFT)
    text = page_backend.get_text_in_rect(bbox)
    assert text == ""


def test_multipage_access():
    """Test accessing different pages in multi-page image."""
    num_pages = 4
    stream = _make_multipage_tiff_stream(num_pages=num_pages, size=(64, 64))
    doc_backend = _get_backend_from_stream(stream)
    assert doc_backend.page_count() == num_pages
    # Access each page
    for i in range(num_pages):
        page_backend = doc_backend.load_page(i)
        assert page_backend.is_valid()
        size = page_backend.get_size()
        assert size.width == 64
        assert size.height == 64
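# The tests above reach ImageDocumentBackend directly through InputDocument. As a
# minimal usage sketch (kept commented out, since it is not part of the test
# suite), the same backend is exercised through the public DocumentConverter API;
# the stream construction mirrors _make_png_stream above, and the markdown export
# at the end is only one of several output options.
#
# from io import BytesIO
# from PIL import Image
# from docling.datamodel.base_models import DocumentStream, InputFormat
# from docling.document_converter import DocumentConverter, ImageFormatOption
#
# img = Image.new("RGB", (64, 48), (123, 45, 67))
# buf = BytesIO()
# img.save(buf, format="PNG")
# buf.seek(0)
# converter = DocumentConverter(
#     allowed_formats=[InputFormat.IMAGE],
#     format_options={InputFormat.IMAGE: ImageFormatOption()},
# )
# result = converter.convert(DocumentStream(name="demo.png", stream=buf))
# print(result.document.export_to_markdown())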
{ "repo_id": "docling-project/docling", "file_path": "tests/test_backend_image_native.py", "license": "MIT License", "lines": 172, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
docling-project/docling:docling/experimental/datamodel/threaded_layout_vlm_pipeline_options.py
"""Options for the threaded layout+VLM pipeline.""" from typing import Union from pydantic import model_validator from docling.datamodel.layout_model_specs import DOCLING_LAYOUT_HERON from docling.datamodel.pipeline_options import LayoutOptions, PaginatedPipelineOptions from docling.datamodel.pipeline_options_vlm_model import ( ApiVlmOptions, InlineVlmOptions, ResponseFormat, ) from docling.datamodel.vlm_model_specs import GRANITEDOCLING_TRANSFORMERS class ThreadedLayoutVlmPipelineOptions(PaginatedPipelineOptions): """Pipeline options for the threaded layout+VLM pipeline.""" images_scale: float = 2.0 # VLM configuration (will be enhanced with layout awareness by the pipeline) vlm_options: Union[InlineVlmOptions, ApiVlmOptions] = GRANITEDOCLING_TRANSFORMERS # Layout model configuration layout_options: LayoutOptions = LayoutOptions( model_spec=DOCLING_LAYOUT_HERON, skip_cell_assignment=True ) # Threading and batching controls layout_batch_size: int = 4 vlm_batch_size: int = 4 batch_timeout_seconds: float = 2.0 queue_max_size: int = 50 @model_validator(mode="after") def validate_response_format(self): """Validate that VLM response format is DOCTAGS (required for this pipeline).""" if self.vlm_options.response_format != ResponseFormat.DOCTAGS: raise ValueError( f"ThreadedLayoutVlmPipeline only supports DOCTAGS response format, " f"but got {self.vlm_options.response_format}. " f"Please set vlm_options.response_format=ResponseFormat.DOCTAGS" ) return self
{ "repo_id": "docling-project/docling", "file_path": "docling/experimental/datamodel/threaded_layout_vlm_pipeline_options.py", "license": "MIT License", "lines": 35, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
docling-project/docling:docling/experimental/pipeline/threaded_layout_vlm_pipeline.py
"""Threaded Layout+VLM Pipeline ================================ A specialized two-stage threaded pipeline that combines layout model preprocessing with VLM processing. The layout model detects document elements and coordinates, which are then injected into the VLM prompt for enhanced structured output. """ from __future__ import annotations import itertools import logging from pathlib import Path from typing import TYPE_CHECKING, List, Optional, Union, cast from docling_core.types.doc import DoclingDocument from docling_core.types.doc.document import DocTagsDocument from PIL import Image as PILImage if TYPE_CHECKING: from docling_core.types.doc.page import SegmentedPage from docling.backend.abstract_backend import AbstractDocumentBackend from docling.backend.pdf_backend import PdfDocumentBackend from docling.datamodel.base_models import ConversionStatus, Page from docling.datamodel.document import ConversionResult from docling.datamodel.pipeline_options_vlm_model import ( ApiVlmOptions, InferenceFramework, InlineVlmOptions, ) from docling.datamodel.settings import settings from docling.experimental.datamodel.threaded_layout_vlm_pipeline_options import ( ThreadedLayoutVlmPipelineOptions, ) from docling.models.base_model import BaseVlmPageModel from docling.models.stages.layout.layout_model import LayoutModel from docling.models.vlm_pipeline_models.api_vlm_model import ApiVlmModel from docling.models.vlm_pipeline_models.hf_transformers_model import ( HuggingFaceTransformersVlmModel, ) from docling.models.vlm_pipeline_models.mlx_model import HuggingFaceMlxModel from docling.pipeline.base_pipeline import BasePipeline from docling.pipeline.standard_pdf_pipeline import ( ProcessingResult, RunContext, ThreadedItem, ThreadedPipelineStage, ThreadedQueue, ) from docling.utils.profiling import ProfilingScope, TimeRecorder _log = logging.getLogger(__name__) class ThreadedLayoutVlmPipeline(BasePipeline): """Two-stage threaded pipeline: Layout Model → VLM Model.""" def __init__(self, pipeline_options: ThreadedLayoutVlmPipelineOptions) -> None: super().__init__(pipeline_options) self.pipeline_options: ThreadedLayoutVlmPipelineOptions = pipeline_options self._run_seq = itertools.count(1) # deterministic, monotonic run ids # VLM model type (initialized in _init_models) self.vlm_model: BaseVlmPageModel # Initialize models self._init_models() def _init_models(self) -> None: """Initialize layout and VLM models.""" art_path = self._resolve_artifacts_path() # Layout model self.layout_model = LayoutModel( artifacts_path=art_path, accelerator_options=self.pipeline_options.accelerator_options, options=self.pipeline_options.layout_options, ) # VLM model based on options type # Create layout-aware VLM options internally base_vlm_options = self.pipeline_options.vlm_options class LayoutAwareVlmOptions(type(base_vlm_options)): # type: ignore[misc] def build_prompt( self, page: Optional[SegmentedPage], *, _internal_page: Optional[Page] = None, ) -> str: base_prompt = self.prompt augmented_prompt = base_prompt # In this layout-aware pipeline, _internal_page is always provided if _internal_page is None: return base_prompt if not _internal_page.size: _log.warning( f"Page size not available for page {_internal_page.page_no}. Cannot enhance prompt with layout info." 
) return base_prompt if _internal_page.predictions.layout: from docling_core.types.doc.tokens import DocumentToken layout_elements = [] for cluster in _internal_page.predictions.layout.clusters: # Get proper tag name from DocItemLabel tag_name = DocumentToken.create_token_name_from_doc_item_label( label=cluster.label ) # Convert bbox to tuple and get location tokens bbox_tuple = cluster.bbox.as_tuple() location_tokens = DocumentToken.get_location( bbox=bbox_tuple, page_w=_internal_page.size.width, page_h=_internal_page.size.height, ) # Create XML element with DocTags format xml_element = f"<{tag_name}>{location_tokens}</{tag_name}>" layout_elements.append(xml_element) if layout_elements: # Join elements with newlines and wrap in layout tags layout_xml = ( "<layout>" + "\n".join(layout_elements) + "</layout>" ) layout_injection = f"{layout_xml}" augmented_prompt = base_prompt + layout_injection _log.debug( "Enhanced Prompt with Layout Info: %s\n", augmented_prompt ) return augmented_prompt vlm_options = LayoutAwareVlmOptions(**base_vlm_options.model_dump()) if isinstance(base_vlm_options, ApiVlmOptions): self.vlm_model = ApiVlmModel( enabled=True, enable_remote_services=self.pipeline_options.enable_remote_services, vlm_options=vlm_options, ) elif isinstance(base_vlm_options, InlineVlmOptions): if vlm_options.inference_framework == InferenceFramework.TRANSFORMERS: self.vlm_model = HuggingFaceTransformersVlmModel( enabled=True, artifacts_path=art_path, accelerator_options=self.pipeline_options.accelerator_options, vlm_options=vlm_options, ) elif vlm_options.inference_framework == InferenceFramework.MLX: self.vlm_model = HuggingFaceMlxModel( enabled=True, artifacts_path=art_path, accelerator_options=self.pipeline_options.accelerator_options, vlm_options=vlm_options, ) elif vlm_options.inference_framework == InferenceFramework.VLLM: from docling.models.vlm_pipeline_models.vllm_model import VllmVlmModel self.vlm_model = VllmVlmModel( enabled=True, artifacts_path=art_path, accelerator_options=self.pipeline_options.accelerator_options, vlm_options=vlm_options, ) else: raise ValueError( f"Unsupported VLM inference framework: {vlm_options.inference_framework}" ) else: raise ValueError(f"Unsupported VLM options type: {type(base_vlm_options)}") def _resolve_artifacts_path(self) -> Optional[Path]: """Resolve artifacts path from options or settings.""" if self.pipeline_options.artifacts_path: p = Path(self.pipeline_options.artifacts_path).expanduser() elif settings.artifacts_path: p = Path(settings.artifacts_path).expanduser() else: return None if not p.is_dir(): raise RuntimeError( f"{p} does not exist or is not a directory containing the required models" ) return p def _create_run_ctx(self) -> RunContext: """Create pipeline stages and wire them together.""" opts = self.pipeline_options # Layout stage layout_stage = ThreadedPipelineStage( name="layout", model=self.layout_model, batch_size=opts.layout_batch_size, batch_timeout=opts.batch_timeout_seconds, queue_max_size=opts.queue_max_size, ) # VLM stage - now layout-aware through enhanced build_prompt vlm_stage = ThreadedPipelineStage( name="vlm", model=self.vlm_model, batch_size=opts.vlm_batch_size, batch_timeout=opts.batch_timeout_seconds, queue_max_size=opts.queue_max_size, ) # Wire stages output_q = ThreadedQueue(opts.queue_max_size) layout_stage.add_output_queue(vlm_stage.input_queue) vlm_stage.add_output_queue(output_q) stages = [layout_stage, vlm_stage] return RunContext( stages=stages, first_stage=layout_stage, output_queue=output_q ) def 
_build_document(self, conv_res: ConversionResult) -> ConversionResult: """Build document using threaded layout+VLM pipeline.""" run_id = next(self._run_seq) assert isinstance(conv_res.input._backend, PdfDocumentBackend) backend = conv_res.input._backend # Initialize pages start_page, end_page = conv_res.input.limits.page_range pages: List[Page] = [] images_scale = self.pipeline_options.images_scale for i in range(conv_res.input.page_count): if start_page - 1 <= i <= end_page - 1: page = Page(page_no=i + 1) if images_scale is not None: page._default_image_scale = images_scale page._backend = backend.load_page(i) if page._backend and page._backend.is_valid(): page.size = page._backend.get_size() conv_res.pages.append(page) pages.append(page) if not pages: conv_res.status = ConversionStatus.FAILURE return conv_res total_pages = len(pages) ctx = self._create_run_ctx() for st in ctx.stages: st.start() proc = ProcessingResult(total_expected=total_pages) fed_idx = 0 batch_size = 32 try: while proc.success_count + proc.failure_count < total_pages: # Feed pages to first stage while fed_idx < total_pages: ok = ctx.first_stage.input_queue.put( ThreadedItem( payload=pages[fed_idx], run_id=run_id, page_no=pages[fed_idx].page_no, conv_res=conv_res, ), timeout=0.0, ) if ok: fed_idx += 1 if fed_idx == total_pages: ctx.first_stage.input_queue.close() else: break # Drain results from output out_batch = ctx.output_queue.get_batch(batch_size, timeout=0.05) for itm in out_batch: if itm.run_id != run_id: continue if itm.is_failed or itm.error: proc.failed_pages.append( (itm.page_no, itm.error or RuntimeError("unknown error")) ) else: assert itm.payload is not None proc.pages.append(itm.payload) # Handle early termination if not out_batch and ctx.output_queue.closed: missing = total_pages - (proc.success_count + proc.failure_count) if missing > 0: proc.failed_pages.extend( [(-1, RuntimeError("pipeline terminated early"))] * missing ) break finally: for st in ctx.stages: st.stop() ctx.output_queue.close() self._integrate_results(conv_res, proc) return conv_res def _integrate_results( self, conv_res: ConversionResult, proc: ProcessingResult ) -> None: """Integrate processing results into conversion result.""" page_map = {p.page_no: p for p in proc.pages} # Track failed pages for cleanup failed_page_nos = {fp for fp, _ in proc.failed_pages} # Collect pages that will be removed (failed pages) for resource cleanup pages_to_remove = [p for p in conv_res.pages if p.page_no in failed_page_nos] conv_res.pages = [ page_map.get(p.page_no, p) for p in conv_res.pages if p.page_no in page_map or not any(fp == p.page_no for fp, _ in proc.failed_pages) ] if proc.is_complete_failure: conv_res.status = ConversionStatus.FAILURE elif proc.is_partial_success: conv_res.status = ConversionStatus.PARTIAL_SUCCESS else: conv_res.status = ConversionStatus.SUCCESS # Clean up resources for failed pages that were removed for p in pages_to_remove: if p._backend is not None: p._backend.unload() p._image_cache = {} # Clean up parsed_page if it exists (it's Optional[SegmentedPdfPage]) if p.parsed_page is not None: del p.parsed_page p.parsed_page = None # Clean up images if not needed for remaining pages if not self.pipeline_options.generate_page_images: for p in conv_res.pages: p._image_cache = {} def _assemble_document(self, conv_res: ConversionResult) -> ConversionResult: """Assemble final document from VLM predictions.""" from docling_core.types.doc import DocItem, ImageRef, PictureItem from docling.datamodel.pipeline_options_vlm_model import 
ResponseFormat with TimeRecorder(conv_res, "doc_assemble", scope=ProfilingScope.DOCUMENT): # Response format validation is done in ThreadedLayoutVlmPipelineOptions # This check is kept as a safety net, but should never trigger if validation works if ( self.pipeline_options.vlm_options.response_format != ResponseFormat.DOCTAGS ): raise RuntimeError( f"Unsupported VLM response format {self.pipeline_options.vlm_options.response_format}. Only DOCTAGS format is supported." ) conv_res.document = self._turn_dt_into_doc(conv_res) # Generate images of the requested element types if self.pipeline_options.generate_picture_images: # Create mapping from page_no to Page object since pages may be non-continuous page_map = {p.page_no: p for p in conv_res.pages} scale = self.pipeline_options.images_scale for element, _level in conv_res.document.iterate_items(): if not isinstance(element, DocItem) or len(element.prov) == 0: continue if ( isinstance(element, PictureItem) and self.pipeline_options.generate_picture_images ): page_no = element.prov[0].page_no page = page_map.get(page_no) if page is None: _log.warning( f"Page {page_no} not found in conversion result for picture element. Skipping image generation." ) continue assert page.size is not None assert page.image is not None crop_bbox = ( element.prov[0] .bbox.scaled(scale=scale) .to_top_left_origin(page_height=page.size.height * scale) ) cropped_im = page.image.crop(crop_bbox.as_tuple()) element.image = ImageRef.from_pil( cropped_im, dpi=int(72 * scale) ) return conv_res def _turn_dt_into_doc(self, conv_res: ConversionResult) -> DoclingDocument: """Convert DOCTAGS response format to DoclingDocument.""" doctags_list = [] image_list = [] for page in conv_res.pages: # Only include pages that have both an image and VLM predictions if page.image and page.predictions.vlm_response: predicted_doctags = page.predictions.vlm_response.text image_list.append(page.image) doctags_list.append(predicted_doctags) doctags_list_c = cast(List[Union[Path, str]], doctags_list) image_list_c = cast(List[Union[Path, PILImage.Image]], image_list) doctags_doc = DocTagsDocument.from_doctags_and_image_pairs( doctags_list_c, image_list_c ) document = DoclingDocument.load_from_doctags(doctag_document=doctags_doc) return document @classmethod def get_default_options(cls) -> ThreadedLayoutVlmPipelineOptions: return ThreadedLayoutVlmPipelineOptions() @classmethod def is_backend_supported(cls, backend: AbstractDocumentBackend) -> bool: return isinstance(backend, PdfDocumentBackend) def _determine_status(self, conv_res: ConversionResult) -> ConversionStatus: return conv_res.status def _unload(self, conv_res: ConversionResult) -> None: for p in conv_res.pages: if p._backend is not None: p._backend.unload() if conv_res.input._backend: conv_res.input._backend.unload()
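# For orientation, a sketch of the augmentation that LayoutAwareVlmOptions.build_prompt
# performs above: each detected cluster becomes a DocTags element wrapped in a
# <layout>...</layout> block that is appended to the base prompt. The tag names and
# location-token syntax below are illustrative only, not the exact output of
# DocumentToken.
#
# base_prompt = "Convert this page to docling."
# layout_xml = (
#     "<layout>"
#     "<text><loc_51><loc_42><loc_447><loc_78></text>\n"
#     "<picture><loc_60><loc_90><loc_420><loc_260></picture>"
#     "</layout>"
# )
# augmented_prompt = base_prompt + layout_xml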
{ "repo_id": "docling-project/docling", "file_path": "docling/experimental/pipeline/threaded_layout_vlm_pipeline.py", "license": "MIT License", "lines": 375, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
docling-project/docling:docs/examples/demo_layout_vlm.py
#!/usr/bin/env python3
"""Demo script for the experimental ThreadedLayoutVlmPipeline.

This script demonstrates the usage of the experimental ThreadedLayoutVlmPipeline,
which combines layout model preprocessing with VLM processing in a threaded manner.
"""

import argparse
import logging
import traceback
from pathlib import Path

from docling.datamodel.base_models import ConversionStatus, InputFormat
from docling.datamodel.pipeline_options_vlm_model import ApiVlmOptions, ResponseFormat
from docling.datamodel.vlm_model_specs import GRANITEDOCLING_TRANSFORMERS
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.experimental.datamodel.threaded_layout_vlm_pipeline_options import (
    ThreadedLayoutVlmPipelineOptions,
)
from docling.experimental.pipeline.threaded_layout_vlm_pipeline import (
    ThreadedLayoutVlmPipeline,
)

_log = logging.getLogger(__name__)


def _parse_args():
    parser = argparse.ArgumentParser(
        description="Demo script for the experimental ThreadedLayoutVlmPipeline"
    )
    parser.add_argument(
        "--input-file",
        type=str,
        default="tests/data/pdf/code_and_formula.pdf",
        help="Path to a PDF file",
    )
    parser.add_argument(
        "--output-dir",
        type=str,
        default="scratch/demo_layout_vlm/",
        help="Output directory for converted files",
    )
    return parser.parse_args()


# Can be used to read multiple pdf files under a folder
# def _get_docs(input_doc_path):
#     """Yield DocumentStream objects from list of input document paths"""
#     for path in input_doc_path:
#         buf = BytesIO(path.read_bytes())
#         stream = DocumentStream(name=path.name, stream=buf)
#         yield stream


def openai_compatible_vlm_options(
    model: str,
    prompt: str,
    format: ResponseFormat,
    hostname_and_port,
    temperature: float = 0.7,
    max_tokens: int = 4096,
    api_key: str = "",
    skip_special_tokens=False,
):
    headers = {}
    if api_key:
        headers["Authorization"] = f"Bearer {api_key}"

    options = ApiVlmOptions(
        url=f"http://{hostname_and_port}/v1/chat/completions",  # LM studio defaults to port 1234, VLLM to 8000
        params=dict(
            model=model,
            max_tokens=max_tokens,
            skip_special_tokens=skip_special_tokens,  # needed for VLLM
        ),
        headers=headers,
        prompt=prompt,
        timeout=90,
        scale=2.0,
        temperature=temperature,
        response_format=format,
    )
    return options


def demo_threaded_layout_vlm_pipeline(
    input_doc_path: Path, out_dir_layout_aware: Path, use_api_vlm: bool
):
    """Demonstrate the threaded layout+VLM pipeline."""
    vlm_options = GRANITEDOCLING_TRANSFORMERS.model_copy()
    if use_api_vlm:
        vlm_options = openai_compatible_vlm_options(
            model="granite-docling-258m-mlx",  # For VLLM use "ibm-granite/granite-docling-258M"
            hostname_and_port="localhost:1234",  # LM studio defaults to port 1234, VLLM to 8000
            prompt="Convert this page to docling.",
            format=ResponseFormat.DOCTAGS,
            api_key="",
        )
    vlm_options.track_input_prompt = True

    # Configure pipeline options
    print("Configuring pipeline options...")
    pipeline_options_layout_aware = ThreadedLayoutVlmPipelineOptions(
        # VLM configuration - defaults to GRANITEDOCLING_TRANSFORMERS
        vlm_options=vlm_options,
        # Layout configuration - defaults to DOCLING_LAYOUT_HERON
        # Batch sizes for parallel processing
        layout_batch_size=2,
        vlm_batch_size=1,
        # Queue configuration
        queue_max_size=10,
        # Image processing
        images_scale=vlm_options.scale,
        generate_page_images=True,
        enable_remote_services=use_api_vlm,
    )

    # Create converter with the new pipeline
    print("Initializing DocumentConverter (this may take a while - loading models)...")
    doc_converter_layout_enhanced = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_cls=ThreadedLayoutVlmPipeline,
                pipeline_options=pipeline_options_layout_aware,
            )
        }
    )

    result_layout_aware = doc_converter_layout_enhanced.convert(
        source=input_doc_path, raises_on_error=False
    )
    if result_layout_aware.status == ConversionStatus.FAILURE:
        _log.error(f"Conversion failed: {result_layout_aware.status}")

    doc_filename = result_layout_aware.input.file.stem
    result_layout_aware.document.save_as_json(
        out_dir_layout_aware / f"{doc_filename}.json"
    )
    result_layout_aware.document.save_as_html(
        out_dir_layout_aware / f"{doc_filename}.html", split_page_view=True
    )

    for page in result_layout_aware.pages:
        _log.info("Page %s of VLM response:", page.page_no)
        if page.predictions.vlm_response:
            _log.info(page.predictions.vlm_response)


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    try:
        args = _parse_args()
        _log.info(
            f"Parsed arguments: input={args.input_file}, output={args.output_dir}"
        )

        input_path = Path(args.input_file)
        if not input_path.exists():
            raise FileNotFoundError(f"Input file does not exist: {input_path}")
        if input_path.suffix.lower() != ".pdf":
            raise ValueError(f"Input file must be a PDF: {input_path}")

        out_dir_layout_aware = Path(args.output_dir) / "layout_aware/"
        out_dir_layout_aware.mkdir(parents=True, exist_ok=True)

        use_api_vlm = False  # Set to True to use an API-hosted VLM instead of the inline model
        demo_threaded_layout_vlm_pipeline(input_path, out_dir_layout_aware, use_api_vlm)
    except Exception:
        traceback.print_exc()
        raise
{ "repo_id": "docling-project/docling", "file_path": "docs/examples/demo_layout_vlm.py", "license": "MIT License", "lines": 147, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
docling-project/docling:docling/pipeline/legacy_standard_pdf_pipeline.py
import logging
import warnings
from pathlib import Path
from typing import Optional, cast

import numpy as np
from docling_core.types.doc import DocItem, ImageRef, PictureItem, TableItem

from docling.backend.abstract_backend import AbstractDocumentBackend
from docling.backend.pdf_backend import PdfDocumentBackend
from docling.datamodel.base_models import AssembledUnit, Page
from docling.datamodel.document import ConversionResult
from docling.datamodel.layout_model_specs import LayoutModelConfig
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.datamodel.settings import settings
from docling.models.base_ocr_model import BaseOcrModel
from docling.models.factories import (
    get_layout_factory,
    get_ocr_factory,
    get_table_structure_factory,
)
from docling.models.stages.code_formula.code_formula_model import (
    CodeFormulaModel,
    CodeFormulaModelOptions,
)
from docling.models.stages.page_assemble.page_assemble_model import (
    PageAssembleModel,
    PageAssembleOptions,
)
from docling.models.stages.page_preprocessing.page_preprocessing_model import (
    PagePreprocessingModel,
    PagePreprocessingOptions,
)
from docling.models.stages.reading_order.readingorder_model import (
    ReadingOrderModel,
    ReadingOrderOptions,
)
from docling.pipeline.base_pipeline import PaginatedPipeline
from docling.utils.model_downloader import download_models
from docling.utils.profiling import ProfilingScope, TimeRecorder

_log = logging.getLogger(__name__)


class LegacyStandardPdfPipeline(PaginatedPipeline):
    def __init__(self, pipeline_options: PdfPipelineOptions):
        super().__init__(pipeline_options)
        self.pipeline_options: PdfPipelineOptions

        with warnings.catch_warnings():  # deprecated generate_table_images
            warnings.filterwarnings("ignore", category=DeprecationWarning)
            self.keep_images = (
                self.pipeline_options.generate_page_images
                or self.pipeline_options.generate_picture_images
                or self.pipeline_options.generate_table_images
            )

        self.reading_order_model = ReadingOrderModel(options=ReadingOrderOptions())

        ocr_model = self.get_ocr_model(artifacts_path=self.artifacts_path)

        layout_factory = get_layout_factory(
            allow_external_plugins=self.pipeline_options.allow_external_plugins
        )
        layout_model = layout_factory.create_instance(
            options=pipeline_options.layout_options,
            artifacts_path=self.artifacts_path,
            accelerator_options=pipeline_options.accelerator_options,
            enable_remote_services=pipeline_options.enable_remote_services,
        )

        table_factory = get_table_structure_factory(
            allow_external_plugins=self.pipeline_options.allow_external_plugins
        )
        table_model = table_factory.create_instance(
            options=pipeline_options.table_structure_options,
            enabled=pipeline_options.do_table_structure,
            artifacts_path=self.artifacts_path,
            accelerator_options=pipeline_options.accelerator_options,
            enable_remote_services=pipeline_options.enable_remote_services,
        )

        self.build_pipe = [
            # Pre-processing
            PagePreprocessingModel(
                options=PagePreprocessingOptions(
                    images_scale=pipeline_options.images_scale,
                )
            ),
            # OCR
            ocr_model,
            # Layout model
            layout_model,
            # Table structure model
            table_model,
            # Page assemble
            PageAssembleModel(options=PageAssembleOptions()),
        ]

        self.enrichment_pipe = [
            # Code Formula Enrichment Model
            CodeFormulaModel(
                enabled=pipeline_options.do_code_enrichment
                or pipeline_options.do_formula_enrichment,
                artifacts_path=self.artifacts_path,
                options=CodeFormulaModelOptions(
                    do_code_enrichment=pipeline_options.do_code_enrichment,
                    do_formula_enrichment=pipeline_options.do_formula_enrichment,
                ),
                accelerator_options=pipeline_options.accelerator_options,
            ),
            *self.enrichment_pipe,
        ]

        if (
            self.pipeline_options.do_formula_enrichment
            or self.pipeline_options.do_code_enrichment
            or self.pipeline_options.do_picture_classification
            or self.pipeline_options.do_picture_description
        ):
            self.keep_backend = True

    @staticmethod
    def download_models_hf(
        local_dir: Optional[Path] = None, force: bool = False
    ) -> Path:
        warnings.warn(
            "The usage of LegacyStandardPdfPipeline.download_models_hf() is deprecated; "
            "use instead the utility `docling-tools models download`, or "
            "the upstream method docling.utils.models_downloader.download_all()",
            DeprecationWarning,
            stacklevel=3,
        )

        output_dir = download_models(output_dir=local_dir, force=force, progress=False)
        return output_dir

    def get_ocr_model(self, artifacts_path: Optional[Path] = None) -> BaseOcrModel:
        factory = get_ocr_factory(
            allow_external_plugins=self.pipeline_options.allow_external_plugins
        )
        return factory.create_instance(
            options=self.pipeline_options.ocr_options,
            enabled=self.pipeline_options.do_ocr,
            artifacts_path=artifacts_path,
            accelerator_options=self.pipeline_options.accelerator_options,
        )

    def initialize_page(self, conv_res: ConversionResult, page: Page) -> Page:
        with TimeRecorder(conv_res, "page_init"):
            page._backend = conv_res.input._backend.load_page(page.page_no - 1)  # type: ignore
            if page._backend is not None and page._backend.is_valid():
                page.size = page._backend.get_size()

        return page

    def _assemble_document(self, conv_res: ConversionResult) -> ConversionResult:
        all_elements = []
        all_headers = []
        all_body = []

        with TimeRecorder(conv_res, "doc_assemble", scope=ProfilingScope.DOCUMENT):
            for p in conv_res.pages:
                if p.assembled is not None:
                    for el in p.assembled.body:
                        all_body.append(el)
                    for el in p.assembled.headers:
                        all_headers.append(el)
                    for el in p.assembled.elements:
                        all_elements.append(el)

            conv_res.assembled = AssembledUnit(
                elements=all_elements, headers=all_headers, body=all_body
            )

            conv_res.document = self.reading_order_model(conv_res)

            # Generate page images in the output
            if self.pipeline_options.generate_page_images:
                for page in conv_res.pages:
                    assert page.image is not None
                    page_no = page.page_no
                    conv_res.document.pages[page_no].image = ImageRef.from_pil(
                        page.image, dpi=int(72 * self.pipeline_options.images_scale)
                    )

            # Generate images of the requested element types
            with warnings.catch_warnings():  # deprecated generate_table_images
                warnings.filterwarnings("ignore", category=DeprecationWarning)
                if (
                    self.pipeline_options.generate_picture_images
                    or self.pipeline_options.generate_table_images
                ):
                    scale = self.pipeline_options.images_scale
                    for element, _level in conv_res.document.iterate_items():
                        if not isinstance(element, DocItem) or len(element.prov) == 0:
                            continue
                        if (
                            isinstance(element, PictureItem)
                            and self.pipeline_options.generate_picture_images
                        ) or (
                            isinstance(element, TableItem)
                            and self.pipeline_options.generate_table_images
                        ):
                            page_ix = element.prov[0].page_no - 1
                            page = next(
                                (p for p in conv_res.pages if p.page_no == page_ix),
                                cast("Page", None),
                            )
                            assert page is not None
                            assert page.size is not None
                            assert page.image is not None

                            crop_bbox = (
                                element.prov[0]
                                .bbox.scaled(scale=scale)
                                .to_top_left_origin(
                                    page_height=page.size.height * scale
                                )
                            )

                            cropped_im = page.image.crop(crop_bbox.as_tuple())
                            element.image = ImageRef.from_pil(
                                cropped_im, dpi=int(72 * scale)
                            )

            # Aggregate confidence values for document:
            if len(conv_res.pages) > 0:
                with warnings.catch_warnings():
                    warnings.filterwarnings(
                        "ignore",
                        category=RuntimeWarning,
                        message="Mean of empty slice|All-NaN slice encountered",
                    )
                    conv_res.confidence.layout_score = float(
                        np.nanmean(
                            [c.layout_score for c in conv_res.confidence.pages.values()]
                        )
                    )
                    conv_res.confidence.parse_score = float(
                        np.nanquantile(
                            [c.parse_score for c in conv_res.confidence.pages.values()],
                            q=0.1,  # parse score should relate to worst 10% of pages.
                        )
                    )
                    conv_res.confidence.table_score = float(
                        np.nanmean(
                            [c.table_score for c in conv_res.confidence.pages.values()]
                        )
                    )
                    conv_res.confidence.ocr_score = float(
                        np.nanmean(
                            [c.ocr_score for c in conv_res.confidence.pages.values()]
                        )
                    )

        return conv_res

    @classmethod
    def get_default_options(cls) -> PdfPipelineOptions:
        return PdfPipelineOptions()

    @classmethod
    def is_backend_supported(cls, backend: AbstractDocumentBackend):
        return isinstance(backend, PdfDocumentBackend)
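# A small numeric sketch (commented out) of the confidence aggregation above:
# layout/table/OCR scores use the NaN-aware mean, while parse_score uses the 0.1
# quantile, so a few badly parsed pages dominate the document-level score. The
# values below are made up for illustration.
#
# import numpy as np
# parse_scores = [0.95, 0.97, 0.40, 0.96]  # one bad page
# print(float(np.nanmean(parse_scores)))           # 0.82 - mean hides the bad page
# print(float(np.nanquantile(parse_scores, 0.1)))  # ~0.57 - pulled toward the worst page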
{ "repo_id": "docling-project/docling", "file_path": "docling/pipeline/legacy_standard_pdf_pipeline.py", "license": "MIT License", "lines": 235, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
docling-project/docling:docs/examples/gpu_standard_pipeline.py
# %% [markdown]
# # What this example does
# - Run a conversion using the best setup for GPU for the standard pipeline
# # Requirements
# - Python 3.9+
# - Install Docling: `pip install docling`
# # How to run
# - `python docs/examples/gpu_standard_pipeline.py`
#
# This example is part of a set of GPU optimization strategies. Read more about it in [GPU support](../../usage/gpu/)
#
# ## Example code
# %%
import datetime
import logging
import time
from pathlib import Path

import numpy as np
from pydantic import TypeAdapter

from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
from docling.datamodel.base_models import ConversionStatus, InputFormat
from docling.datamodel.pipeline_options import (
    ThreadedPdfPipelineOptions,
)
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.threaded_standard_pdf_pipeline import ThreadedStandardPdfPipeline
from docling.utils.profiling import ProfilingItem

_log = logging.getLogger(__name__)


def main():
    logging.getLogger("docling").setLevel(logging.WARNING)
    _log.setLevel(logging.INFO)

    data_folder = Path(__file__).parent / "../../tests/data"
    # input_doc_path = data_folder / "pdf" / "2305.03393v1.pdf"  # 14 pages
    input_doc_path = data_folder / "pdf" / "redp5110_sampled.pdf"  # 18 pages

    pipeline_options = ThreadedPdfPipelineOptions(
        accelerator_options=AcceleratorOptions(
            device=AcceleratorDevice.CUDA,
        ),
        ocr_batch_size=4,
        layout_batch_size=64,
        table_batch_size=4,
    )
    pipeline_options.do_ocr = False

    doc_converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_cls=ThreadedStandardPdfPipeline,
                pipeline_options=pipeline_options,
            )
        }
    )

    start_time = time.time()
    doc_converter.initialize_pipeline(InputFormat.PDF)
    init_runtime = time.time() - start_time
    _log.info(f"Pipeline initialized in {init_runtime:.2f} seconds.")

    start_time = time.time()
    conv_result = doc_converter.convert(input_doc_path)
    pipeline_runtime = time.time() - start_time

    assert conv_result.status == ConversionStatus.SUCCESS

    num_pages = len(conv_result.pages)
    _log.info(f"Document converted in {pipeline_runtime:.2f} seconds.")
    _log.info(f"  {num_pages / pipeline_runtime:.2f} pages/second.")


if __name__ == "__main__":
    main()
{ "repo_id": "docling-project/docling", "file_path": "docs/examples/gpu_standard_pipeline.py", "license": "MIT License", "lines": 67, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple