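# NOTE: The inline script metadata below is reconstructed from this script's
# imports (qwen_vl_utils is optional and handled at runtime); pin versions as
# needed for your environment.
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "datasets",
#     "hf-transfer",
#     "huggingface-hub",
#     "pillow",
#     "toolz",
#     "torch",
#     "tqdm",
#     "transformers",
#     "vllm",
# ]
# ///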
| """ |
| Document layout analysis and OCR using dots.ocr with vLLM. |
| |
| This script processes document images through the dots.ocr model to extract |
| layout information, text content, or both. Supports multiple output formats |
| including JSON, structured columns, and markdown. |
| |
| Features: |
| - Layout detection with bounding boxes and categories |
| - Text extraction with reading order preservation |
| - Multiple prompt modes for different tasks |
| - Flexible output formats |
| - Multilingual document support |
| """ |

import argparse
import base64
import io
import json
import logging
import os
import sys
from typing import Any, Dict, List, Optional, Union

import torch
from datasets import load_dataset
from huggingface_hub import login
from PIL import Image
from toolz import partition_all
from tqdm.auto import tqdm

try:
    from vllm import LLM, SamplingParams

    VLLM_AVAILABLE = True
except ImportError:
    VLLM_AVAILABLE = False

from transformers import AutoModelForCausalLM, AutoProcessor

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

try:
    from qwen_vl_utils import process_vision_info

    QWEN_VL_AVAILABLE = True
except ImportError:
    QWEN_VL_AVAILABLE = False
    logger.warning("qwen_vl_utils not available, transformers backend may not work properly")


PROMPT_MODES = {
    "layout-all": """Please output the layout information from the PDF image, including each layout element's bbox, its category, and the corresponding text content within the bbox.

1. Bbox format: [x1, y1, x2, y2]

2. Layout Categories: The possible categories are ['Caption', 'Footnote', 'Formula', 'List-item', 'Page-footer', 'Page-header', 'Picture', 'Section-header', 'Table', 'Text', 'Title'].

3. Text Extraction & Formatting Rules:
- Picture: For the 'Picture' category, the text field should be omitted.
- Formula: Format its text as LaTeX.
- Table: Format its text as HTML.
- All Others (Text, Title, etc.): Format their text as Markdown.

4. Constraints:
- The output text must be the original text from the image, with no translation.
- All layout elements must be sorted according to human reading order.

5. Final Output: The entire output must be a single JSON object.
""",
    "layout-only": """Please output the layout information from this PDF image, including each layout's bbox and its category. The bbox should be in the format [x1, y1, x2, y2]. The layout categories for the PDF document include ['Caption', 'Footnote', 'Formula', 'List-item', 'Page-footer', 'Page-header', 'Picture', 'Section-header', 'Table', 'Text', 'Title']. Do not output the corresponding text. The layout result should be in JSON format.""",
    "ocr": """Extract the text content from this image.""",
    "grounding-ocr": """Extract text from the given bounding box on the image (format: [x1, y1, x2, y2]).\nBounding Box:\n""",
}
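
# NOTE (assumption): parse_dots_output() below expects the layout modes to return
# a single JSON object shaped roughly like this sketch; the authoritative schema
# is whatever the dots.ocr model actually emits, which may differ:
#
#   {
#       "bboxes": [[x1, y1, x2, y2], ...],
#       "categories": ["Title", "Text", ...],
#       "texts": ["...", ...],               # layout-all mode only
#       "reading_order": [[0, 1], [2], ...]  # optional groups of element indices
#   }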


def check_cuda_availability():
    """Check if CUDA is available and exit if not."""
    if not torch.cuda.is_available():
        logger.error("CUDA is not available. This script requires a GPU.")
        logger.error("Please run on a machine with a CUDA-capable GPU.")
        sys.exit(1)
    else:
        logger.info(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")


def make_dots_message(
    image: Union[Image.Image, Dict[str, Any], str],
    mode: str = "layout-all",
    bbox: Optional[List[int]] = None,
) -> List[Dict]:
    """Create chat message for dots.ocr processing."""
    # Normalize the input into a PIL image
    if isinstance(image, Image.Image):
        pil_img = image
    elif isinstance(image, dict) and "bytes" in image:
        pil_img = Image.open(io.BytesIO(image["bytes"]))
    elif isinstance(image, str):
        pil_img = Image.open(image)
    else:
        raise ValueError(f"Unsupported image type: {type(image)}")

    # Encode the image as a base64 data URI for the chat API
    buf = io.BytesIO()
    pil_img.save(buf, format="PNG")
    data_uri = f"data:image/png;base64,{base64.b64encode(buf.getvalue()).decode()}"

    prompt = PROMPT_MODES.get(mode, PROMPT_MODES["layout-all"])

    # grounding-ocr appends the target bounding box to the prompt
    if mode == "grounding-ocr" and bbox:
        prompt = prompt + str(bbox)

    return [
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": data_uri}},
                {"type": "text", "text": prompt},
            ],
        }
    ]


def parse_dots_output(
    output: str,
    output_format: str = "json",
    filter_category: Optional[str] = None,
    mode: str = "layout-all",
) -> Union[str, Dict[str, List]]:
    """Parse dots.ocr output and convert to requested format."""
    # OCR mode returns plain text, so there is no JSON to parse
    if mode == "ocr":
        return output.strip()

    try:
        data = json.loads(output.strip())

        # Optionally keep only elements of a single layout category
        if filter_category and "categories" in data:
            indices = [i for i, cat in enumerate(data["categories"]) if cat == filter_category]
            filtered_data = {
                "bboxes": [data["bboxes"][i] for i in indices],
                "categories": [data["categories"][i] for i in indices],
            }

            if "texts" in data:
                filtered_data["texts"] = [data["texts"][i] for i in indices]

            if "reading_order" in data:
                # Keep only surviving indices and remap them to the positions
                # they now occupy in the filtered lists
                filtered_reading_order = []
                for group in data.get("reading_order", []):
                    filtered_group = [idx for idx in group if idx in indices]
                    if filtered_group:
                        remapped_group = [indices.index(idx) for idx in filtered_group]
                        filtered_reading_order.append(remapped_group)
                if filtered_reading_order:
                    filtered_data["reading_order"] = filtered_reading_order

            data = filtered_data

        if output_format == "json":
            return json.dumps(data, ensure_ascii=False)

        elif output_format == "structured":
            result = {
                "bboxes": data.get("bboxes", []),
                "categories": data.get("categories", []),
            }

            # Only layout-all mode produces text content
            if mode == "layout-all":
                result["texts"] = data.get("texts", [])
            else:
                result["texts"] = []

            return result

        elif output_format == "markdown":
            if mode != "layout-all" or "texts" not in data:
                logger.warning("Markdown format works best with layout-all mode")
                return json.dumps(data, ensure_ascii=False)

            md_lines = []
            texts = data.get("texts", [])
            categories = data.get("categories", [])
            reading_order = data.get("reading_order", [])

            # Emit elements in reading order when available, else in list order
            if reading_order:
                for group in reading_order:
                    for idx in group:
                        if idx < len(texts) and idx < len(categories):
                            text = texts[idx]
                            category = categories[idx]
                            md_lines.append(format_markdown_text(text, category))
            else:
                for text, category in zip(texts, categories):
                    md_lines.append(format_markdown_text(text, category))

            return "\n".join(md_lines)

        # Unknown format: fall back to JSON so callers always get a value
        return json.dumps(data, ensure_ascii=False)

    except json.JSONDecodeError as e:
        logger.warning(f"Failed to parse JSON output: {e}")
        return output.strip()
    except Exception as e:
        logger.error(f"Error parsing output: {e}")
        return output.strip()
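

# Illustrative input/output pairs for format_markdown_text (hypothetical values):
#   ("Results", "Section-header") -> "## Results\n"
#   ("E = mc^2", "Formula")       -> "\n$E = mc^2$\n"
#   ("First point", "List-item")  -> "- First point"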
def format_markdown_text(text: str, category: str) -> str:
    """Format text based on its category for markdown output."""
    if category == "Title":
        return f"# {text}\n"
    elif category == "Section-header":
        return f"## {text}\n"
    elif category == "List-item":
        return f"- {text}"
    elif category in ("Page-header", "Page-footer"):
        return f"_{text}_\n"
    elif category == "Caption":
        return f"**{text}**\n"
    elif category == "Footnote":
        return f"[^{text}]\n"
    elif category == "Table":
        # Table text is already HTML per the prompt rules; pass it through
        return f"\n{text}\n"
    elif category == "Formula":
        # Formula text is already LaTeX per the prompt rules
        return f"\n${text}$\n"
    elif category == "Picture":
        # Picture elements carry no text; emit a placeholder
        return "\n![Image]()\n"
    else:
        return f"{text}\n"


def process_with_transformers(
    images: List[Union[Image.Image, Dict[str, Any], str]],
    model,
    processor,
    mode: str = "layout-all",
    max_new_tokens: int = 16384,
    bbox: Optional[List[int]] = None,
) -> List[str]:
    """Process images using transformers instead of vLLM."""
    outputs = []

    for image in tqdm(images, desc="Processing with transformers"):
        # Normalize the input into a PIL image
        if isinstance(image, dict) and "bytes" in image:
            pil_image = Image.open(io.BytesIO(image["bytes"]))
        elif isinstance(image, str):
            pil_image = Image.open(image)
        else:
            pil_image = image

        prompt = PROMPT_MODES.get(mode, PROMPT_MODES["layout-all"])
        if mode == "grounding-ocr" and bbox:
            prompt = prompt + str(bbox)

        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "image": pil_image},
                    {"type": "text", "text": prompt},
                ],
            }
        ]

        text = processor.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
        )

        if QWEN_VL_AVAILABLE:
            # qwen_vl_utils handles image preprocessing (resizing, tiling)
            image_inputs, video_inputs = process_vision_info(messages)
            inputs = processor(
                text=[text],
                images=image_inputs,
                videos=video_inputs,
                padding=True,
                return_tensors="pt",
            )
        else:
            # Fallback: pass the PIL image directly to the processor
            inputs = processor(
                text=text,
                images=[pil_image],
                return_tensors="pt",
            )

        inputs = inputs.to(model.device)

        # Greedy decoding for deterministic output
        with torch.no_grad():
            generated_ids = model.generate(
                **inputs,
                max_new_tokens=max_new_tokens,
                do_sample=False,
            )

        # Strip the prompt tokens from the generated sequence
        generated_ids_trimmed = [
            out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
        ]
        output_text = processor.batch_decode(
            generated_ids_trimmed,
            skip_special_tokens=True,
            clean_up_tokenization_spaces=False,
        )[0]

        outputs.append(output_text.strip())

    return outputs
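

# Example (hypothetical file names, assuming a model and processor loaded as in
# main() below): run the transformers path directly on local page images:
#   texts = process_with_transformers(["page1.png", "page2.png"], hf_model, processor, mode="ocr")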


def main(
    input_dataset: str,
    output_dataset: str,
    image_column: str = "image",
    mode: str = "layout-all",
    output_format: str = "json",
    filter_category: Optional[str] = None,
    batch_size: int = 32,
    model: str = "rednote-hilab/dots.ocr",
    max_model_len: int = 24000,
    max_tokens: int = 16384,
    gpu_memory_utilization: float = 0.8,
    hf_token: Optional[str] = None,
    split: str = "train",
    max_samples: Optional[int] = None,
    private: bool = False,
    use_transformers: bool = False,
    bbox: Optional[List[int]] = None,
    # Column names for outputs
    output_column: str = "dots_ocr_output",
    bbox_column: str = "layout_bboxes",
    category_column: str = "layout_categories",
    text_column: str = "layout_texts",
    markdown_column: str = "markdown",
):
    """Process images from a HF dataset through the dots.ocr model and push the results to the Hub."""
    check_cuda_availability()

    # grounding-ocr needs a target box to append to the prompt
    if mode == "grounding-ocr" and bbox is None:
        raise ValueError("grounding-ocr mode requires a bounding box (--bbox)")

    # Enable fast Hub transfers
    os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

    HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
    if HF_TOKEN:
        login(token=HF_TOKEN)

    logger.info(f"Loading dataset: {input_dataset}")
    dataset = load_dataset(input_dataset, split=split)

    if image_column not in dataset.column_names:
        raise ValueError(
            f"Column '{image_column}' not found. Available: {dataset.column_names}"
        )

    if max_samples:
        dataset = dataset.select(range(min(max_samples, len(dataset))))
        logger.info(f"Limited to {len(dataset)} samples")

    all_outputs = []

    if use_transformers or not VLLM_AVAILABLE:
        if not use_transformers and not VLLM_AVAILABLE:
            logger.warning("vLLM not available, falling back to transformers")

        logger.info(f"Initializing transformers with model: {model}")
        hf_model = AutoModelForCausalLM.from_pretrained(
            model,
            torch_dtype=torch.bfloat16,
            device_map="auto",
            trust_remote_code=True,
        )
        processor = AutoProcessor.from_pretrained(model, trust_remote_code=True)

        logger.info(f"Processing {len(dataset)} images with transformers")
        logger.info(f"Mode: {mode}, Output format: {output_format}")

        all_images = [dataset[i][image_column] for i in range(len(dataset))]
        raw_outputs = process_with_transformers(
            all_images,
            hf_model,
            processor,
            mode=mode,
            max_new_tokens=max_tokens,
            bbox=bbox,
        )

        for raw_text in raw_outputs:
            parsed = parse_dots_output(raw_text, output_format, filter_category, mode)
            all_outputs.append(parsed)

    else:
        logger.info(f"Initializing vLLM with model: {model}")
        llm = LLM(
            model=model,
            trust_remote_code=True,
            max_model_len=max_model_len,
            gpu_memory_utilization=gpu_memory_utilization,
        )

        sampling_params = SamplingParams(
            temperature=0.0,
            max_tokens=max_tokens,
        )

        logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
        logger.info(f"Mode: {mode}, Output format: {output_format}")

        for batch_indices in tqdm(
            partition_all(batch_size, range(len(dataset))),
            total=(len(dataset) + batch_size - 1) // batch_size,
            desc="dots.ocr processing",
        ):
            batch_indices = list(batch_indices)
            batch_images = [dataset[i][image_column] for i in batch_indices]

            try:
                batch_messages = [
                    make_dots_message(img, mode=mode, bbox=bbox) for img in batch_images
                ]

                outputs = llm.chat(batch_messages, sampling_params)

                for output in outputs:
                    raw_text = output.outputs[0].text.strip()
                    parsed = parse_dots_output(raw_text, output_format, filter_category, mode)
                    all_outputs.append(parsed)

            except Exception as e:
                logger.error(f"Error processing batch: {e}")
                # Store errors as JSON strings so each output column keeps a
                # single consistent element type
                all_outputs.extend([json.dumps({"error": str(e)})] * len(batch_images))

    logger.info("Adding output columns to dataset")

    if output_format == "json":
        dataset = dataset.add_column(output_column, all_outputs)

    elif output_format == "structured":
        # Split parsed dicts into separate flat columns
        bboxes = []
        categories = []
        texts = []

        for output in all_outputs:
            if isinstance(output, dict) and "error" not in output:
                bboxes.append(output.get("bboxes", []))
                categories.append(output.get("categories", []))
                texts.append(output.get("texts", []))
            else:
                bboxes.append([])
                categories.append([])
                texts.append([])

        dataset = dataset.add_column(bbox_column, bboxes)
        dataset = dataset.add_column(category_column, categories)
        dataset = dataset.add_column(text_column, texts)

    elif output_format == "markdown":
        dataset = dataset.add_column(markdown_column, all_outputs)

    else:
        dataset = dataset.add_column(output_column, all_outputs)

    logger.info(f"Pushing to {output_dataset}")
    dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN)

    logger.info("✅ dots.ocr processing complete!")
    logger.info(
        f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
    )


if __name__ == "__main__":
    # Show usage information when run without arguments
    if len(sys.argv) == 1:
        print("=" * 80)
        print("dots.ocr Document Layout Analysis and OCR")
        print("=" * 80)
        print("\nThis script processes document images using the dots.ocr model to")
        print("extract layout information, text content, or both.")
        print("\nFeatures:")
        print("- Layout detection with bounding boxes and categories")
        print("- Text extraction with reading order preservation")
        print("- Multiple output formats (JSON, structured, markdown)")
        print("- Multilingual document support")
        print("\nExample usage:")
        print("\n1. Full layout analysis + OCR (default):")
        print("   uv run dots-ocr.py document-images analyzed-docs")
        print("\n2. Layout detection only:")
        print("   uv run dots-ocr.py scanned-pdfs layout-analysis --mode layout-only")
        print("\n3. Simple OCR (text only):")
        print("   uv run dots-ocr.py documents extracted-text --mode ocr")
        print("\n4. Convert to markdown:")
        print("   uv run dots-ocr.py papers papers-markdown --output-format markdown")
        print("\n5. Extract only tables:")
        print("   uv run dots-ocr.py reports table-data --filter-category Table")
        print("\n6. Structured output with custom columns:")
        print("   uv run dots-ocr.py docs analyzed \\")
        print("       --output-format structured \\")
        print("       --bbox-column boxes \\")
        print("       --category-column types \\")
        print("       --text-column content")
        print("\n7. Process a subset for testing:")
        print("   uv run dots-ocr.py large-dataset test-output --max-samples 10")
        print("\n8. Use transformers backend (more compatible):")
        print("   uv run dots-ocr.py documents analyzed --use-transformers")
        print("\n9. Running on HF Jobs:")
        print("   hf jobs run --gpu l4x1 \\")
        print("     -e HF_TOKEN=$(python3 -c \"from huggingface_hub import get_token; print(get_token())\") \\")
        print("     uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/dots-ocr.py \\")
        print("       your-document-dataset \\")
        print("       your-analyzed-output \\")
        print("       --use-transformers")
        print("\n" + "=" * 80)
        print("\nFor full help, run: uv run dots-ocr.py --help")
        sys.exit(0)

    parser = argparse.ArgumentParser(
        description="Document layout analysis and OCR using dots.ocr",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Modes:
  layout-all    - Extract layout + text content (default)
  layout-only   - Extract only layout information (bbox + category)
  ocr           - Extract only text content
  grounding-ocr - Extract text from specific bbox (requires --bbox)

Output Formats:
  json       - Raw JSON output from model (default)
  structured - Separate columns for bboxes, categories, texts
  markdown   - Convert to markdown format

Examples:
  # Basic layout + OCR
  uv run dots-ocr.py my-docs analyzed-docs

  # Layout detection only
  uv run dots-ocr.py papers layouts --mode layout-only

  # Convert to markdown
  uv run dots-ocr.py scans readable --output-format markdown

  # Extract only formulas
  uv run dots-ocr.py math-docs formulas --filter-category Formula
""",
    )

    parser.add_argument("input_dataset", help="Input dataset ID from Hugging Face Hub")
    parser.add_argument("output_dataset", help="Output dataset ID for Hugging Face Hub")
    parser.add_argument(
        "--image-column",
        default="image",
        help="Column containing images (default: image)",
    )
    parser.add_argument(
        "--mode",
        choices=["layout-all", "layout-only", "ocr", "grounding-ocr"],
        default="layout-all",
        help="Processing mode (default: layout-all)",
    )
    parser.add_argument(
        "--bbox",
        type=int,
        nargs=4,
        metavar=("X1", "Y1", "X2", "Y2"),
        help="Bounding box for grounding-ocr mode, applied to every image",
    )
    parser.add_argument(
        "--output-format",
        choices=["json", "structured", "markdown"],
        default="json",
        help="Output format (default: json)",
    )
    parser.add_argument(
        "--filter-category",
        choices=['Caption', 'Footnote', 'Formula', 'List-item', 'Page-footer',
                 'Page-header', 'Picture', 'Section-header', 'Table', 'Text', 'Title'],
        help="Filter results by layout category",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=32,
        help="Batch size for processing (default: 32)",
    )
    parser.add_argument(
        "--model",
        default="rednote-hilab/dots.ocr",
        help="Model to use (default: rednote-hilab/dots.ocr)",
    )
    parser.add_argument(
        "--max-model-len",
        type=int,
        default=24000,
        help="Maximum model context length (default: 24000)",
    )
    parser.add_argument(
        "--max-tokens",
        type=int,
        default=16384,
        help="Maximum tokens to generate (default: 16384)",
    )
    parser.add_argument(
        "--gpu-memory-utilization",
        type=float,
        default=0.8,
        help="GPU memory utilization (default: 0.8)",
    )
    parser.add_argument("--hf-token", help="Hugging Face API token")
    parser.add_argument(
        "--split", default="train", help="Dataset split to use (default: train)"
    )
    parser.add_argument(
        "--max-samples",
        type=int,
        help="Maximum number of samples to process (for testing)",
    )
    parser.add_argument(
        "--private", action="store_true", help="Make output dataset private"
    )
    parser.add_argument(
        "--use-transformers",
        action="store_true",
        help="Use transformers instead of vLLM (more compatible but slower)",
    )

    # Output column configuration
    parser.add_argument(
        "--output-column",
        default="dots_ocr_output",
        help="Column name for JSON output (default: dots_ocr_output)",
    )
    parser.add_argument(
        "--bbox-column",
        default="layout_bboxes",
        help="Column name for bboxes in structured mode (default: layout_bboxes)",
    )
    parser.add_argument(
        "--category-column",
        default="layout_categories",
        help="Column name for categories in structured mode (default: layout_categories)",
    )
    parser.add_argument(
        "--text-column",
        default="layout_texts",
        help="Column name for texts in structured mode (default: layout_texts)",
    )
    parser.add_argument(
        "--markdown-column",
        default="markdown",
        help="Column name for markdown output (default: markdown)",
    )

    args = parser.parse_args()

    main(
        input_dataset=args.input_dataset,
        output_dataset=args.output_dataset,
        image_column=args.image_column,
        mode=args.mode,
        output_format=args.output_format,
        filter_category=args.filter_category,
        batch_size=args.batch_size,
        model=args.model,
        max_model_len=args.max_model_len,
        max_tokens=args.max_tokens,
        gpu_memory_utilization=args.gpu_memory_utilization,
        hf_token=args.hf_token,
        split=args.split,
        max_samples=args.max_samples,
        private=args.private,
        use_transformers=args.use_transformers,
        bbox=args.bbox,
        output_column=args.output_column,
        bbox_column=args.bbox_column,
        category_column=args.category_column,
        text_column=args.text_column,
        markdown_column=args.markdown_column,
    )