davanstrien HF Staff Claude Opus 4.6 committed on
Commit
aad164c
·
1 Parent(s): d28bb20

Add DeepSeek-OCR-2 vLLM script (rewrite from fixed v1 pattern)

Browse files

Rewrites the broken draft (base64/llm.chat/resolution modes) to use the
proven v1 pattern: llm.generate() with PIL images + NGramPerReqLogitsProcessor.
Key v2 change: limit_mm_per_prompt={"image": 1} for the new architecture.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

Files changed (1) hide show
  1. deepseek-ocr2-vllm.py +62 -200
deepseek-ocr2-vllm.py CHANGED
@@ -24,14 +24,15 @@ This script processes images through the DeepSeek-OCR-2 model (3B parameters
24
  with Visual Causal Flow architecture) to extract text and structure as markdown,
25
  using vLLM for efficient batch processing.
26
 
27
- Tested with vLLM 0.8.5+, PyTorch 2.6.0, Transformers 4.46.3+
 
 
28
 
29
  NOTE: Uses vLLM nightly wheels. First run may take a few minutes to download
30
  and install dependencies.
31
 
32
  Features:
33
  - Visual Causal Flow architecture for enhanced visual encoding
34
- - Multiple resolution modes (Tiny/Small/Base/Large/Gundam)
35
  - LaTeX equation recognition
36
  - Table extraction and formatting
37
  - Document structure preservation
@@ -41,14 +42,13 @@ Features:
41
  """
42
 
43
  import argparse
44
- import base64
45
  import io
46
  import json
47
  import logging
48
  import os
49
  import sys
50
- from typing import Any, Dict, List, Union
51
  from datetime import datetime
 
52
 
53
  import torch
54
  from datasets import load_dataset
@@ -57,24 +57,12 @@ from PIL import Image
57
  from toolz import partition_all
58
  from tqdm.auto import tqdm
59
  from vllm import LLM, SamplingParams
 
60
 
61
  logging.basicConfig(level=logging.INFO)
62
  logger = logging.getLogger(__name__)
63
 
64
- # Resolution mode presets - v2 optimized for 768×768 tiles
65
- RESOLUTION_MODES = {
66
- "tiny": {"base_size": 512, "image_size": 512, "crop_mode": False},
67
- "small": {"base_size": 640, "image_size": 640, "crop_mode": False},
68
- "base": {"base_size": 1024, "image_size": 768, "crop_mode": False}, # v2 optimized
69
- "large": {"base_size": 1280, "image_size": 1024, "crop_mode": False},
70
- "gundam": {
71
- "base_size": 1024,
72
- "image_size": 768, # v2 optimized
73
- "crop_mode": True,
74
- }, # Dynamic resolution
75
- }
76
-
77
- # Prompt mode presets (compatible with both v1 and v2)
78
  PROMPT_MODES = {
79
  "document": "<image>\n<|grounding|>Convert the document to markdown.",
80
  "image": "<image>\n<|grounding|>OCR this image.",
@@ -94,40 +82,17 @@ def check_cuda_availability():
94
  logger.info(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")
95
 
96
 
97
- def make_ocr_message(
98
- image: Union[Image.Image, Dict[str, Any], str],
99
- prompt: str = "<image>\n<|grounding|>Convert the document to markdown. ",
100
- ) -> List[Dict]:
101
- """Create chat message for OCR processing."""
102
- # Convert to PIL Image if needed
103
  if isinstance(image, Image.Image):
104
- pil_img = image
105
  elif isinstance(image, dict) and "bytes" in image:
106
- pil_img = Image.open(io.BytesIO(image["bytes"]))
107
  elif isinstance(image, str):
108
- pil_img = Image.open(image)
109
  else:
110
  raise ValueError(f"Unsupported image type: {type(image)}")
111
 
112
- # Convert to RGB
113
- pil_img = pil_img.convert("RGB")
114
-
115
- # Convert to base64 data URI
116
- buf = io.BytesIO()
117
- pil_img.save(buf, format="PNG")
118
- data_uri = f"data:image/png;base64,{base64.b64encode(buf.getvalue()).decode()}"
119
-
120
- # Return message in vLLM format
121
- return [
122
- {
123
- "role": "user",
124
- "content": [
125
- {"type": "image_url", "image_url": {"url": data_uri}},
126
- {"type": "text", "text": prompt},
127
- ],
128
- }
129
- ]
130
-
131
 
132
  def create_dataset_card(
133
  source_dataset: str,
@@ -138,10 +103,6 @@ def create_dataset_card(
138
  max_model_len: int,
139
  max_tokens: int,
140
  gpu_memory_utilization: float,
141
- resolution_mode: str,
142
- base_size: int,
143
- image_size: int,
144
- crop_mode: bool,
145
  image_column: str = "image",
146
  split: str = "train",
147
  ) -> str:
@@ -177,10 +138,6 @@ This dataset contains markdown-formatted OCR results from images in [{source_dat
177
  - **Output Column**: `markdown`
178
  - **Dataset Split**: `{split}`
179
  - **Batch Size**: {batch_size}
180
- - **Resolution Mode**: {resolution_mode}
181
- - **Base Size**: {base_size}
182
- - **Image Size**: {image_size}
183
- - **Crop Mode**: {crop_mode}
184
  - **Max Model Length**: {max_model_len:,} tokens
185
  - **Max Output Tokens**: {max_tokens:,}
186
  - **GPU Memory Utilization**: {gpu_memory_utilization:.1%}
@@ -188,31 +145,17 @@ This dataset contains markdown-formatted OCR results from images in [{source_dat
188
  ## Model Information
189
 
190
  DeepSeek-OCR-2 is a 3B parameter vision-language model featuring Visual Causal
191
- Flow architecture for more human-like visual encoding. Building on DeepSeek-OCR
192
- v1, it offers enhanced document understanding with dynamic resolution up to
193
- (0-6)×768×768 + 1×1024×1024 patches.
194
-
195
- ### Key Improvements (v2)
196
- - 🧠 **Visual Causal Flow** - More human-like visual encoding architecture
197
- - 🎯 **Enhanced multi-patch** - Better processing of 768×768 tiles
198
- - 📝 **Improved structure** - Superior document hierarchy preservation
199
- - 📐 **Better layouts** - Enhanced handling of complex multi-column documents
200
 
201
  ### Capabilities
202
- - 📐 **LaTeX equations** - Mathematical formulas preserved in LaTeX format
203
- - 📊 **Tables** - Extracted and formatted as HTML/markdown
204
- - 📝 **Document structure** - Headers, lists, and formatting maintained
205
- - 🖼️ **Image grounding** - Spatial layout and bounding box information
206
- - 🔍 **Complex layouts** - Multi-column and hierarchical structures
207
- - 🌍 **Multilingual** - Supports multiple languages
208
-
209
- ### Resolution Modes
210
-
211
- - **Tiny** (512×512): Fast processing, 64 vision tokens
212
- - **Small** (640×640): Balanced speed/quality, 100 vision tokens
213
- - **Base** (1024×768): High quality, ~192 vision tokens ⭐ v2 optimized
214
- - **Large** (1280×1024): Maximum quality, ~328 vision tokens
215
- - **Gundam** (dynamic): Adaptive multi-tile with 768×768 patches (default)
216
 
217
  ## Dataset Structure
218
 
@@ -248,7 +191,6 @@ This dataset was generated using the [uv-scripts/ocr](https://huggingface.co/dat
248
  uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr2-vllm.py \\\\
249
  {source_dataset} \\\\
250
  <output-dataset> \\\\
251
- --resolution-mode {resolution_mode} \\\\
252
  --image-column {image_column}
253
  ```
254
 
@@ -257,7 +199,7 @@ uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr2-vll
257
  - **Processing Speed**: ~{num_samples / (float(processing_time.split()[0]) * 60):.1f} images/second
258
  - **Processing Method**: Batch processing with vLLM (2-3x speedup over sequential)
259
 
260
- Generated with 🤖 [UV Scripts](https://huggingface.co/uv-scripts)
261
  """
262
 
263
 
@@ -265,12 +207,8 @@ def main(
265
  input_dataset: str,
266
  output_dataset: str,
267
  image_column: str = "image",
268
- batch_size: int = 8, # Smaller batch size to avoid potential memory issues
269
  model: str = "deepseek-ai/DeepSeek-OCR-2",
270
- resolution_mode: str = "gundam",
271
- base_size: int = None,
272
- image_size: int = None,
273
- crop_mode: bool = None,
274
  max_model_len: int = 8192,
275
  max_tokens: int = 8192,
276
  gpu_memory_utilization: float = 0.8,
@@ -296,41 +234,10 @@ def main(
296
  if HF_TOKEN:
297
  login(token=HF_TOKEN)
298
 
299
- # Determine resolution settings
300
- if resolution_mode in RESOLUTION_MODES:
301
- mode_config = RESOLUTION_MODES[resolution_mode]
302
- final_base_size = (
303
- base_size if base_size is not None else mode_config["base_size"]
304
- )
305
- final_image_size = (
306
- image_size if image_size is not None else mode_config["image_size"]
307
- )
308
- final_crop_mode = (
309
- crop_mode if crop_mode is not None else mode_config["crop_mode"]
310
- )
311
- logger.info(f"Using resolution mode: {resolution_mode}")
312
- else:
313
- # Custom mode - require all parameters
314
- if base_size is None or image_size is None or crop_mode is None:
315
- raise ValueError(
316
- f"Invalid resolution mode '{resolution_mode}'. "
317
- f"Use one of {list(RESOLUTION_MODES.keys())} or specify "
318
- f"--base-size, --image-size, and --crop-mode manually."
319
- )
320
- final_base_size = base_size
321
- final_image_size = image_size
322
- final_crop_mode = crop_mode
323
- resolution_mode = "custom"
324
-
325
- logger.info(
326
- f"Resolution: base_size={final_base_size}, "
327
- f"image_size={final_image_size}, crop_mode={final_crop_mode}"
328
- )
329
-
330
  # Determine prompt
331
  if prompt is not None:
332
  final_prompt = prompt
333
- logger.info(f"Using custom prompt")
334
  elif prompt_mode in PROMPT_MODES:
335
  final_prompt = PROMPT_MODES[prompt_mode]
336
  logger.info(f"Using prompt mode: {prompt_mode}")
@@ -362,31 +269,35 @@ def main(
362
  dataset = dataset.select(range(min(max_samples, len(dataset))))
363
  logger.info(f"Limited to {len(dataset)} samples")
364
 
365
- # Initialize vLLM
366
  logger.info(f"Initializing vLLM with model: {model}")
367
  logger.info("This may take a few minutes on first run...")
368
 
369
- # Add specific parameters for DeepSeek-OCR-2 compatibility
370
  llm = LLM(
371
  model=model,
372
  trust_remote_code=True,
373
  max_model_len=max_model_len,
374
  gpu_memory_utilization=gpu_memory_utilization,
 
 
375
  limit_mm_per_prompt={"image": 1},
376
- enforce_eager=False, # Use torch.compile instead of eager execution
377
  )
378
 
379
  sampling_params = SamplingParams(
380
- temperature=0.0, # Deterministic for OCR
381
  max_tokens=max_tokens,
 
 
 
 
 
 
382
  )
383
 
384
  logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
385
- logger.info(
386
- "Using vLLM for batch processing - should be faster than sequential processing"
387
- )
388
 
389
- # Process images in batches
390
  all_markdown = []
391
 
392
  for batch_indices in tqdm(
@@ -398,11 +309,16 @@ def main(
398
  batch_images = [dataset[i][image_column] for i in batch_indices]
399
 
400
  try:
401
- # Create messages for batch
402
- batch_messages = [make_ocr_message(img, final_prompt) for img in batch_images]
 
 
 
 
 
403
 
404
- # Process with vLLM
405
- outputs = llm.chat(batch_messages, sampling_params)
406
 
407
  # Extract outputs
408
  for output in outputs:
@@ -411,7 +327,6 @@ def main(
411
 
412
  except Exception as e:
413
  logger.error(f"Error processing batch: {e}")
414
- # Add error placeholders for failed batch
415
  all_markdown.extend(["[OCR FAILED]"] * len(batch_images))
416
 
417
  # Calculate processing time
@@ -444,10 +359,6 @@ def main(
444
  "column_name": "markdown",
445
  "model_id": model,
446
  "processing_date": datetime.now().isoformat(),
447
- "resolution_mode": resolution_mode,
448
- "base_size": final_base_size,
449
- "image_size": final_image_size,
450
- "crop_mode": final_crop_mode,
451
  "prompt": final_prompt,
452
  "prompt_mode": prompt_mode if prompt is None else "custom",
453
  "batch_size": batch_size,
@@ -457,7 +368,7 @@ def main(
457
  "script": "deepseek-ocr2-vllm.py",
458
  "script_version": "1.0.0",
459
  "script_url": "https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr2-vllm.py",
460
- "implementation": "vllm (batch processing)",
461
  }
462
  existing_info.append(new_info)
463
 
@@ -480,19 +391,15 @@ def main(
480
  max_model_len=max_model_len,
481
  max_tokens=max_tokens,
482
  gpu_memory_utilization=gpu_memory_utilization,
483
- resolution_mode=resolution_mode,
484
- base_size=final_base_size,
485
- image_size=final_image_size,
486
- crop_mode=final_crop_mode,
487
  image_column=image_column,
488
  split=split,
489
  )
490
 
491
  card = DatasetCard(card_content)
492
  card.push_to_hub(output_dataset, token=HF_TOKEN)
493
- logger.info("Dataset card created and pushed!")
494
 
495
- logger.info("OCR conversion complete!")
496
  logger.info(
497
  f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
498
  )
@@ -510,37 +417,28 @@ if __name__ == "__main__":
510
  print("with vLLM for efficient batch processing.")
511
  print("\nFeatures:")
512
  print("- Visual Causal Flow architecture for enhanced encoding")
513
- print("- Multiple resolution modes (Tiny/Small/Base/Large/Gundam)")
514
  print("- LaTeX equation recognition")
515
  print("- Table extraction and formatting")
516
  print("- Document structure preservation")
517
  print("- Image grounding and spatial layout")
518
  print("- Multilingual support")
519
- print("- Fast batch processing with vLLM (2-3x speedup)")
520
  print("\nExample usage:")
521
- print("\n1. Basic OCR conversion (Gundam mode - dynamic resolution):")
522
  print(" uv run deepseek-ocr2-vllm.py document-images markdown-docs")
523
- print("\n2. High quality mode (Large - 1280×1024):")
524
  print(
525
- " uv run deepseek-ocr2-vllm.py scanned-pdfs extracted-text --resolution-mode large"
526
  )
527
- print("\n3. Fast processing (Tiny - 512×512):")
528
- print(" uv run deepseek-ocr2-vllm.py quick-test output --resolution-mode tiny")
529
- print("\n4. Parse figures from documents:")
530
- print(" uv run deepseek-ocr2-vllm.py scientific-papers figures --prompt-mode figure")
531
- print("\n5. Free OCR without layout:")
532
  print(" uv run deepseek-ocr2-vllm.py images text --prompt-mode free")
533
- print("\n6. Process a subset for testing:")
534
  print(
535
  " uv run deepseek-ocr2-vllm.py large-dataset test-output --max-samples 10"
536
  )
537
- print("\n7. Custom resolution:")
538
- print(" uv run deepseek-ocr2-vllm.py dataset output \\")
539
- print(" --base-size 1024 --image-size 768 --crop-mode")
540
- print("\n8. Running on HF Jobs:")
541
  print(" hf jobs uv run --flavor l4x1 \\")
542
  print(" -s HF_TOKEN \\")
543
- print(" -e UV_TORCH_BACKEND=auto \\")
544
  print(
545
  " https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr2-vllm.py \\"
546
  )
@@ -554,13 +452,6 @@ if __name__ == "__main__":
554
  description="OCR images to markdown using DeepSeek-OCR-2 (vLLM)",
555
  formatter_class=argparse.RawDescriptionHelpFormatter,
556
  epilog="""
557
- Resolution Modes:
558
- tiny 512×512 pixels, fast processing (64 vision tokens)
559
- small 640×640 pixels, balanced (100 vision tokens)
560
- base 1024×768 pixels, high quality (~192 vision tokens) ⭐ v2 optimized
561
- large 1280×1024 pixels, maximum quality (~328 vision tokens)
562
- gundam Dynamic multi-tile with 768×768 patches (adaptive)
563
-
564
  Prompt Modes:
565
  document Convert document to markdown with grounding (default)
566
  image OCR any image with grounding
@@ -569,29 +460,25 @@ Prompt Modes:
569
  describe Generate detailed image descriptions
570
 
571
  Examples:
572
- # Basic usage with default Gundam mode
573
  uv run deepseek-ocr2-vllm.py my-images-dataset ocr-results
574
 
575
- # High quality processing
576
- uv run deepseek-ocr2-vllm.py documents extracted-text --resolution-mode large
577
-
578
- # Fast processing for testing
579
- uv run deepseek-ocr2-vllm.py dataset output --resolution-mode tiny --max-samples 100
580
-
581
  # Parse figures from a document dataset
582
  uv run deepseek-ocr2-vllm.py scientific-papers figures --prompt-mode figure
583
 
584
- # Free OCR without layout (fastest)
585
  uv run deepseek-ocr2-vllm.py images text --prompt-mode free
586
 
587
  # Custom prompt for specific task
588
- uv run deepseek-ocr2-vllm.py dataset output --prompt "<image>\nExtract all table data."
589
-
590
- # Custom resolution settings
591
- uv run deepseek-ocr2-vllm.py dataset output --base-size 1024 --image-size 768 --crop-mode
592
 
593
  # With custom batch size for performance tuning
594
  uv run deepseek-ocr2-vllm.py dataset output --batch-size 16 --max-model-len 16384
 
 
 
 
 
595
  """,
596
  )
597
 
@@ -613,27 +500,6 @@ Examples:
613
  default="deepseek-ai/DeepSeek-OCR-2",
614
  help="Model to use (default: deepseek-ai/DeepSeek-OCR-2)",
615
  )
616
- parser.add_argument(
617
- "--resolution-mode",
618
- default="gundam",
619
- choices=list(RESOLUTION_MODES.keys()) + ["custom"],
620
- help="Resolution mode preset (default: gundam)",
621
- )
622
- parser.add_argument(
623
- "--base-size",
624
- type=int,
625
- help="Base resolution size (overrides resolution-mode)",
626
- )
627
- parser.add_argument(
628
- "--image-size",
629
- type=int,
630
- help="Image tile size (overrides resolution-mode)",
631
- )
632
- parser.add_argument(
633
- "--crop-mode",
634
- action="store_true",
635
- help="Enable dynamic multi-tile cropping (overrides resolution-mode)",
636
- )
637
  parser.add_argument(
638
  "--max-model-len",
639
  type=int,
@@ -694,10 +560,6 @@ Examples:
694
  image_column=args.image_column,
695
  batch_size=args.batch_size,
696
  model=args.model,
697
- resolution_mode=args.resolution_mode,
698
- base_size=args.base_size,
699
- image_size=args.image_size,
700
- crop_mode=args.crop_mode if args.crop_mode else None,
701
  max_model_len=args.max_model_len,
702
  max_tokens=args.max_tokens,
703
  gpu_memory_utilization=args.gpu_memory_utilization,
 
24
  with Visual Causal Flow architecture) to extract text and structure as markdown,
25
  using vLLM for efficient batch processing.
26
 
27
+ Uses the official vLLM offline pattern: llm.generate() with PIL images
28
+ and NGramPerReqLogitsProcessor to prevent repetition on complex documents.
29
+ See: https://docs.vllm.ai/projects/recipes/en/latest/DeepSeek/DeepSeek-OCR.html
30
 
31
  NOTE: Uses vLLM nightly wheels. First run may take a few minutes to download
32
  and install dependencies.
33
 
34
  Features:
35
  - Visual Causal Flow architecture for enhanced visual encoding
 
36
  - LaTeX equation recognition
37
  - Table extraction and formatting
38
  - Document structure preservation
 
42
  """
43
 
44
  import argparse
 
45
  import io
46
  import json
47
  import logging
48
  import os
49
  import sys
 
50
  from datetime import datetime
51
+ from typing import Any, Dict, Union
52
 
53
  import torch
54
  from datasets import load_dataset
 
57
  from toolz import partition_all
58
  from tqdm.auto import tqdm
59
  from vllm import LLM, SamplingParams
60
+ from vllm.model_executor.models.deepseek_ocr import NGramPerReqLogitsProcessor
61
 
62
  logging.basicConfig(level=logging.INFO)
63
  logger = logging.getLogger(__name__)
64
 
65
+ # Prompt mode presets (from DeepSeek-OCR GitHub)
 
 
 
 
 
 
 
 
 
 
 
 
 
66
  PROMPT_MODES = {
67
  "document": "<image>\n<|grounding|>Convert the document to markdown.",
68
  "image": "<image>\n<|grounding|>OCR this image.",
 
82
  logger.info(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")
83
 
84
 
85
+ def to_pil(image: Union[Image.Image, Dict[str, Any], str]) -> Image.Image:
86
+ """Convert various image formats to PIL Image."""
 
 
 
 
87
  if isinstance(image, Image.Image):
88
+ return image
89
  elif isinstance(image, dict) and "bytes" in image:
90
+ return Image.open(io.BytesIO(image["bytes"]))
91
  elif isinstance(image, str):
92
+ return Image.open(image)
93
  else:
94
  raise ValueError(f"Unsupported image type: {type(image)}")
95
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
96
 
97
  def create_dataset_card(
98
  source_dataset: str,
 
103
  max_model_len: int,
104
  max_tokens: int,
105
  gpu_memory_utilization: float,
 
 
 
 
106
  image_column: str = "image",
107
  split: str = "train",
108
  ) -> str:
 
138
  - **Output Column**: `markdown`
139
  - **Dataset Split**: `{split}`
140
  - **Batch Size**: {batch_size}
 
 
 
 
141
  - **Max Model Length**: {max_model_len:,} tokens
142
  - **Max Output Tokens**: {max_tokens:,}
143
  - **GPU Memory Utilization**: {gpu_memory_utilization:.1%}
 
145
  ## Model Information
146
 
147
  DeepSeek-OCR-2 is a 3B parameter vision-language model featuring Visual Causal
148
+ Flow architecture for more human-like visual encoding. Building on DeepSeek-OCR v1,
149
+ it offers enhanced document understanding with dynamic resolution up to
150
+ (0-6)x768x768 + 1x1024x1024 patches.
 
 
 
 
 
 
151
 
152
  ### Capabilities
153
+ - LaTeX equations - Mathematical formulas preserved in LaTeX format
154
+ - Tables - Extracted and formatted as HTML/markdown
155
+ - Document structure - Headers, lists, and formatting maintained
156
+ - Image grounding - Spatial layout and bounding box information
157
+ - Complex layouts - Multi-column and hierarchical structures
158
+ - Multilingual - Supports multiple languages
 
 
 
 
 
 
 
 
159
 
160
  ## Dataset Structure
161
 
 
191
  uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr2-vllm.py \\\\
192
  {source_dataset} \\\\
193
  <output-dataset> \\\\
 
194
  --image-column {image_column}
195
  ```
196
 
 
199
  - **Processing Speed**: ~{num_samples / (float(processing_time.split()[0]) * 60):.1f} images/second
200
  - **Processing Method**: Batch processing with vLLM (2-3x speedup over sequential)
201
 
202
+ Generated with [UV Scripts](https://huggingface.co/uv-scripts)
203
  """
204
 
205
 
 
207
  input_dataset: str,
208
  output_dataset: str,
209
  image_column: str = "image",
210
+ batch_size: int = 8,
211
  model: str = "deepseek-ai/DeepSeek-OCR-2",
 
 
 
 
212
  max_model_len: int = 8192,
213
  max_tokens: int = 8192,
214
  gpu_memory_utilization: float = 0.8,
 
234
  if HF_TOKEN:
235
  login(token=HF_TOKEN)
236
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
237
  # Determine prompt
238
  if prompt is not None:
239
  final_prompt = prompt
240
+ logger.info("Using custom prompt")
241
  elif prompt_mode in PROMPT_MODES:
242
  final_prompt = PROMPT_MODES[prompt_mode]
243
  logger.info(f"Using prompt mode: {prompt_mode}")
 
269
  dataset = dataset.select(range(min(max_samples, len(dataset))))
270
  logger.info(f"Limited to {len(dataset)} samples")
271
 
272
+ # Initialize vLLM (matches official DeepSeek-OCR vLLM recipe)
273
  logger.info(f"Initializing vLLM with model: {model}")
274
  logger.info("This may take a few minutes on first run...")
275
 
 
276
  llm = LLM(
277
  model=model,
278
  trust_remote_code=True,
279
  max_model_len=max_model_len,
280
  gpu_memory_utilization=gpu_memory_utilization,
281
+ enable_prefix_caching=False,
282
+ mm_processor_cache_gb=0,
283
  limit_mm_per_prompt={"image": 1},
284
+ logits_processors=[NGramPerReqLogitsProcessor],
285
  )
286
 
287
  sampling_params = SamplingParams(
288
+ temperature=0.0,
289
  max_tokens=max_tokens,
290
+ skip_special_tokens=False,
291
+ extra_args=dict(
292
+ ngram_size=30,
293
+ window_size=90,
294
+ whitelist_token_ids={128821, 128822},
295
+ ),
296
  )
297
 
298
  logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
 
 
 
299
 
300
+ # Process images in batches using llm.generate() with PIL images
301
  all_markdown = []
302
 
303
  for batch_indices in tqdm(
 
309
  batch_images = [dataset[i][image_column] for i in batch_indices]
310
 
311
  try:
312
+ # Build model inputs with PIL images (official vLLM pattern)
313
+ model_inputs = []
314
+ for img in batch_images:
315
+ pil_img = to_pil(img).convert("RGB")
316
+ model_inputs.append(
317
+ {"prompt": final_prompt, "multi_modal_data": {"image": pil_img}}
318
+ )
319
 
320
+ # Process with vLLM generate API
321
+ outputs = llm.generate(model_inputs, sampling_params)
322
 
323
  # Extract outputs
324
  for output in outputs:
 
327
 
328
  except Exception as e:
329
  logger.error(f"Error processing batch: {e}")
 
330
  all_markdown.extend(["[OCR FAILED]"] * len(batch_images))
331
 
332
  # Calculate processing time
 
359
  "column_name": "markdown",
360
  "model_id": model,
361
  "processing_date": datetime.now().isoformat(),
 
 
 
 
362
  "prompt": final_prompt,
363
  "prompt_mode": prompt_mode if prompt is None else "custom",
364
  "batch_size": batch_size,
 
368
  "script": "deepseek-ocr2-vllm.py",
369
  "script_version": "1.0.0",
370
  "script_url": "https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr2-vllm.py",
371
+ "implementation": "vllm (batch processing, llm.generate + NGramPerReqLogitsProcessor)",
372
  }
373
  existing_info.append(new_info)
374
 
 
391
  max_model_len=max_model_len,
392
  max_tokens=max_tokens,
393
  gpu_memory_utilization=gpu_memory_utilization,
 
 
 
 
394
  image_column=image_column,
395
  split=split,
396
  )
397
 
398
  card = DatasetCard(card_content)
399
  card.push_to_hub(output_dataset, token=HF_TOKEN)
400
+ logger.info("Dataset card created and pushed!")
401
 
402
+ logger.info("OCR conversion complete!")
403
  logger.info(
404
  f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
405
  )
 
417
  print("with vLLM for efficient batch processing.")
418
  print("\nFeatures:")
419
  print("- Visual Causal Flow architecture for enhanced encoding")
 
420
  print("- LaTeX equation recognition")
421
  print("- Table extraction and formatting")
422
  print("- Document structure preservation")
423
  print("- Image grounding and spatial layout")
424
  print("- Multilingual support")
425
+ print("- Fast batch processing with vLLM")
426
  print("\nExample usage:")
427
+ print("\n1. Basic OCR conversion (document mode with grounding):")
428
  print(" uv run deepseek-ocr2-vllm.py document-images markdown-docs")
429
+ print("\n2. Parse figures from documents:")
430
  print(
431
+ " uv run deepseek-ocr2-vllm.py scientific-papers figures --prompt-mode figure"
432
  )
433
+ print("\n3. Free OCR without layout:")
 
 
 
 
434
  print(" uv run deepseek-ocr2-vllm.py images text --prompt-mode free")
435
+ print("\n4. Process a subset for testing:")
436
  print(
437
  " uv run deepseek-ocr2-vllm.py large-dataset test-output --max-samples 10"
438
  )
439
+ print("\n5. Running on HF Jobs:")
 
 
 
440
  print(" hf jobs uv run --flavor l4x1 \\")
441
  print(" -s HF_TOKEN \\")
 
442
  print(
443
  " https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr2-vllm.py \\"
444
  )
 
452
  description="OCR images to markdown using DeepSeek-OCR-2 (vLLM)",
453
  formatter_class=argparse.RawDescriptionHelpFormatter,
454
  epilog="""
 
 
 
 
 
 
 
455
  Prompt Modes:
456
  document Convert document to markdown with grounding (default)
457
  image OCR any image with grounding
 
460
  describe Generate detailed image descriptions
461
 
462
  Examples:
463
+ # Basic usage
464
  uv run deepseek-ocr2-vllm.py my-images-dataset ocr-results
465
 
 
 
 
 
 
 
466
  # Parse figures from a document dataset
467
  uv run deepseek-ocr2-vllm.py scientific-papers figures --prompt-mode figure
468
 
469
+ # Free OCR without layout
470
  uv run deepseek-ocr2-vllm.py images text --prompt-mode free
471
 
472
  # Custom prompt for specific task
473
+ uv run deepseek-ocr2-vllm.py dataset output --prompt "<image>\\nExtract all table data."
 
 
 
474
 
475
  # With custom batch size for performance tuning
476
  uv run deepseek-ocr2-vllm.py dataset output --batch-size 16 --max-model-len 16384
477
+
478
+ # Running on HF Jobs
479
+ hf jobs uv run --flavor l4x1 -s HF_TOKEN \\
480
+ https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr2-vllm.py \\
481
+ my-dataset my-output --max-samples 10
482
  """,
483
  )
484
 
 
500
  default="deepseek-ai/DeepSeek-OCR-2",
501
  help="Model to use (default: deepseek-ai/DeepSeek-OCR-2)",
502
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
503
  parser.add_argument(
504
  "--max-model-len",
505
  type=int,
 
560
  image_column=args.image_column,
561
  batch_size=args.batch_size,
562
  model=args.model,
 
 
 
 
563
  max_model_len=args.max_model_len,
564
  max_tokens=args.max_tokens,
565
  gpu_memory_utilization=args.gpu_memory_utilization,