davanstrien HF Staff commited on
Commit
ee2a300
·
1 Parent(s): c7d345d

Add DeepSeek-OCR-2 script with Visual Causal Flow architecture (testing)

Browse files
Files changed (1) hide show
  1. deepseek-ocr2-vllm.py +715 -0
deepseek-ocr2-vllm.py ADDED
@@ -0,0 +1,715 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /// script
2
+ # requires-python = ">=3.11"
3
+ # dependencies = [
4
+ # "datasets",
5
+ # "huggingface-hub[hf_transfer]",
6
+ # "pillow",
7
+ # "vllm",
8
+ # "tqdm",
9
+ # "toolz",
10
+ # "torch",
11
+ # ]
12
+ #
13
+ # [[tool.uv.index]]
14
+ # url = "https://wheels.vllm.ai/nightly"
15
+ #
16
+ # [tool.uv]
17
+ # prerelease = "allow"
18
+ # ///
19
+
20
+ """
21
+ Convert document images to markdown using DeepSeek-OCR-2 with vLLM.
22
+
23
+ This script processes images through the DeepSeek-OCR-2 model (3B parameters
24
+ with Visual Causal Flow architecture) to extract text and structure as markdown,
25
+ using vLLM for efficient batch processing.
26
+
27
+ Tested with vLLM 0.8.5+, PyTorch 2.6.0, Transformers 4.46.3+
28
+
29
+ NOTE: Uses vLLM nightly wheels. First run may take a few minutes to download
30
+ and install dependencies.
31
+
32
+ Features:
33
+ - Visual Causal Flow architecture for enhanced visual encoding
34
+ - Multiple resolution modes (Tiny/Small/Base/Large/Gundam)
35
+ - LaTeX equation recognition
36
+ - Table extraction and formatting
37
+ - Document structure preservation
38
+ - Image grounding and descriptions
39
+ - Multilingual support
40
+ - Batch processing with vLLM for better performance
41
+ """
42
+
43
+ import argparse
44
+ import base64
45
+ import io
46
+ import json
47
+ import logging
48
+ import os
49
+ import sys
50
+ from typing import Any, Dict, List, Union
51
+ from datetime import datetime
52
+
53
+ import torch
54
+ from datasets import load_dataset
55
+ from huggingface_hub import DatasetCard, login
56
+ from PIL import Image
57
+ from toolz import partition_all
58
+ from tqdm.auto import tqdm
59
+ from vllm import LLM, SamplingParams
60
+
61
+ logging.basicConfig(level=logging.INFO)
62
+ logger = logging.getLogger(__name__)
63
+
64
# Resolution mode presets - v2 optimized for 768×768 tiles
#
# Each preset resolves to three knobs consumed below in main():
#   base_size  - base/global view resolution
#   image_size - tile resolution (768 is the v2-optimized tile size)
#   crop_mode  - True enables dynamic multi-tile ("Gundam") cropping
RESOLUTION_MODES = {
    "tiny": {"base_size": 512, "image_size": 512, "crop_mode": False},
    "small": {"base_size": 640, "image_size": 640, "crop_mode": False},
    "base": {"base_size": 1024, "image_size": 768, "crop_mode": False},  # v2 optimized
    "large": {"base_size": 1280, "image_size": 1024, "crop_mode": False},
    "gundam": {
        "base_size": 1024,
        "image_size": 768,  # v2 optimized
        "crop_mode": True,
    },  # Dynamic resolution
}
76
+
77
# Prompt mode presets (compatible with both v1 and v2)
#
# "<image>" marks where the image is injected into the prompt;
# "<|grounding|>" additionally requests spatial grounding output
# (see the --prompt-mode descriptions in the CLI epilog).
PROMPT_MODES = {
    "document": "<image>\n<|grounding|>Convert the document to markdown.",  # default mode
    "image": "<image>\n<|grounding|>OCR this image.",
    "free": "<image>\nFree OCR.",  # no layout preservation
    "figure": "<image>\nParse the figure.",
    "describe": "<image>\nDescribe this image in detail.",
}
85
+
86
+
87
def check_cuda_availability():
    """Verify a CUDA GPU is visible to torch; exit(1) otherwise.

    Logs the detected GPU name on success. On failure, logs two error
    lines and terminates the process — the rest of the script assumes
    a GPU is present.
    """
    if torch.cuda.is_available():
        logger.info(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")
        return
    logger.error("CUDA is not available. This script requires a GPU.")
    logger.error("Please run on a machine with a CUDA-capable GPU.")
    sys.exit(1)
95
+
96
+
97
def make_ocr_message(
    image: Union[Image.Image, Dict[str, Any], str],
    prompt: str = "<image>\n<|grounding|>Convert the document to markdown. ",
) -> List[Dict]:
    """Build a single-turn vLLM chat message pairing *image* with *prompt*.

    Accepts a PIL image, a dict carrying raw ``bytes`` (the HF datasets
    image format), or a filesystem path. The image is re-encoded as a
    PNG and embedded inline as a base64 data URI.

    Raises:
        ValueError: if *image* is none of the supported types.
    """
    if isinstance(image, Image.Image):
        picture = image
    elif isinstance(image, dict) and "bytes" in image:
        picture = Image.open(io.BytesIO(image["bytes"]))
    elif isinstance(image, str):
        picture = Image.open(image)
    else:
        raise ValueError(f"Unsupported image type: {type(image)}")

    # Normalize to RGB first, then PNG-encode into an in-memory buffer.
    encoded = io.BytesIO()
    picture.convert("RGB").save(encoded, format="PNG")
    payload = base64.b64encode(encoded.getvalue()).decode()
    data_uri = f"data:image/png;base64,{payload}"

    # Single user turn in the multimodal chat format vLLM expects.
    content = [
        {"type": "image_url", "image_url": {"url": data_uri}},
        {"type": "text", "text": prompt},
    ]
    return [{"role": "user", "content": content}]
130
+
131
+
132
def create_dataset_card(
    source_dataset: str,
    model: str,
    num_samples: int,
    processing_time: str,
    batch_size: int,
    max_model_len: int,
    max_tokens: int,
    gpu_memory_utilization: float,
    resolution_mode: str,
    base_size: int,
    image_size: int,
    crop_mode: bool,
    image_column: str = "image",
    split: str = "train",
) -> str:
    """Create a dataset card documenting the OCR process.

    Renders a README with YAML frontmatter tags plus the full processing
    configuration so the run is reproducible from the card alone.

    Args:
        source_dataset: Hub ID of the dataset that was OCR'd.
        model: Hub ID of the OCR model used.
        num_samples: Number of rows processed.
        processing_time: Human-readable duration such as "12.3 min".
            NOTE: the "Performance" section parses the leading float as
            minutes (``float(processing_time.split()[0]) * 60`` seconds),
            so the format must stay "<minutes> min".
        batch_size: vLLM batch size used.
        max_model_len: Context length the engine was configured with.
        max_tokens: Per-sample generation cap.
        gpu_memory_utilization: Fraction of GPU memory given to vLLM.
        resolution_mode: Preset name (or "custom").
        base_size: Resolved base resolution.
        image_size: Resolved tile resolution.
        crop_mode: Whether dynamic multi-tile cropping was enabled.
        image_column: Source column containing the images.
        split: Dataset split that was processed.

    Returns:
        The complete markdown dataset card as a single string.
    """
    # Short display name, e.g. "deepseek-ai/DeepSeek-OCR-2" -> "DeepSeek-OCR-2".
    model_name = model.split("/")[-1]

    # NOTE(review): in the template below, "{{{{...}}}}" renders as literal
    # double braces and "\\\\" renders as two backslashes in the bash
    # snippet — confirm those are the intended rendered forms in the card.
    return f"""---
tags:
- ocr
- document-processing
- deepseek
- deepseek-ocr-2
- markdown
- uv-script
- generated
---

# Document OCR using {model_name}

This dataset contains markdown-formatted OCR results from images in [{source_dataset}](https://huggingface.co/datasets/{source_dataset}) using DeepSeek-OCR-2.

## Processing Details

- **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset})
- **Model**: [{model}](https://huggingface.co/{model})
- **Number of Samples**: {num_samples:,}
- **Processing Time**: {processing_time}
- **Processing Date**: {datetime.now().strftime("%Y-%m-%d %H:%M UTC")}

### Configuration

- **Image Column**: `{image_column}`
- **Output Column**: `markdown`
- **Dataset Split**: `{split}`
- **Batch Size**: {batch_size}
- **Resolution Mode**: {resolution_mode}
- **Base Size**: {base_size}
- **Image Size**: {image_size}
- **Crop Mode**: {crop_mode}
- **Max Model Length**: {max_model_len:,} tokens
- **Max Output Tokens**: {max_tokens:,}
- **GPU Memory Utilization**: {gpu_memory_utilization:.1%}

## Model Information

DeepSeek-OCR-2 is a 3B parameter vision-language model featuring Visual Causal
Flow architecture for more human-like visual encoding. Building on DeepSeek-OCR
v1, it offers enhanced document understanding with dynamic resolution up to
(0-6)×768×768 + 1×1024×1024 patches.

### Key Improvements (v2)
- 🧠 **Visual Causal Flow** - More human-like visual encoding architecture
- 🎯 **Enhanced multi-patch** - Better processing of 768×768 tiles
- 📝 **Improved structure** - Superior document hierarchy preservation
- 📐 **Better layouts** - Enhanced handling of complex multi-column documents

### Capabilities
- 📐 **LaTeX equations** - Mathematical formulas preserved in LaTeX format
- 📊 **Tables** - Extracted and formatted as HTML/markdown
- 📝 **Document structure** - Headers, lists, and formatting maintained
- 🖼️ **Image grounding** - Spatial layout and bounding box information
- 🔍 **Complex layouts** - Multi-column and hierarchical structures
- 🌍 **Multilingual** - Supports multiple languages

### Resolution Modes

- **Tiny** (512×512): Fast processing, 64 vision tokens
- **Small** (640×640): Balanced speed/quality, 100 vision tokens
- **Base** (1024×768): High quality, ~192 vision tokens ⭐ v2 optimized
- **Large** (1280×1024): Maximum quality, ~328 vision tokens
- **Gundam** (dynamic): Adaptive multi-tile with 768×768 patches (default)

## Dataset Structure

The dataset contains all original columns plus:
- `markdown`: The extracted text in markdown format with preserved structure
- `inference_info`: JSON list tracking all OCR models applied to this dataset

## Usage

```python
from datasets import load_dataset
import json

# Load the dataset
dataset = load_dataset("{{{{output_dataset_id}}}}", split="{split}")

# Access the markdown text
for example in dataset:
    print(example["markdown"])
    break

# View all OCR models applied to this dataset
inference_info = json.loads(dataset[0]["inference_info"])
for info in inference_info:
    print(f"Column: {{{{info['column_name']}}}} - Model: {{{{info['model_id']}}}}")
```

## Reproduction

This dataset was generated using the [uv-scripts/ocr](https://huggingface.co/datasets/uv-scripts/ocr) DeepSeek-OCR-2 vLLM script:

```bash
uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr2-vllm.py \\\\
    {source_dataset} \\\\
    <output-dataset> \\\\
    --resolution-mode {resolution_mode} \\\\
    --image-column {image_column}
```

## Performance

- **Processing Speed**: ~{num_samples / (float(processing_time.split()[0]) * 60):.1f} images/second
- **Processing Method**: Batch processing with vLLM (2-3x speedup over sequential)

Generated with 🤖 [UV Scripts](https://huggingface.co/uv-scripts)
"""
262
+
263
+
264
def main(
    input_dataset: str,
    output_dataset: str,
    image_column: str = "image",
    batch_size: int = 8,  # Smaller batch size to avoid potential memory issues
    model: str = "deepseek-ai/DeepSeek-OCR-2",
    resolution_mode: str = "gundam",
    base_size: int | None = None,
    image_size: int | None = None,
    crop_mode: bool | None = None,
    max_model_len: int = 8192,
    max_tokens: int = 8192,
    gpu_memory_utilization: float = 0.8,
    prompt_mode: str = "document",
    prompt: str | None = None,
    hf_token: str | None = None,
    split: str = "train",
    max_samples: int | None = None,
    private: bool = False,
    shuffle: bool = False,
    seed: int = 42,
):
    """Process images from HF dataset through DeepSeek-OCR-2 model with vLLM.

    End-to-end pipeline: load the dataset, OCR each image in batches via
    vLLM, append a ``markdown`` column and an ``inference_info`` provenance
    column, push the result to the Hub, and publish a dataset card.

    Args:
        input_dataset: Hub ID of the source image dataset.
        output_dataset: Hub ID to push results (and the card) to.
        image_column: Column holding the images.
        batch_size: Number of images per llm.chat() call.
        model: Hub ID of the OCR model.
        resolution_mode: Preset key in RESOLUTION_MODES, or anything else
            to select custom mode (then base_size/image_size/crop_mode are
            all required).
        base_size / image_size / crop_mode: Per-knob overrides; None means
            "use the preset's value".
        max_model_len: vLLM context length.
        max_tokens: Generation cap per sample.
        gpu_memory_utilization: Fraction of GPU memory vLLM may use.
        prompt_mode: Preset key in PROMPT_MODES (ignored when *prompt* set).
        prompt: Custom prompt overriding *prompt_mode*.
        hf_token: HF token; falls back to the HF_TOKEN env var.
        split: Dataset split to process.
        max_samples: Optional cap on rows (testing).
        private: Push the output dataset as private.
        shuffle: Shuffle before (optionally) truncating to *max_samples*.
        seed: Shuffle seed.

    Raises:
        ValueError: on unknown resolution/prompt mode or missing image column.
    """

    # Check CUDA availability first
    check_cuda_availability()

    # Track processing start time
    start_time = datetime.now()

    # Enable HF_TRANSFER for faster downloads
    os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

    # Login to HF if token provided
    HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
    if HF_TOKEN:
        login(token=HF_TOKEN)

    # Determine resolution settings: explicit per-knob overrides win over
    # the preset's values.
    if resolution_mode in RESOLUTION_MODES:
        mode_config = RESOLUTION_MODES[resolution_mode]
        final_base_size = (
            base_size if base_size is not None else mode_config["base_size"]
        )
        final_image_size = (
            image_size if image_size is not None else mode_config["image_size"]
        )
        final_crop_mode = (
            crop_mode if crop_mode is not None else mode_config["crop_mode"]
        )
        logger.info(f"Using resolution mode: {resolution_mode}")
    else:
        # Custom mode - require all parameters
        if base_size is None or image_size is None or crop_mode is None:
            raise ValueError(
                f"Invalid resolution mode '{resolution_mode}'. "
                f"Use one of {list(RESOLUTION_MODES.keys())} or specify "
                f"--base-size, --image-size, and --crop-mode manually."
            )
        final_base_size = base_size
        final_image_size = image_size
        final_crop_mode = crop_mode
        resolution_mode = "custom"

    logger.info(
        f"Resolution: base_size={final_base_size}, "
        f"image_size={final_image_size}, crop_mode={final_crop_mode}"
    )
    # NOTE(review): final_base_size / final_image_size / final_crop_mode are
    # only logged and recorded in metadata below — nothing in this function
    # passes them to the LLM() engine or into the prompts. Confirm whether
    # the model applies its own resolution handling or this is a gap.

    # Determine prompt: a custom --prompt overrides any --prompt-mode preset.
    if prompt is not None:
        final_prompt = prompt
        logger.info(f"Using custom prompt")
    elif prompt_mode in PROMPT_MODES:
        final_prompt = PROMPT_MODES[prompt_mode]
        logger.info(f"Using prompt mode: {prompt_mode}")
    else:
        raise ValueError(
            f"Invalid prompt mode '{prompt_mode}'. "
            f"Use one of {list(PROMPT_MODES.keys())} or specify --prompt"
        )

    logger.info(f"Prompt: {final_prompt}")

    # Load dataset
    logger.info(f"Loading dataset: {input_dataset}")
    dataset = load_dataset(input_dataset, split=split)

    # Validate image column
    if image_column not in dataset.column_names:
        raise ValueError(
            f"Column '{image_column}' not found. Available: {dataset.column_names}"
        )

    # Shuffle if requested (before truncation, so --max-samples takes a
    # random sample rather than the head of the dataset)
    if shuffle:
        logger.info(f"Shuffling dataset with seed {seed}")
        dataset = dataset.shuffle(seed=seed)

    # Limit samples if requested
    if max_samples:
        dataset = dataset.select(range(min(max_samples, len(dataset))))
        logger.info(f"Limited to {len(dataset)} samples")

    # Initialize vLLM
    logger.info(f"Initializing vLLM with model: {model}")
    logger.info("This may take a few minutes on first run...")

    # Add specific parameters for DeepSeek-OCR-2 compatibility
    llm = LLM(
        model=model,
        trust_remote_code=True,
        max_model_len=max_model_len,
        gpu_memory_utilization=gpu_memory_utilization,
        limit_mm_per_prompt={"image": 1},  # one image per prompt
        enforce_eager=False,  # Use torch.compile instead of eager execution
    )

    sampling_params = SamplingParams(
        temperature=0.0,  # Deterministic for OCR
        max_tokens=max_tokens,
    )

    logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
    logger.info(
        "Using vLLM for batch processing - should be faster than sequential processing"
    )

    # Process images in batches; all_markdown collects one result per row,
    # in dataset order (error placeholders keep the alignment on failure).
    all_markdown = []

    for batch_indices in tqdm(
        partition_all(batch_size, range(len(dataset))),
        total=(len(dataset) + batch_size - 1) // batch_size,
        desc="DeepSeek-OCR-2 vLLM processing",
    ):
        batch_indices = list(batch_indices)
        batch_images = [dataset[i][image_column] for i in batch_indices]

        try:
            # Create messages for batch
            batch_messages = [make_ocr_message(img, final_prompt) for img in batch_images]

            # Process with vLLM
            outputs = llm.chat(batch_messages, sampling_params)

            # Extract outputs
            for output in outputs:
                text = output.outputs[0].text.strip()
                all_markdown.append(text)

        except Exception as e:
            logger.error(f"Error processing batch: {e}")
            # Add error placeholders for failed batch so column lengths match
            all_markdown.extend(["[OCR FAILED]"] * len(batch_images))

    # Calculate processing time (format must stay "<minutes> min" — the
    # dataset card parses it back to compute images/second)
    processing_duration = datetime.now() - start_time
    processing_time_str = f"{processing_duration.total_seconds() / 60:.1f} min"

    # Add markdown column to dataset
    logger.info("Adding markdown column to dataset")
    dataset = dataset.add_column("markdown", all_markdown)

    # Handle inference_info tracking
    logger.info("Updating inference_info...")

    # Check for existing inference_info
    if "inference_info" in dataset.column_names:
        # Parse existing info from first row (all rows have same info)
        # NOTE(review): assumes the dataset is non-empty; dataset[0] would
        # raise on an empty split — confirm that case cannot occur here.
        try:
            existing_info = json.loads(dataset[0]["inference_info"])
            if not isinstance(existing_info, list):
                existing_info = [existing_info]  # Convert old format to list
        except (json.JSONDecodeError, TypeError):
            existing_info = []
        # Remove old column to update it
        dataset = dataset.remove_columns(["inference_info"])
    else:
        existing_info = []

    # Add new inference info entry describing this run's configuration
    new_info = {
        "column_name": "markdown",
        "model_id": model,
        "processing_date": datetime.now().isoformat(),
        "resolution_mode": resolution_mode,
        "base_size": final_base_size,
        "image_size": final_image_size,
        "crop_mode": final_crop_mode,
        "prompt": final_prompt,
        "prompt_mode": prompt_mode if prompt is None else "custom",
        "batch_size": batch_size,
        "max_tokens": max_tokens,
        "gpu_memory_utilization": gpu_memory_utilization,
        "max_model_len": max_model_len,
        "script": "deepseek-ocr2-vllm.py",
        "script_version": "1.0.0",
        "script_url": "https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr2-vllm.py",
        "implementation": "vllm (batch processing)",
    }
    existing_info.append(new_info)

    # Add updated inference_info column (same JSON replicated on every row)
    info_json = json.dumps(existing_info, ensure_ascii=False)
    dataset = dataset.add_column("inference_info", [info_json] * len(dataset))

    # Push to hub
    logger.info(f"Pushing to {output_dataset}")
    dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN)

    # Create and push dataset card
    logger.info("Creating dataset card...")
    card_content = create_dataset_card(
        source_dataset=input_dataset,
        model=model,
        num_samples=len(dataset),
        processing_time=processing_time_str,
        batch_size=batch_size,
        max_model_len=max_model_len,
        max_tokens=max_tokens,
        gpu_memory_utilization=gpu_memory_utilization,
        resolution_mode=resolution_mode,
        base_size=final_base_size,
        image_size=final_image_size,
        crop_mode=final_crop_mode,
        image_column=image_column,
        split=split,
    )

    card = DatasetCard(card_content)
    card.push_to_hub(output_dataset, token=HF_TOKEN)
    logger.info("✅ Dataset card created and pushed!")

    logger.info("✅ OCR conversion complete!")
    logger.info(
        f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
    )
    logger.info(f"Processing time: {processing_time_str}")
503
+
504
+
505
if __name__ == "__main__":
    # Show example usage if no arguments (friendlier than argparse's
    # "the following arguments are required" error)
    if len(sys.argv) == 1:
        print("=" * 80)
        print("DeepSeek-OCR-2 to Markdown Converter (vLLM)")
        print("=" * 80)
        print("\nThis script converts document images to markdown using")
        print("DeepSeek-OCR-2 (3B parameters with Visual Causal Flow)")
        print("with vLLM for efficient batch processing.")
        print("\nFeatures:")
        print("- Visual Causal Flow architecture for enhanced encoding")
        print("- Multiple resolution modes (Tiny/Small/Base/Large/Gundam)")
        print("- LaTeX equation recognition")
        print("- Table extraction and formatting")
        print("- Document structure preservation")
        print("- Image grounding and spatial layout")
        print("- Multilingual support")
        print("- ⚡ Fast batch processing with vLLM (2-3x speedup)")
        print("\nExample usage:")
        print("\n1. Basic OCR conversion (Gundam mode - dynamic resolution):")
        print("   uv run deepseek-ocr2-vllm.py document-images markdown-docs")
        print("\n2. High quality mode (Large - 1280×1024):")
        print(
            "   uv run deepseek-ocr2-vllm.py scanned-pdfs extracted-text --resolution-mode large"
        )
        print("\n3. Fast processing (Tiny - 512×512):")
        print("   uv run deepseek-ocr2-vllm.py quick-test output --resolution-mode tiny")
        print("\n4. Parse figures from documents:")
        print("   uv run deepseek-ocr2-vllm.py scientific-papers figures --prompt-mode figure")
        print("\n5. Free OCR without layout:")
        print("   uv run deepseek-ocr2-vllm.py images text --prompt-mode free")
        print("\n6. Process a subset for testing:")
        print(
            "   uv run deepseek-ocr2-vllm.py large-dataset test-output --max-samples 10"
        )
        print("\n7. Custom resolution:")
        print("   uv run deepseek-ocr2-vllm.py dataset output \\")
        print("     --base-size 1024 --image-size 768 --crop-mode")
        print("\n8. Running on HF Jobs:")
        print("   hf jobs uv run --flavor l4x1 \\")
        print("     -s HF_TOKEN \\")
        print("     -e UV_TORCH_BACKEND=auto \\")
        print(
            "     https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr2-vllm.py \\"
        )
        print("     your-document-dataset \\")
        print("     your-markdown-output")
        print("\n" + "=" * 80)
        print("\nFor full help, run: uv run deepseek-ocr2-vllm.py --help")
        sys.exit(0)

    parser = argparse.ArgumentParser(
        description="OCR images to markdown using DeepSeek-OCR-2 (vLLM)",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Resolution Modes:
  tiny      512×512 pixels, fast processing (64 vision tokens)
  small     640×640 pixels, balanced (100 vision tokens)
  base      1024×768 pixels, high quality (~192 vision tokens) ⭐ v2 optimized
  large     1280×1024 pixels, maximum quality (~328 vision tokens)
  gundam    Dynamic multi-tile with 768×768 patches (adaptive)

Prompt Modes:
  document  Convert document to markdown with grounding (default)
  image     OCR any image with grounding
  free      Free OCR without layout preservation
  figure    Parse figures from documents
  describe  Generate detailed image descriptions

Examples:
  # Basic usage with default Gundam mode
  uv run deepseek-ocr2-vllm.py my-images-dataset ocr-results

  # High quality processing
  uv run deepseek-ocr2-vllm.py documents extracted-text --resolution-mode large

  # Fast processing for testing
  uv run deepseek-ocr2-vllm.py dataset output --resolution-mode tiny --max-samples 100

  # Parse figures from a document dataset
  uv run deepseek-ocr2-vllm.py scientific-papers figures --prompt-mode figure

  # Free OCR without layout (fastest)
  uv run deepseek-ocr2-vllm.py images text --prompt-mode free

  # Custom prompt for specific task
  uv run deepseek-ocr2-vllm.py dataset output --prompt "<image>\nExtract all table data."

  # Custom resolution settings
  uv run deepseek-ocr2-vllm.py dataset output --base-size 1024 --image-size 768 --crop-mode

  # With custom batch size for performance tuning
  uv run deepseek-ocr2-vllm.py dataset output --batch-size 16 --max-model-len 16384
        """,
    )

    parser.add_argument("input_dataset", help="Input dataset ID from Hugging Face Hub")
    parser.add_argument("output_dataset", help="Output dataset ID for Hugging Face Hub")
    parser.add_argument(
        "--image-column",
        default="image",
        help="Column containing images (default: image)",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=8,
        help="Batch size for processing (default: 8, adjust based on GPU memory)",
    )
    parser.add_argument(
        "--model",
        default="deepseek-ai/DeepSeek-OCR-2",
        help="Model to use (default: deepseek-ai/DeepSeek-OCR-2)",
    )
    parser.add_argument(
        "--resolution-mode",
        default="gundam",
        choices=list(RESOLUTION_MODES.keys()) + ["custom"],
        help="Resolution mode preset (default: gundam)",
    )
    parser.add_argument(
        "--base-size",
        type=int,
        help="Base resolution size (overrides resolution-mode)",
    )
    parser.add_argument(
        "--image-size",
        type=int,
        help="Image tile size (overrides resolution-mode)",
    )
    # NOTE(review): store_true means --crop-mode can only enable cropping;
    # combined with the coercion to None below, an explicit "crop off" can
    # never be requested from the CLI when the preset enables it.
    parser.add_argument(
        "--crop-mode",
        action="store_true",
        help="Enable dynamic multi-tile cropping (overrides resolution-mode)",
    )
    parser.add_argument(
        "--max-model-len",
        type=int,
        default=8192,
        help="Maximum model context length (default: 8192)",
    )
    parser.add_argument(
        "--max-tokens",
        type=int,
        default=8192,
        help="Maximum tokens to generate (default: 8192)",
    )
    parser.add_argument(
        "--gpu-memory-utilization",
        type=float,
        default=0.8,
        help="GPU memory utilization (default: 0.8)",
    )
    parser.add_argument(
        "--prompt-mode",
        default="document",
        choices=list(PROMPT_MODES.keys()),
        help="Prompt mode preset (default: document). Use --prompt for custom prompts.",
    )
    parser.add_argument(
        "--prompt",
        help="Custom OCR prompt (overrides --prompt-mode)",
    )
    parser.add_argument("--hf-token", help="Hugging Face API token")
    parser.add_argument(
        "--split", default="train", help="Dataset split to use (default: train)"
    )
    parser.add_argument(
        "--max-samples",
        type=int,
        help="Maximum number of samples to process (for testing)",
    )
    parser.add_argument(
        "--private", action="store_true", help="Make output dataset private"
    )
    parser.add_argument(
        "--shuffle",
        action="store_true",
        help="Shuffle the dataset before processing (useful for random sampling)",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=42,
        help="Random seed for shuffling (default: 42)",
    )

    args = parser.parse_args()

    main(
        input_dataset=args.input_dataset,
        output_dataset=args.output_dataset,
        image_column=args.image_column,
        batch_size=args.batch_size,
        model=args.model,
        resolution_mode=args.resolution_mode,
        base_size=args.base_size,
        image_size=args.image_size,
        # False (flag absent) is mapped to None so the resolution preset's
        # crop_mode still applies when --crop-mode was not given.
        crop_mode=args.crop_mode if args.crop_mode else None,
        max_model_len=args.max_model_len,
        max_tokens=args.max_tokens,
        gpu_memory_utilization=args.gpu_memory_utilization,
        prompt_mode=args.prompt_mode,
        prompt=args.prompt,
        hf_token=args.hf_token,
        split=args.split,
        max_samples=args.max_samples,
        private=args.private,
        shuffle=args.shuffle,
        seed=args.seed,
    )