davanstrien HF Staff commited on
Commit
ee0f0a2
·
verified ·
1 Parent(s): ddec3fc

Upload paddleocr-vl-1.5.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. paddleocr-vl-1.5.py +729 -0
paddleocr-vl-1.5.py ADDED
@@ -0,0 +1,729 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /// script
2
+ # requires-python = ">=3.11"
3
+ # dependencies = [
4
+ # "datasets",
5
+ # "huggingface-hub",
6
+ # "pillow",
7
+ # "torch",
8
+ # "transformers>=4.45.0",
9
+ # "accelerate",
10
+ # "tqdm",
11
+ # "toolz",
12
+ # ]
13
+ # ///
14
+
15
+ """
16
+ Convert document images to text/tables/formulas using PaddleOCR-VL-1.5 with transformers.
17
+
18
+ PaddleOCR-VL-1.5 is a compact 0.9B OCR model that achieves 94.5% SOTA accuracy on
19
+ OmniDocBench v1.5. It supports multiple task modes including OCR, table recognition,
20
+ formula extraction, chart analysis, text spotting, and seal recognition.
21
+
22
+ NOTE: This script uses transformers batch inference (not vLLM) because PaddleOCR-VL
23
+ only supports vLLM in server mode, which doesn't fit the UV script pattern.
24
+ Transformers batch inference via apply_chat_template(padding=True) provides
25
+ efficient batching while keeping the single-command interface.
26
+
27
+ Features:
28
+ - 🎯 SOTA Performance: 94.5% on OmniDocBench v1.5
29
+ - 🧩 Ultra-compact: Only 0.9B parameters
30
+ - 📝 OCR mode: General text extraction to markdown
31
+ - 📊 Table mode: HTML table recognition
32
+ - 📐 Formula mode: LaTeX mathematical notation
33
+ - 📈 Chart mode: Structured chart analysis
34
+ - 🔍 Spotting mode: Text spotting with localization
35
+ - 🔖 Seal mode: Seal/stamp recognition
36
+ - 🌍 Multilingual support
37
+ - ⚡ Fast inference with batch processing
38
+
39
+ Model: PaddlePaddle/PaddleOCR-VL-1.5
40
+ Backend: Transformers (batch inference)
41
+ Performance: 94.5% SOTA on OmniDocBench v1.5
42
+ """
43
+
44
+ import argparse
45
+ import io
46
+ import json
47
+ import logging
48
+ import math
49
+ import os
50
+ import sys
51
+ from datetime import datetime
52
+ from typing import Any, Dict, List, Union
53
+
54
+ import torch
55
+ from datasets import load_dataset
56
+ from huggingface_hub import DatasetCard, login
57
+ from PIL import Image
58
+ from toolz import partition_all
59
+ from tqdm.auto import tqdm
60
+
61
+ logging.basicConfig(level=logging.INFO)
62
+ logger = logging.getLogger(__name__)
63
+
64
+
65
# Hugging Face Hub ID of the OCR model loaded by this script.
MODEL_ID = "PaddlePaddle/PaddleOCR-VL-1.5"

# Task mode configurations from official PaddleOCR-VL-1.5 documentation.
# Maps the CLI task name to the literal prompt text that selects the task
# (see create_message, which sends this string alongside the image).
TASK_MODES = {
    "ocr": "OCR:",
    "table": "Table Recognition:",
    "formula": "Formula Recognition:",
    "chart": "Chart Recognition:",
    "spotting": "Spotting:",
    "seal": "Seal Recognition:",
}

# Human-readable task descriptions, used for the dataset card, log messages,
# and the CLI banner/help output.
TASK_DESCRIPTIONS = {
    "ocr": "General text extraction to markdown format",
    "table": "Table extraction to HTML format",
    "formula": "Mathematical formula recognition to LaTeX",
    "chart": "Chart and diagram analysis",
    "spotting": "Text spotting with localization",
    "seal": "Seal and stamp recognition",
}

# Per-task pixel budgets handed to smart_resize as max_pixels. The factor 28
# matches smart_resize's dimension-divisibility factor. Spotting needs a
# larger budget because it localizes text within the image.
MAX_PIXELS = {
    "spotting": 2048 * 28 * 28,  # Higher resolution for spotting
    "default": 1280 * 28 * 28,
}
93
+
94
+
95
def check_cuda_availability():
    """Verify a CUDA-capable GPU is present; log it, or exit the process.

    Logs the GPU name on success. On failure logs two error messages and
    terminates with exit status 1 — this script cannot run on CPU.
    """
    if torch.cuda.is_available():
        logger.info(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")
        return
    logger.error("CUDA is not available. This script requires a GPU.")
    logger.error("Please run on a machine with a CUDA-capable GPU.")
    sys.exit(1)
103
+
104
+
105
def smart_resize(
    height: int,
    width: int,
    factor: int = 28,
    min_pixels: int = 28 * 28 * 130,
    max_pixels: int = 28 * 28 * 1280,
) -> tuple[int, int]:
    """
    PaddleOCR-VL's intelligent resize logic.

    Computes target dimensions such that:
    1. Both dimensions are divisible by ``factor`` (28)
    2. Total pixels fall within [min_pixels, max_pixels]
    3. The aspect ratio is preserved as closely as possible

    Args:
        height: Original image height
        width: Original image width
        factor: Dimension divisibility factor (default: 28)
        min_pixels: Minimum total pixels (default: 100,880)
        max_pixels: Maximum total pixels (default: 1,003,520)

    Returns:
        Tuple of (new_height, new_width)
    """
    # Bring any sub-factor dimension up to `factor`, scaling the other
    # dimension proportionally. Height is adjusted first, matching the
    # reference implementation's ordering.
    if height < factor:
        width = round((width * factor) / height)
        height = factor
    if width < factor:
        height = round((height * factor) / width)
        width = factor

    # Warn (but proceed) on pathological aspect ratios.
    if max(height, width) / min(height, width) > 200:
        logger.warning(
            f"Extreme aspect ratio detected: {max(height, width) / min(height, width):.1f}"
        )

    # Snap both dimensions to the nearest multiple of `factor`.
    h_bar = round(height / factor) * factor
    w_bar = round(width / factor) * factor
    area = h_bar * w_bar

    if area > max_pixels:
        # Too large: shrink uniformly, flooring so we stay under budget.
        shrink = math.sqrt((height * width) / max_pixels)
        h_bar = math.floor(height / shrink / factor) * factor
        w_bar = math.floor(width / shrink / factor) * factor
    elif area < min_pixels:
        # Too small: grow uniformly, ceiling so we reach the minimum.
        grow = math.sqrt(min_pixels / (height * width))
        h_bar = math.ceil(height * grow / factor) * factor
        w_bar = math.ceil(width * grow / factor) * factor

    return h_bar, w_bar
156
+
157
+
158
def prepare_image(
    image: Union[Image.Image, Dict[str, Any], str],
    task_mode: str = "ocr",
    apply_smart_resize: bool = True,
) -> Image.Image:
    """
    Prepare image for PaddleOCR-VL-1.5 processing.

    Args:
        image: PIL Image, dict with bytes, or file path
        task_mode: Task mode (affects max_pixels for spotting)
        apply_smart_resize: Whether to apply smart resize

    Returns:
        Processed PIL Image
    """
    # Normalize the three accepted input forms into a PIL image.
    if isinstance(image, Image.Image):
        img = image
    elif isinstance(image, dict) and "bytes" in image:
        img = Image.open(io.BytesIO(image["bytes"]))
    elif isinstance(image, str):
        img = Image.open(image)
    else:
        raise ValueError(f"Unsupported image type: {type(image)}")

    img = img.convert("RGB")

    if not apply_smart_resize:
        return img

    # Spotting uses a larger pixel budget; everything else uses the default.
    pixel_budget = MAX_PIXELS.get(task_mode, MAX_PIXELS["default"])
    before = img.size
    target_h, target_w = smart_resize(img.height, img.width, max_pixels=pixel_budget)
    if (target_w, target_h) != (img.width, img.height):
        img = img.resize((target_w, target_h), Image.Resampling.LANCZOS)
        logger.debug(f"Resized image from {before} to {img.size}")

    return img
199
+
200
+
201
def create_message(image: Image.Image, task_mode: str) -> List[Dict]:
    """
    Create chat message for PaddleOCR-VL-1.5 processing.

    Args:
        image: Prepared PIL Image
        task_mode: Task mode (ocr, table, formula, chart, spotting, seal)

    Returns:
        Message in chat format
    """
    # The content pairs the image with the task-selecting prompt string.
    content = [
        {"type": "image", "image": image},
        {"type": "text", "text": TASK_MODES[task_mode]},
    ]
    return [{"role": "user", "content": content}]
221
+
222
+
223
def create_dataset_card(
    source_dataset: str,
    model: str,
    task_mode: str,
    num_samples: int,
    processing_time: str,
    batch_size: int,
    max_tokens: int,
    apply_smart_resize: bool,
    image_column: str = "image",
    split: str = "train",
) -> str:
    """Create a dataset card documenting the OCR process.

    Args:
        source_dataset: Hub ID of the dataset the images came from.
        model: Model ID used for inference.
        task_mode: One of the TASK_MODES keys.
        num_samples: Number of rows processed.
        processing_time: Human-readable duration such as "12.3 min"; the
            leading number is parsed back out for the speed estimate.
        batch_size: Batch size used during generation.
        max_tokens: Max new tokens generated per image.
        apply_smart_resize: Whether smart resize was enabled.
        image_column: Name of the image column in the source dataset.
        split: Dataset split that was processed.

    Returns:
        Markdown dataset card (with YAML front matter) as a string.
    """
    task_description = TASK_DESCRIPTIONS[task_mode]

    # Fix: guard against ZeroDivisionError when very fast runs (e.g. tiny
    # --max-samples tests) format the elapsed time as "0.0 min".
    minutes = float(processing_time.split()[0])
    images_per_second = num_samples / (minutes * 60) if minutes > 0 else 0.0

    return f"""---
tags:
- ocr
- document-processing
- paddleocr-vl-1.5
- {task_mode}
- uv-script
- generated
---

# Document Processing using PaddleOCR-VL-1.5 ({task_mode.upper()} mode)

This dataset contains {task_mode.upper()} results from images in [{source_dataset}](https://huggingface.co/datasets/{source_dataset}) using PaddleOCR-VL-1.5, an ultra-compact 0.9B SOTA OCR model.

## Processing Details

- **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset})
- **Model**: [{model}](https://huggingface.co/{model})
- **Task Mode**: `{task_mode}` - {task_description}
- **Number of Samples**: {num_samples:,}
- **Processing Time**: {processing_time}
- **Processing Date**: {datetime.now().strftime("%Y-%m-%d %H:%M UTC")}

### Configuration

- **Image Column**: `{image_column}`
- **Output Column**: `paddleocr_1.5_{task_mode}`
- **Dataset Split**: `{split}`
- **Batch Size**: {batch_size}
- **Smart Resize**: {"Enabled" if apply_smart_resize else "Disabled"}
- **Max Output Tokens**: {max_tokens:,}
- **Backend**: Transformers (batch inference)

## Model Information

PaddleOCR-VL-1.5 is a state-of-the-art, resource-efficient model for document parsing:
- 🎯 **SOTA Performance** - 94.5% on OmniDocBench v1.5
- 🧩 **Ultra-compact** - Only 0.9B parameters
- 📝 **OCR mode** - General text extraction
- 📊 **Table mode** - HTML table recognition
- 📐 **Formula mode** - LaTeX mathematical notation
- 📈 **Chart mode** - Structured chart analysis
- 🔍 **Spotting mode** - Text spotting with localization
- 🔖 **Seal mode** - Seal and stamp recognition
- 🌍 **Multilingual** - Support for multiple languages
- ⚡ **Fast** - Efficient batch inference

### Task Modes

- **OCR**: Extract text content to markdown format
- **Table Recognition**: Extract tables to HTML format
- **Formula Recognition**: Extract mathematical formulas to LaTeX
- **Chart Recognition**: Analyze and describe charts/diagrams
- **Spotting**: Text spotting with location information
- **Seal Recognition**: Extract text from seals and stamps

## Dataset Structure

The dataset contains all original columns plus:
- `paddleocr_1.5_{task_mode}`: The extracted content based on task mode
- `inference_info`: JSON list tracking all OCR models applied to this dataset

## Usage

```python
from datasets import load_dataset
import json

# Load the dataset
dataset = load_dataset("{{output_dataset_id}}", split="{split}")

# Access the extracted content
for example in dataset:
    print(example["paddleocr_1.5_{task_mode}"])
    break

# View all OCR models applied to this dataset
inference_info = json.loads(dataset[0]["inference_info"])
for info in inference_info:
    print(f"Task: {{info['task_mode']}} - Model: {{info['model_id']}}")
```

## Reproduction

This dataset was generated using the [uv-scripts/ocr](https://huggingface.co/datasets/uv-scripts/ocr) PaddleOCR-VL-1.5 script:

```bash
uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/paddleocr-vl-1.5.py \\
    {source_dataset} \\
    <output-dataset> \\
    --task-mode {task_mode} \\
    --image-column {image_column} \\
    --batch-size {batch_size}
```

## Performance

- **Model Size**: 0.9B parameters
- **Benchmark Score**: 94.5% SOTA on OmniDocBench v1.5
- **Processing Speed**: ~{images_per_second:.2f} images/second
- **Backend**: Transformers batch inference

Generated with 🤖 [UV Scripts](https://huggingface.co/uv-scripts)
"""
342
+
343
+
344
def main(
    input_dataset: str,
    output_dataset: str,
    image_column: str = "image",
    batch_size: int = 8,
    task_mode: str = "ocr",
    max_tokens: int = 512,
    apply_smart_resize: bool = True,
    hf_token: str | None = None,
    split: str = "train",
    max_samples: int | None = None,
    private: bool = False,
    shuffle: bool = False,
    seed: int = 42,
    output_column: str | None = None,
):
    """Process images from HF dataset through PaddleOCR-VL-1.5 model.

    End-to-end pipeline: load the source dataset, run batched OCR inference
    on GPU, append the results plus an ``inference_info`` provenance column,
    and push the augmented dataset (with a generated dataset card) to the Hub.

    Args:
        input_dataset: Hub ID of the source dataset containing images.
        output_dataset: Hub ID to push the augmented dataset to.
        image_column: Name of the column holding the images.
        batch_size: Number of images per generation batch.
        task_mode: One of the TASK_MODES keys (ocr, table, formula, chart,
            spotting, seal).
        max_tokens: Maximum new tokens to generate per image.
        apply_smart_resize: Resize images into the model's pixel budget first.
        hf_token: Explicit HF token; falls back to the HF_TOKEN env var.
        split: Dataset split to process.
        max_samples: If set, process only the first N samples (after shuffle).
        private: Push the output dataset as private.
        shuffle: Shuffle the dataset (with `seed`) before limiting/processing.
        seed: Random seed used when shuffling.
        output_column: Output column name; defaults to
            ``paddleocr_1.5_<task_mode>``.

    Raises:
        ValueError: If task_mode is unknown or image_column is missing.
        SystemExit: If no CUDA GPU is available.
    """

    # Check CUDA availability first (exits the process if no GPU)
    check_cuda_availability()

    # Track processing start time
    start_time = datetime.now()

    # Login to HF if token provided
    HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
    if HF_TOKEN:
        login(token=HF_TOKEN)

    # Validate task mode
    if task_mode not in TASK_MODES:
        raise ValueError(
            f"Invalid task_mode '{task_mode}'. Choose from: {list(TASK_MODES.keys())}"
        )

    # Auto-generate output column name based on task mode
    if output_column is None:
        output_column = f"paddleocr_1.5_{task_mode}"

    logger.info(f"Using task mode: {task_mode} - {TASK_DESCRIPTIONS[task_mode]}")
    logger.info(f"Output will be written to column: {output_column}")

    # Load dataset
    logger.info(f"Loading dataset: {input_dataset}")
    dataset = load_dataset(input_dataset, split=split)

    # Validate image column
    if image_column not in dataset.column_names:
        raise ValueError(
            f"Column '{image_column}' not found. Available: {dataset.column_names}"
        )

    # Shuffle if requested (before max_samples, so the sample is random)
    if shuffle:
        logger.info(f"Shuffling dataset with seed {seed}")
        dataset = dataset.shuffle(seed=seed)

    # Limit samples if requested
    if max_samples:
        dataset = dataset.select(range(min(max_samples, len(dataset))))
        logger.info(f"Limited to {len(dataset)} samples")

    # Import transformers components (deferred until after the cheap
    # dataset/argument validation steps above)
    logger.info(f"Loading model: {MODEL_ID}")
    logger.info("This may take a minute on first run...")

    from transformers import AutoModelForImageTextToText, AutoProcessor

    # Load processor and model in bf16 on the GPU, eval mode
    processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
    model = (
        AutoModelForImageTextToText.from_pretrained(
            MODEL_ID,
            torch_dtype=torch.bfloat16,
            trust_remote_code=True,
        )
        .to("cuda")
        .eval()
    )

    logger.info(f"Model loaded on {next(model.parameters()).device}")
    logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
    if apply_smart_resize:
        max_pixels = MAX_PIXELS.get(task_mode, MAX_PIXELS["default"])
        logger.info(f"Smart resize enabled (max_pixels={max_pixels:,})")

    # Process images in batches; all_outputs accumulates one string per row,
    # in dataset order
    all_outputs = []

    for batch_indices in tqdm(
        partition_all(batch_size, range(len(dataset))),
        total=(len(dataset) + batch_size - 1) // batch_size,
        desc=f"PaddleOCR-VL-1.5 {task_mode.upper()}",
    ):
        batch_indices = list(batch_indices)
        batch_images = [dataset[i][image_column] for i in batch_indices]

        try:
            # Prepare images and create messages
            processed_images = [
                prepare_image(
                    img, task_mode=task_mode, apply_smart_resize=apply_smart_resize
                )
                for img in batch_images
            ]

            # Create messages for batch
            batch_messages = [
                create_message(img, task_mode) for img in processed_images
            ]

            # Process with transformers batch inference (padding=True pads
            # the batch to a common length for a single generate call)
            inputs = processor.apply_chat_template(
                batch_messages,
                padding=True,
                add_generation_prompt=True,
                tokenize=True,
                return_dict=True,
                return_tensors="pt",
            ).to(model.device)

            # Generate outputs (greedy decoding)
            with torch.no_grad():
                outputs = model.generate(
                    **inputs,
                    max_new_tokens=max_tokens,
                    do_sample=False,
                )

            # Decode outputs - slice off the prompt tokens so only the
            # generated continuation is decoded
            input_len = inputs["input_ids"].shape[1]
            generated_ids = outputs[:, input_len:]
            results = processor.batch_decode(generated_ids, skip_special_tokens=True)

            # Add to outputs
            for text in results:
                all_outputs.append(text.strip())

        except Exception as e:
            logger.error(f"Error processing batch: {e}")
            # Add one error placeholder per image so all_outputs stays
            # aligned with the dataset rows
            all_outputs.extend(
                [f"[{task_mode.upper()} ERROR: {str(e)[:100]}]"] * len(batch_images)
            )

    # Calculate processing time
    processing_duration = datetime.now() - start_time
    processing_time_str = f"{processing_duration.total_seconds() / 60:.1f} min"

    # Add output column to dataset
    logger.info(f"Adding '{output_column}' column to dataset")
    dataset = dataset.add_column(output_column, all_outputs)

    # Handle inference_info tracking (for multi-model comparisons):
    # one JSON entry describing this run is appended per row
    inference_entry = {
        "model_id": MODEL_ID,
        "model_name": "PaddleOCR-VL-1.5",
        "model_size": "0.9B",
        "task_mode": task_mode,
        "column_name": output_column,
        "timestamp": datetime.now().isoformat(),
        "max_tokens": max_tokens,
        "smart_resize": apply_smart_resize,
        "backend": "transformers",
    }

    if "inference_info" in dataset.column_names:
        # Append to existing inference info
        logger.info("Updating existing inference_info column")

        def update_inference_info(example):
            # Tolerate missing/corrupt JSON in the existing column by
            # starting a fresh list rather than failing the whole run
            try:
                existing_info = (
                    json.loads(example["inference_info"])
                    if example["inference_info"]
                    else []
                )
            except (json.JSONDecodeError, TypeError):
                existing_info = []

            existing_info.append(inference_entry)
            return {"inference_info": json.dumps(existing_info)}

        dataset = dataset.map(update_inference_info)
    else:
        # Create new inference_info column
        logger.info("Creating new inference_info column")
        inference_list = [json.dumps([inference_entry])] * len(dataset)
        dataset = dataset.add_column("inference_info", inference_list)

    # Push to hub
    logger.info(f"Pushing to {output_dataset}")
    dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN)

    # Create and push dataset card
    logger.info("Creating dataset card")
    card_content = create_dataset_card(
        source_dataset=input_dataset,
        model=MODEL_ID,
        task_mode=task_mode,
        num_samples=len(dataset),
        processing_time=processing_time_str,
        batch_size=batch_size,
        max_tokens=max_tokens,
        apply_smart_resize=apply_smart_resize,
        image_column=image_column,
        split=split,
    )

    card = DatasetCard(card_content)
    card.push_to_hub(output_dataset, token=HF_TOKEN)

    logger.info("PaddleOCR-VL-1.5 processing complete!")
    logger.info(
        f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
    )
    logger.info(f"Processing time: {processing_time_str}")
    logger.info(
        f"Processing speed: {len(dataset) / processing_duration.total_seconds():.2f} images/sec"
    )
    logger.info(f"Task mode: {task_mode} - {TASK_DESCRIPTIONS[task_mode]}")
565
+
566
+
567
if __name__ == "__main__":
    # Show example usage if no arguments were given, instead of letting
    # argparse fail on the missing positionals.
    if len(sys.argv) == 1:
        print("=" * 80)
        print("PaddleOCR-VL-1.5 Document Processing")
        print("=" * 80)
        print("\nSOTA 0.9B OCR model (94.5% on OmniDocBench v1.5)")
        print("\nFeatures:")
        print("- 🎯 SOTA Performance - 94.5% on OmniDocBench v1.5")
        print("- 🧩 Ultra-compact - Only 0.9B parameters")
        print("- 📝 OCR mode - General text extraction")
        print("- 📊 Table mode - HTML table recognition")
        print("- 📐 Formula mode - LaTeX mathematical notation")
        print("- 📈 Chart mode - Structured chart analysis")
        print("- 🔍 Spotting mode - Text spotting with localization")
        print("- 🔖 Seal mode - Seal and stamp recognition")
        print("- 🌍 Multilingual support")
        print("- ⚡ Fast batch inference with transformers")
        print("\nTask Modes:")
        for mode, description in TASK_DESCRIPTIONS.items():
            print(f"  {mode:10} - {description}")
        print("\nExample usage:")
        print("\n1. Basic OCR (default mode):")
        print("   uv run paddleocr-vl-1.5.py input-dataset output-dataset")
        print("\n2. Table extraction:")
        print("   uv run paddleocr-vl-1.5.py docs tables-extracted --task-mode table")
        print("\n3. Formula recognition:")
        print(
            "   uv run paddleocr-vl-1.5.py papers formulas --task-mode formula --batch-size 16"
        )
        print("\n4. Text spotting (higher resolution):")
        print("   uv run paddleocr-vl-1.5.py images spotted --task-mode spotting")
        print("\n5. Seal recognition:")
        print("   uv run paddleocr-vl-1.5.py seals recognized --task-mode seal")
        print("\n6. Test with small sample:")
        print("   uv run paddleocr-vl-1.5.py dataset test --max-samples 10 --shuffle")
        print("\n7. Running on HF Jobs:")
        print("   hf jobs uv run --flavor l4x1 \\")
        print("       -s HF_TOKEN \\")
        print(
            "       https://huggingface.co/datasets/uv-scripts/ocr/raw/main/paddleocr-vl-1.5.py \\"
        )
        print("       input-dataset output-dataset --task-mode ocr")
        print("\n" + "=" * 80)
        print("\nBackend: Transformers (batch inference)")
        print(
            "Note: Uses transformers instead of vLLM for single-command UV script compatibility"
        )
        print("\nFor full help, run: uv run paddleocr-vl-1.5.py --help")
        sys.exit(0)

    # CLI definition; epilog is shown verbatim thanks to
    # RawDescriptionHelpFormatter.
    parser = argparse.ArgumentParser(
        description="Document processing using PaddleOCR-VL-1.5 (0.9B SOTA model, transformers backend)",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Task Modes:
  ocr       General text extraction to markdown (default)
  table     Table extraction to HTML format
  formula   Mathematical formula recognition to LaTeX
  chart     Chart and diagram analysis
  spotting  Text spotting with localization (higher resolution)
  seal      Seal and stamp recognition

Examples:
  # Basic text OCR
  uv run paddleocr-vl-1.5.py my-docs analyzed-docs

  # Extract tables from documents
  uv run paddleocr-vl-1.5.py papers tables --task-mode table

  # Recognize mathematical formulas
  uv run paddleocr-vl-1.5.py textbooks formulas --task-mode formula

  # Text spotting (uses higher resolution)
  uv run paddleocr-vl-1.5.py images spotted --task-mode spotting

  # Seal recognition
  uv run paddleocr-vl-1.5.py documents seals --task-mode seal

  # Test with random sampling
  uv run paddleocr-vl-1.5.py large-dataset test --max-samples 50 --shuffle --task-mode ocr

  # Disable smart resize for original resolution
  uv run paddleocr-vl-1.5.py images output --no-smart-resize

Backend: Transformers batch inference (not vLLM)
""",
    )

    parser.add_argument("input_dataset", help="Input dataset ID from Hugging Face Hub")
    parser.add_argument("output_dataset", help="Output dataset ID for Hugging Face Hub")
    parser.add_argument(
        "--image-column",
        default="image",
        help="Column containing images (default: image)",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=8,
        help="Batch size for processing (default: 8)",
    )
    parser.add_argument(
        "--task-mode",
        choices=list(TASK_MODES.keys()),
        default="ocr",
        help="Task type: ocr (default), table, formula, chart, spotting, or seal",
    )
    parser.add_argument(
        "--max-tokens",
        type=int,
        default=512,
        help="Maximum tokens to generate (default: 512)",
    )
    # Negative flag: smart resize is on by default and inverted into
    # apply_smart_resize below.
    parser.add_argument(
        "--no-smart-resize",
        action="store_true",
        help="Disable smart resize, use original image size",
    )
    parser.add_argument("--hf-token", help="Hugging Face API token")
    parser.add_argument(
        "--split", default="train", help="Dataset split to use (default: train)"
    )
    parser.add_argument(
        "--max-samples",
        type=int,
        help="Maximum number of samples to process (for testing)",
    )
    parser.add_argument(
        "--private", action="store_true", help="Make output dataset private"
    )
    parser.add_argument(
        "--shuffle", action="store_true", help="Shuffle dataset before processing"
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=42,
        help="Random seed for shuffling (default: 42)",
    )
    parser.add_argument(
        "--output-column",
        help="Column name for output (default: paddleocr_1.5_[task_mode])",
    )

    args = parser.parse_args()

    # Dispatch to the pipeline with all CLI options spelled out explicitly.
    main(
        input_dataset=args.input_dataset,
        output_dataset=args.output_dataset,
        image_column=args.image_column,
        batch_size=args.batch_size,
        task_mode=args.task_mode,
        max_tokens=args.max_tokens,
        apply_smart_resize=not args.no_smart_resize,
        hf_token=args.hf_token,
        split=args.split,
        max_samples=args.max_samples,
        private=args.private,
        shuffle=args.shuffle,
        seed=args.seed,
        output_column=args.output_column,
    )