File size: 3,861 Bytes
a808206
 
 
 
 
 
 
bc43a9e
a808206
 
 
bc43a9e
 
a808206
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
# /// script
# requires-python = ">=3.11"
# dependencies = [
#     "datasets>=4.0.0",
#     "huggingface-hub",
#     "pillow",
#     "torch",
#     "torchvision",
#     "transformers>=5.0.0",
#     "tqdm",
#     "accelerate",
#     "addict",
#     "matplotlib",
# ]
# ///

"""Convert document images to markdown using DeepSeek-OCR-2 via transformers."""

import argparse
import io
import json
import os
import sys
import tempfile
from datetime import datetime

import torch
from datasets import load_dataset, Dataset
from huggingface_hub import login
from PIL import Image
from tqdm.auto import tqdm
from transformers import AutoModel, AutoTokenizer

PROMPT = "<image>\n<|grounding|>Convert the document to markdown. "


def main(input_dataset: str, output_dataset: str, split: str = "train",
         max_samples: int | None = None, image_column: str = "image"):

    if not torch.cuda.is_available():
        print("ERROR: CUDA not available. GPU required.")
        sys.exit(1)
    print(f"GPU: {torch.cuda.get_device_name(0)}")

    token = os.environ.get("HF_TOKEN")
    if token:
        login(token=token)

    print("Loading model deepseek-ai/DeepSeek-OCR-2...")
    tokenizer = AutoTokenizer.from_pretrained(
        "deepseek-ai/DeepSeek-OCR-2", trust_remote_code=True
    )
    model = AutoModel.from_pretrained(
        "deepseek-ai/DeepSeek-OCR-2",
        trust_remote_code=True,
        use_safetensors=True,
        torch_dtype=torch.bfloat16,
    ).cuda()

    print(f"Loading dataset {input_dataset}...")
    ds = load_dataset(input_dataset, split=split)
    if max_samples:
        ds = ds.select(range(min(max_samples, len(ds))))
    print(f"Processing {len(ds)} samples...")

    results = []
    with tempfile.TemporaryDirectory() as tmpdir:
        for i, row in enumerate(tqdm(ds)):
            img_path = os.path.join(tmpdir, f"img_{i}.jpg")
            img = row[image_column]
            if isinstance(img, dict):
                img = Image.open(__import__("io").BytesIO(img["bytes"]))
            img.save(img_path, format="JPEG", quality=95)

            try:
                out = model.infer(
                    tokenizer,
                    prompt=PROMPT,
                    image_file=img_path,
                    output_path=tmpdir,
                    base_size=1024,
                    image_size=768,
                    crop_mode=True,
                    save_results=False,
                )
                if i == 0:
                    print(f"[DEBUG] out type={type(out)}, value={repr(out)[:200]}")
                markdown = out if isinstance(out, str) else str(out)
            except Exception as e:
                print(f"Error on sample {i}: {e}")
                markdown = ""

            results.append({
                "image": row[image_column],
                "gt_json": row.get("gt_json", ""),
                "markdown": markdown,
                "inference_info": json.dumps([{
                    "column_name": "markdown",
                    "model_id": "deepseek-ai/DeepSeek-OCR-2",
                    "processing_date": datetime.now().strftime("%Y-%m-%d"),
                    "backend": "transformers",
                }]),
            })

    print(f"Pushing to {output_dataset}...")
    Dataset.from_list(results).push_to_hub(output_dataset, private=False)
    print(f"Done → https://huggingface.co/datasets/{output_dataset}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("input_dataset")
    parser.add_argument("output_dataset")
    parser.add_argument("--split", default="train")
    parser.add_argument("--max-samples", type=int, default=None)
    parser.add_argument("--image-column", default="image")
    args = parser.parse_args()
    main(args.input_dataset, args.output_dataset, args.split,
         args.max_samples, args.image_column)