DeepSeek-OCR 2: Visual Causal Flow
Paper • 2601.20552 • Published • 67
This is a port of deepseek-ai/DeepSeek-OCR-2 for use with the Hugging Face `transformers` library. 🤗

import torch
from transformers import AutoProcessor, AutoModelForImageTextToText

# Load the processor and the bfloat16 checkpoint with automatic device placement.
processor = AutoProcessor.from_pretrained("thisisiron/DeepSeek-OCR-2-hf")
model = AutoModelForImageTextToText.from_pretrained(
    "thisisiron/DeepSeek-OCR-2-hf", torch_dtype=torch.bfloat16, device_map="auto"
)

# A sample document image; the processor accepts an image URL directly.
image = "https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/image_ocr.jpg"

# "<image>\nFree OCR." asks for plain-text OCR of the page.
inputs = processor(images=image, text="<image>\nFree OCR.", return_tensors="pt")
inputs = inputs.to(model.device, dtype=torch.bfloat16)

# Greedy decoding, then decode only the newly generated tokens (skip the prompt).
output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=4096)
result = processor.decode(output_ids[0, inputs["input_ids"].shape[1]:], skip_special_tokens=True)
The <|grounding|> token enables coordinate-aware output with <|ref|> and <|det|> tags.
# Grounded conversion: the <|grounding|> prompt token makes the model emit
# <|ref|>/<|det|> tags, so special tokens must be kept when decoding.
inputs = processor(
    images=image,
    text="<image>\n<|grounding|>Convert the document to markdown.",
    return_tensors="pt",
).to(model.device, dtype=torch.bfloat16)

prompt_len = inputs["input_ids"].shape[1]
output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=4096)
# Slice off the prompt tokens and keep the grounding tags in the output text.
processor.decode(output_ids[0, prompt_len:], skip_special_tokens=False)
@article{wei2026deepseek,
title={DeepSeek-OCR 2: Visual Causal Flow},
author={Wei, Haoran and Sun, Yaofeng and Li, Yukun},
journal={arXiv preprint arXiv:2601.20552},
year={2026}
}