# Hugging Face Space: DeepSeek-OCR demo (Gradio app).
# NOTE(review): the hosted Space was displaying "Runtime error" at the time
# this source was captured.
import os
import tempfile

import gradio as gr
import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer
| model_name = "deepseek-ai/DeepSeek-OCR" | |
| print("Loading tokenizer...") | |
| tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) | |
| print("Loading model...") | |
| model = AutoModel.from_pretrained( | |
| model_name, | |
| trust_remote_code=True, | |
| use_safetensors=True, | |
| low_cpu_mem_usage=True | |
| ).cuda().eval() | |
| def ocr_infer(image): | |
| image.save("input.png") | |
| prompt = "<image>\nFree OCR." | |
| result = model.infer( | |
| tokenizer, | |
| prompt=prompt, | |
| image_file="input.png", | |
| output_path=".", | |
| image_size=640, | |
| crop_mode=True | |
| ) | |
| return result["text"] | |
| demo = gr.Interface( | |
| fn=ocr_infer, | |
| inputs=gr.Image(type="pil"), | |
| outputs="text", | |
| title="DeepSeek-OCR (HF Space)", | |
| description="GPU OCR API" | |
| ) | |
| demo.launch() | |