Update app.py
app.py
CHANGED
@@ -1,12 +1,10 @@
 # =========================
-# CAMEL-DOC-OCR (
-# Single-file
+# CAMEL-DOC-OCR (HF Spaces SAFE)
+# Single-file – NO CUDA init at global scope
 # =========================

 import os
 import gc
-import json
-import re
 import torch
 import fitz
 import gradio as gr
@@ -23,22 +21,18 @@ from transformers.models.qwen2_5_vl import Qwen2_5_VLForConditionalGeneration
 MODEL_ID = "prithivMLmods/Camel-Doc-OCR-062825"
 DPI = 150
 MAX_IMAGE_SIZE = 2048
-GPU_MEMORY_FRACTION = 0.8


 # =========================
-# TORCH
+# TORCH FLAGS (SAFE FOR SPACES)
 # =========================
 torch.set_grad_enabled(False)
 torch.backends.cuda.matmul.allow_tf32 = True
 torch.backends.cudnn.allow_tf32 = True

-if torch.cuda.is_available():
-    torch.cuda.set_per_process_memory_fraction(GPU_MEMORY_FRACTION, device=0)
-

 # =========================
-# LOAD MODEL (
+# LOAD MODEL (NO CUDA INIT HERE)
 # =========================
 bnb = BitsAndBytesConfig(
     load_in_4bit=True,
@@ -55,7 +49,7 @@ processor = AutoProcessor.from_pretrained(
 model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     MODEL_ID,
     quantization_config=bnb,
-    device_map="auto",
+    device_map="auto",  # HF Spaces will inject GPU here
     torch_dtype=torch.float16,
     trust_remote_code=True
 ).eval()
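Note: the hunk above cuts off the BitsAndBytesConfig(...) block after its first argument. For context, a minimal sketch of what a 4-bit load of this checkpoint typically looks like follows; the bnb_4bit_* fields and the AutoProcessor line are assumptions for illustration, not values taken from app.py.

import torch
from transformers import AutoProcessor, BitsAndBytesConfig
from transformers.models.qwen2_5_vl import Qwen2_5_VLForConditionalGeneration

MODEL_ID = "prithivMLmods/Camel-Doc-OCR-062825"

# Hypothetical 4-bit (NF4) config; only load_in_4bit=True is visible in the diff.
bnb = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",             # assumption
    bnb_4bit_compute_dtype=torch.float16,  # assumption
    bnb_4bit_use_double_quant=True,        # assumption
)

processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_ID,
    quantization_config=bnb,
    device_map="auto",           # per the diff comment: "HF Spaces will inject GPU here"
    torch_dtype=torch.float16,
    trust_remote_code=True,
).eval()

Keeping device_map="auto" instead of pinning a device at import time appears to be the point of the change: the module can import cleanly before any GPU is visible to the process.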
@@ -64,7 +58,7 @@ processor.tokenizer.pad_token_id = processor.tokenizer.eos_token_id


 # =========================
-# PDF → IMAGE
+# PDF → IMAGE (FAST & SAFE)
 # =========================
 def pdf_to_images(pdf_bytes):
     doc = fitz.open(stream=pdf_bytes, filetype="pdf")
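Note: only the signature and the fitz.open(...) line of pdf_to_images survive in this hunk. Below is a self-contained sketch of how such a helper is commonly written with PyMuPDF, reusing the DPI = 150 and MAX_IMAGE_SIZE = 2048 constants from the earlier hunk; the loop body is an assumption, not the file's actual code.

import fitz  # PyMuPDF
from PIL import Image

DPI = 150
MAX_IMAGE_SIZE = 2048

def pdf_to_images(pdf_bytes):
    # Hypothetical body: app.py's real implementation is not shown in the diff.
    doc = fitz.open(stream=pdf_bytes, filetype="pdf")
    zoom = DPI / 72.0                      # PDF user space is 72 dpi
    matrix = fitz.Matrix(zoom, zoom)
    images = []
    for page in doc:
        pix = page.get_pixmap(matrix=matrix, alpha=False)
        img = Image.frombytes("RGB", (pix.width, pix.height), pix.samples)
        img.thumbnail((MAX_IMAGE_SIZE, MAX_IMAGE_SIZE))  # cap the longest side
        images.append(img)
    doc.close()
    return images

Rendering at a fixed DPI and capping the longest side keeps per-page image sizes bounded regardless of the input PDF.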
@@ -86,7 +80,7 @@ def pdf_to_images(pdf_bytes):


 # =========================
-# OCR INFERENCE (
+# OCR INFERENCE (CUDA ONLY HERE)
 # =========================
 @spaces.GPU
 def run_inference(image, prompt, max_new_tokens):
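Note: this hunk carries the main point of the commit. The old global if torch.cuda.is_available(): ... set_per_process_memory_fraction(...) block is gone, and CUDA is only touched inside the @spaces.GPU-decorated function. A minimal sketch of that pattern follows; it assumes the model and processor globals loaded above, and the body of run_inference is an illustration since the diff stops at the signature.

import spaces
import torch

@spaces.GPU  # on ZeroGPU Spaces, a GPU is attached only for the duration of this call
def run_inference(image, prompt, max_new_tokens):
    # Assumes `processor` and `model` are the globals created at import time.
    messages = [{"role": "user",
                 "content": [{"type": "image"}, {"type": "text", "text": prompt}]}]
    text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = processor(text=[text], images=[image], return_tensors="pt").to(model.device)
    with torch.inference_mode():
        out = model.generate(**inputs, max_new_tokens=max_new_tokens)
    new_tokens = out[:, inputs["input_ids"].shape[1]:]  # drop the prompt tokens
    return processor.batch_decode(new_tokens, skip_special_tokens=True)[0]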
@@ -181,7 +175,7 @@ OUTPUT FORMAT:
 # GRADIO UI
 # =========================
 with gr.Blocks(title="Camel-Doc-OCR") as demo:
-    gr.Markdown("## 🧾 Camel-Doc-OCR (Qwen2.5-VL – 4bit,
+    gr.Markdown("## 🧾 Camel-Doc-OCR (Qwen2.5-VL – 4bit, HF Spaces Safe)")

     with gr.Row():
         with gr.Column(scale=1):
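Note: the UI hunk only shows the Blocks/Row/Column skeleton and the updated Markdown title. A hypothetical sketch of how such a layout is typically wired in Gradio follows; every component name and the ocr_pdf handler are assumptions, not app.py's actual widgets.

import gradio as gr

def ocr_pdf(pdf_file, max_new_tokens):
    # Hypothetical handler: in app.py this would call pdf_to_images() and run_inference().
    return "recognized text goes here"

with gr.Blocks(title="Camel-Doc-OCR") as demo:
    gr.Markdown("## 🧾 Camel-Doc-OCR (Qwen2.5-VL – 4bit, HF Spaces Safe)")
    with gr.Row():
        with gr.Column(scale=1):
            pdf_in = gr.File(label="PDF", file_types=[".pdf"])       # assumed input
            tokens = gr.Slider(64, 2048, value=512, label="Max new tokens")
            run_btn = gr.Button("Run OCR")
        with gr.Column(scale=2):
            out_box = gr.Textbox(label="Extracted text", lines=20)   # assumed output
    run_btn.click(fn=ocr_pdf, inputs=[pdf_in, tokens], outputs=out_box)

In the actual app the launch stays under the __main__ guard, as the final hunk shows.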
@@ -218,13 +212,16 @@ with gr.Blocks(title="Camel-Doc-OCR") as demo:


 # =========================
-# CLEANUP
+# CLEANUP
 # =========================
 def cleanup():
     torch.cuda.empty_cache()
     gc.collect()


+# =========================
+# LAUNCH
+# =========================
 if __name__ == "__main__":
     demo.launch(
         server_name="0.0.0.0",