Bapt120 committed on
Commit 3c814ba · verified · 1 Parent(s): 7a42359

Update app.py

Files changed (1)
  1. app.py +278 -63
app.py CHANGED
@@ -1,70 +1,285 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
-
- def respond(
-     message,
-     history: list[dict[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
-     hf_token: gr.OAuthToken,
- ):
-     """
-     For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-     """
-     client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
-
-     messages = [{"role": "system", "content": system_message}]
-
-     messages.extend(history)
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         choices = message.choices
-         token = ""
-         if len(choices) and choices[0].delta.content:
-             token = choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- chatbot = gr.ChatInterface(
-     respond,
-     type="messages",
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
  )

- with gr.Blocks() as demo:
-     with gr.Sidebar():
-         gr.LoginButton()
-     chatbot.render()
  if __name__ == "__main__":
      demo.launch()
+ #!/usr/bin/env python3
+ import subprocess
+ import sys
+
+ # Install flash-attn for GPU only
+ import torch
+ if torch.cuda.is_available():
+     print("CUDA detected - installing flash-attn for optimal GPU performance...")
+     subprocess.run(
+         "pip install flash-attn --no-build-isolation",
+         env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
+         shell=True,
+     )
+
  import gradio as gr
+ import spaces
+ from PIL import Image
+ from io import BytesIO
+ import pypdfium2 as pdfium
+ from transformers import (
+     LightOnOCRForConditionalGeneration,
+     LightOnOCRProcessor,
+ )
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ # Choose best attention implementation based on device
+ if device == "cuda":
+     attn_implementation = "flash_attention_2"  # Best for GPU
+     dtype = torch.bfloat16
+     print("Using flash_attention_2 for GPU")
+ else:
+     attn_implementation = "eager"  # Best for CPU
+     dtype = torch.float32
+     print("Using eager attention for CPU")
+
+ # Initialize the LightOnOCR model and processor
+ print(f"Loading model on {device} with {attn_implementation} attention...")
+ model = LightOnOCRForConditionalGeneration.from_pretrained(
+     "lightonai/LightOnOCR-1B-1025",
+     attn_implementation=attn_implementation,
+     torch_dtype=dtype,
+     trust_remote_code=True
+ ).to(device).eval()
+
+ processor = LightOnOCRProcessor.from_pretrained(
+     "lightonai/LightOnOCR-1B-1025",
+     trust_remote_code=True
  )
+ print("Model loaded successfully!")
+
+
+ def render_pdf_page(page, max_resolution=1540, scale=2.77):
+     """Render a PDF page to PIL Image."""
+     width, height = page.get_size()
+     pixel_width = width * scale
+     pixel_height = height * scale
+     resize_factor = min(1, max_resolution / pixel_width, max_resolution / pixel_height)
+     target_scale = scale * resize_factor
+     return page.render(scale=target_scale, rev_byteorder=True).to_pil()
+
+
+ def process_pdf(pdf_path, page_num=1):
+     """Extract a specific page from PDF."""
+     pdf = pdfium.PdfDocument(pdf_path)
+     total_pages = len(pdf)
+     page_idx = min(max(int(page_num) - 1, 0), total_pages - 1)
+
+     page = pdf[page_idx]
+     img = render_pdf_page(page)
+
+     pdf.close()
+     return img, total_pages, page_idx + 1
+
+
+ @spaces.GPU
+ def extract_text_from_image(image, temperature=0.2):
+     """Extract text from image using LightOnOCR model."""
+     # Prepare the chat format
+     chat = [
+         {
+             "role": "user",
+             "content": [
+                 {"type": "image", "url": image},
+             ],
+         }
+     ]
+
+     # Apply chat template and tokenize
+     inputs = processor.apply_chat_template(
+         chat,
+         add_generation_prompt=True,
+         tokenize=True,
+         return_dict=True,
+         return_tensors="pt"
+     )
+
+     # Move inputs to device
+     inputs = {k: v.to(device) if isinstance(v, torch.Tensor) else v for k, v in inputs.items()}
+
+     # Generate text with appropriate settings
+     with torch.no_grad():  # Disable gradients for inference
+         outputs = model.generate(
+             **inputs,
+             max_new_tokens=2048,
+             temperature=temperature if temperature > 0 else 0.0,
+             use_cache=True,
+             do_sample=temperature > 0,
+         )
+
+     # Decode the output
+     output_text = processor.decode(outputs[0], skip_special_tokens=True)
+
+     return output_text
+
+
+ def process_input(file_input, temperature, page_num):
+     """Process uploaded file (image or PDF) and extract text."""
+     if file_input is None:
+         return "Please upload an image or PDF first.", "", "", None, gr.update()
+
+     image_to_process = None
+     page_info = ""
+
+     file_path = file_input if isinstance(file_input, str) else file_input.name
+
+     # Handle PDF files
+     if file_path.lower().endswith('.pdf'):
+         try:
+             image_to_process, total_pages, actual_page = process_pdf(file_path, int(page_num))
+             page_info = f"Processing page {actual_page} of {total_pages}"
+         except Exception as e:
+             return f"Error processing PDF: {str(e)}", "", "", None, gr.update()
+     # Handle image files
+     else:
+         try:
+             image_to_process = Image.open(file_path)
+             page_info = "Processing image"
+         except Exception as e:
+             return f"Error opening image: {str(e)}", "", "", None, gr.update()
+
+     try:
+         # Extract text using LightOnOCR
+         extracted_text = extract_text_from_image(image_to_process, temperature)
+
+         return extracted_text, extracted_text, page_info, image_to_process, gr.update()
+
+     except Exception as e:
+         error_msg = f"Error during text extraction: {str(e)}"
+         return error_msg, error_msg, page_info, image_to_process, gr.update()
+
+
+ def update_slider(file_input):
+     """Update page slider based on PDF page count."""
+     if file_input is None:
+         return gr.update(maximum=20, value=1)
+
+     file_path = file_input if isinstance(file_input, str) else file_input.name
+
+     if file_path.lower().endswith('.pdf'):
+         try:
+             pdf = pdfium.PdfDocument(file_path)
+             total_pages = len(pdf)
+             pdf.close()
+             return gr.update(maximum=total_pages, value=1)
+         except:
+             return gr.update(maximum=20, value=1)
+     else:
+         return gr.update(maximum=1, value=1)
+
+
+ # Create Gradio interface
+ with gr.Blocks(title="📖 Image/PDF OCR with LightOnOCR", theme=gr.themes.Soft()) as demo:
+     gr.Markdown(f"""
+     # 📖 Image/PDF to Text Extraction (LightOnOCR + Zero GPU)
+
+     **💡 How to use:**
+     1. Upload an image or PDF
+     2. For PDFs: select which page to extract (1-20)
+     3. Adjust temperature if needed (0.0 for deterministic, higher for more varied output)
+     4. Click "Extract Text"
+
+     **Note:** The Markdown rendering for tables may not always be perfect. Check the raw output for complex tables!
+
+     **Model:** LightOnOCR-1B-1025 by LightOn AI
+     **Device:** {device.upper()}
+     **Attention:** {attn_implementation}
+     """)
+
+     with gr.Row():
+         with gr.Column(scale=1):
+             file_input = gr.File(
+                 label="🖼️ Upload Image or PDF",
+                 file_types=[".pdf", ".png", ".jpg", ".jpeg"],
+                 type="filepath"
+             )
+             rendered_image = gr.Image(
+                 label="📄 Preview",
+                 type="pil",
+                 height=400,
+                 interactive=False
+             )
+             num_pages = gr.Slider(
+                 minimum=1,
+                 maximum=20,
+                 value=1,
+                 step=1,
+                 label="PDF: Page Number",
+                 info="Select which page to extract"
+             )
+             page_info = gr.Textbox(
+                 label="Processing Info",
+                 value="",
+                 interactive=False
+             )
+             temperature = gr.Slider(
+                 minimum=0.0,
+                 maximum=1.0,
+                 value=0.2,
+                 step=0.05,
+                 label="Temperature",
+                 info="0.0 = deterministic, Higher = more varied"
+             )
+             submit_btn = gr.Button("Extract Text", variant="primary")
+             clear_btn = gr.Button("Clear", variant="secondary")
+
+         with gr.Column(scale=2):
+             output_text = gr.Markdown(
+                 label="📄 Extracted Text (Rendered)",
+                 value="*Extracted text will appear here...*"
+             )
+
+     with gr.Row():
+         with gr.Column():
+             raw_output = gr.Textbox(
+                 label="Raw Markdown Output",
+                 placeholder="Raw text will appear here...",
+                 lines=20,
+                 max_lines=30,
+                 show_copy_button=True
+             )
+
+     # Event handlers
+     submit_btn.click(
+         fn=process_input,
+         inputs=[file_input, temperature, num_pages],
+         outputs=[output_text, raw_output, page_info, rendered_image, num_pages]
+     )
+
+     file_input.change(
+         fn=update_slider,
+         inputs=[file_input],
+         outputs=[num_pages]
+     )
+
+     clear_btn.click(
+         fn=lambda: (None, "*Extracted text will appear here...*", "", "", None, 1),
+         outputs=[file_input, output_text, raw_output, page_info, rendered_image, num_pages]
+     )
+

  if __name__ == "__main__":
      demo.launch()
+ ```
+
+ **Key improvements:**
+
+ 1. **Conditional flash-attn installation**: Only installs flash-attn when CUDA is available
+ 2. **Automatic attention selection**:
+    - **GPU**: `flash_attention_2` (fastest and most memory-efficient)
+    - **CPU**: `eager` (standard PyTorch attention, best for CPU)
+ 3. **Appropriate dtype**: `bfloat16` for GPU, `float32` for CPU
+ 4. **Performance optimizations**:
+    - Added `torch.no_grad()` context for inference
+    - Proper temperature handling (0.0 for greedy decoding)
+ 5. **UI feedback**: Shows device and attention implementation in the interface
+
+ **Requirements.txt:**
+ ```
+ gradio
+ torch
+ transformers>=4.37.0
+ pypdfium2
+ pillow
+ spaces
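
The "Key improvements" notes above describe the commit's device and temperature branching in prose. Below is a minimal standalone sketch of the same pattern, using plain `torch` only and no model download; the two helper function names are illustrative and not part of the committed app.

```python
import torch

def pick_runtime_config() -> dict:
    """Device-dependent setup as in the commit: flash_attention_2 + bfloat16
    on CUDA, eager attention + float32 on CPU."""
    if torch.cuda.is_available():
        # flash_attention_2 additionally requires the flash-attn wheel
        return {"device": "cuda",
                "attn_implementation": "flash_attention_2",
                "dtype": torch.bfloat16}
    return {"device": "cpu",
            "attn_implementation": "eager",
            "dtype": torch.float32}

def generation_kwargs(temperature: float) -> dict:
    """Temperature handling as in extract_text_from_image: 0.0 selects
    greedy decoding (do_sample=False), any positive value enables sampling."""
    if temperature > 0:
        return {"do_sample": True, "temperature": temperature}
    return {"do_sample": False}

if __name__ == "__main__":
    print(pick_runtime_config())
    print(generation_kwargs(0.0))  # greedy
    print(generation_kwargs(0.2))  # sampled
```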