File size: 14,831 Bytes
fb0cc53
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f551837
 
fb0cc53
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
# -*- coding: utf-8 -*-
"""

Created on Mon Nov 24 14:58:03 2025



@author: rmd2219

"""

from PIL import Image
import numpy as np
import gradio as gr
import fitz   # PyMuPDF
import tempfile
import os, json
import base64
from io import BytesIO
from openai import OpenAI
from paddleocr import PaddleOCR

# Build the OCR engine ONCE at import time — model loading is slow, so this
# must never happen per request.
ocr_engine = PaddleOCR(
    lang="en",
    use_angle_cls=False,  # Disable angle classification if docs are straight
    det_db_thresh=0.3,  # DB detector pixel threshold — see PaddleOCR docs
    det_db_box_thresh=0.5,  # minimum score to keep a detected text box
    rec_batch_num=6,      # Process multiple text regions at once
    det_limit_side_len=4096,  # Limit detection image size (default 960)
    use_gpu=False,
    enable_mkldnn=True,  # oneDNN/MKL-DNN CPU acceleration
    cpu_threads=8,       # Use more CPU threads
)

# The deployment stores the secret under OPEN_AI_API_KEY, but the OpenAI SDK
# reads OPENAI_API_KEY — mirror it across.
# BUG FIX: assigning None into os.environ raises TypeError, so only copy the
# value when the secret is actually set; otherwise let the SDK report the
# missing key on first use.
_raw_api_key = os.environ.get("OPEN_AI_API_KEY")
if _raw_api_key:
    os.environ["OPENAI_API_KEY"] = _raw_api_key

client = OpenAI()

#%%
def ocr_with_confidence_power(pil_img):
    """Run PaddleOCR on a PIL image and return (full_text, avg_confidence_pct).

    Works across PaddleOCR versions: handles both the newer per-page dict
    result ('rec_texts'/'rec_scores') and the older list of
    [box, (text, conf)] entries. No preprocessing required.
    """
    # PaddleOCR wants a numpy array; `ocr` runs detection + recognition.
    result = ocr_engine.ocr(np.array(pil_img))

    if not result:
        print("⚠️ OCR returned None or empty")
        return "", 0

    page = result[0]

    # Newer PaddleOCR releases return a dict per page.
    if isinstance(page, dict):
        texts = page.get('rec_texts', [])

        # Confidence scores may live under different keys across versions.
        scores = page.get('rec_scores', []) or page.get('scores', [])
        if not scores:
            # Some versions might have it nested differently
            scores = [0.95] * len(texts)  # Default confidence if not found

        full_text = "\n".join(texts)

        if scores:
            avg_conf = sum(float(s) for s in scores) / len(scores) * 100
        else:
            avg_conf = 95.0  # Default

        print(f"=== OCR COMPLETE ===")
        print(f"Total lines: {len(texts)}")
        print(f"Avg confidence: {avg_conf:.2f}%")
        print(f"Full text preview: {full_text[:200]}...")

        return full_text, avg_conf

    # Older PaddleOCR releases return a list of [box, (text, conf)] items.
    print("Processing list format")
    recognized_lines = []
    confidences = []

    for entry in page:
        # Skip anything that is not shaped like [box, (text, conf)].
        if not (isinstance(entry, (list, tuple)) and len(entry) >= 2):
            continue
        try:
            text_conf = entry[1]
            if isinstance(text_conf, (list, tuple)) and len(text_conf) >= 2:
                recognized_lines.append(text_conf[0])
                confidences.append(float(text_conf[1]) * 100)
        except Exception as e:
            print(f"Error parsing item: {e}")
            continue

    full_text = "\n".join(recognized_lines)
    avg_conf = sum(confidences) / len(confidences) if confidences else 0

    print(f"=== OCR COMPLETE ===")
    print(f"Total lines: {len(recognized_lines)}")
    print(f"Avg confidence: {avg_conf:.2f}%")

    return full_text, avg_conf

def pil_to_base64(pil_img):
    """Serialize a PIL image to a PNG data-URI string."""
    buf = BytesIO()
    pil_img.save(buf, format="PNG")
    b64_payload = base64.b64encode(buf.getvalue()).decode("utf-8")
    return "data:image/png;base64," + b64_payload

def flatten_ocr_result(ocr_pages):
    """Join per-page OCR text with blank lines; stringify anything else."""
    if not isinstance(ocr_pages, list):
        return str(ocr_pages)
    return "\n\n".join(page["text"] for page in ocr_pages)

#%%

# System contract for the extraction model: concise, table-friendly plain-text
# output, and an explicit mandate to call the image tool instead of guessing.
# FIX: corrected prompt typos ("input ot" -> "input to", "feild" -> "field")
# so the instructions read cleanly to the model.
SYSTEM_PROMPT = """

    You extract structured information from building related documents.

    

    Your task:

    1. Identify the information requested by the user in the PDF.

    2. Identify how you should organize that information to clearly return it to the user for viewing in table format.

    3. Only return fields strictly requested by the user.

    4. Output your response in as structured a manner as possible. Do not use paragraphs but use bullets organized by category.

    5. Make your output very concise and precise. Try to summarize in a way that would be easily input to a table if the user wanted to.

    

    Rules:

    - Only include data actually present in the OCR text, however if you can reasonably infer a use value include it. And if it appears a single use is describing multiple floors, extrapolate but note it.

    - Do NOT invent additional data.

    - Provide a "notes" section to contain model concerns such as: inconsistent numbers, ambiguous use text, missing columns, or anything suspicious in the OCR. make notes field concise. Include whether or not an image was needed for assistance.

    - If the OCR text passed to you is unclear you have access to the image directly through your tool "get_pdf_page_image"

    If you decide that looking at the image would improve accuracy,

    you MUST call the function `get_pdf_page_image`.

    

    NEVER describe fetching or retrieving the image in plain text.

    NEVER state that you will call the tool. ONLY call the tool directly.



    Respond in **Markdown**

    Avoid using characters that trigger markdown formatting in responses.

    Specifically:

    - Do NOT use underscores (_)

    - Do NOT use asterisks (*)

    - Do NOT use tildes (~)

    - Do NOT use backticks (`)

    - Do NOT use double characters like ** __ ~~ **

    - Do NOT attempt bold, italics, strikethrough, or inline code formatting

    - Respond in plain text only, with no markdown formatting

    """
# Tool manifest advertised to the model: a single zero-argument function the
# model can call to request the rendered page images.
_IMAGE_TOOL_SPEC = {
    "type": "function",
    "function": {
        "name": "get_pdf_page_image",
        "description": '''This function MUST be called whenever visual inspection of the page is needed,

                                even slightly. You MUST NOT describe the image in words unless you have

                                called this tool and received the images from the user.''',
        "parameters": {"type": "object", "properties": {}, "required": []},
    },
}

TOOLS = [_IMAGE_TOOL_SPEC]
def get_pdf_page_image(images):
    """Encode each page image as an OpenAI `image_url` content block.

    Returns {"images": [...]} where every entry is a base64 PNG data-URI
    block with detail="high", ready to append to a user message.
    """
    print("Encoding images as base64 image_url blocks...")

    content_blocks = []
    for page_img in images:
        # Serialize the page as PNG and base64-encode it.
        png_buf = BytesIO()
        page_img.save(png_buf, format="PNG")
        encoded = base64.b64encode(png_buf.getvalue()).decode("utf-8")

        # Shape matches the chat-completions image_url content schema.
        content_blocks.append(
            {
                "type": "image_url",
                "image_url": {
                    "url": f"data:image/png;base64,{encoded}",
                    "detail": "high",
                },
            }
        )

    return {"images": content_blocks}

def llm_extract_stream(raw_text: str, images, user_input):

    # Initial messages
    messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": f'''

         The user has requested the following information from this document: {user_input}

         

         If any part of the OCR appears unreliable, noisy, or uncertain,

         you MUST call the image tool instead of guessing.

         

         The OCR for this document is {raw_text}

         '''},
    ]

    # FIRST CALL β€” model decides if it needs images
    response = client.chat.completions.create(
        model="gpt-5",
        messages=messages,
        tools=TOOLS,
        tool_choice="auto"
    )

    msg = response.choices[0].message

    # If the model CALLS the tool
    if msg.tool_calls:
        tool_call = msg.tool_calls[0]

        if tool_call.function.name == "get_pdf_page_image":
            yield "πŸ“Έ Model requested image help."

            # TOOL EXECUTION β†’ returns file_ids
            tool_result = get_pdf_page_image(images)
            
            messages.append(msg)
            
            messages.append({
                "role": "tool",
                "tool_call_id": tool_call.id,
                "content": json.dumps({"status": "images will follow"})
            })

            # *** CRITICAL ***
            # Now ADD A USER MESSAGE with the ACTUAL image blocks
            messages.append({
                "role": "user",
                "content": [
                    {"type": "text", "text": "Here are the images you requested."}
                ] + tool_result["images"]
            })

            # SECOND CALL β€” final answer, NO further tool calls allowed
            stream_text = ""
            for chunk in client.chat.completions.create(
                model="gpt-5",
                messages=messages,
                tools=TOOLS,
                tool_choice="none",
                stream=True
            ):
                delta = chunk.choices[0].delta
                token = getattr(delta, "content", "") or ""
                stream_text += token
                yield stream_text   # REAL streaming

            return

    stream_text = ""
    for chunk in client.chat.completions.create(
        model="gpt-5",
        messages=messages,
        stream=True
    ):
        delta = chunk.choices[0].delta
        token = getattr(delta, "content", "") or ""
        stream_text += token
        yield stream_text
        
def ensure_max_resolution(img, max_dim=2000):
    """Downscale `img` so its longest side is at most `max_dim` pixels.

    Returns the image unchanged when it already fits; otherwise returns a
    proportionally resized copy (LANCZOS resampling).
    """
    width, height = img.size
    longest_side = max(width, height)
    if longest_side <= max_dim:
        return img
    factor = max_dim / longest_side
    new_size = (int(width * factor), int(height * factor))
    return img.resize(new_size, Image.LANCZOS)



def file_to_images(file_path, dpi=300):
    """Load a document as a list of PIL images.

    PDFs are rasterized one image per page (capped by ensure_max_resolution);
    any other file is opened as a single RGB image.
    """
    # Plain image file → single-element list, no resolution cap applied.
    if not file_path.lower().endswith(".pdf"):
        return [Image.open(file_path).convert("RGB")]

    # PDF → rasterize every page via PyMuPDF.
    doc = fitz.open(file_path)
    rendered_pages = []
    for page in doc:
        pix = page.get_pixmap(dpi=dpi)
        page_img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
        rendered_pages.append(ensure_max_resolution(page_img))

    return rendered_pages

def extract_pages_from_file(file_path, dpi=300):
    """OCR every page of a PDF or image file.

    Returns a list of per-page dicts with keys: page (0-based index),
    text (OCR result), image (the PIL image), confidence (avg OCR %).
    """
    results = []
    for page_index, page_img in enumerate(file_to_images(file_path, dpi=dpi)):
        page_text, page_conf = ocr_with_confidence_power(page_img)
        results.append(
            {
                "page": page_index,
                "text": page_text,
                "image": page_img,
                "confidence": page_conf,
            }
        )
    return results

def process_single_pdf_stream(pdf_path, user_input):
    """Generator: OCR the document, then stream LLM extraction updates."""
    yield "⏳ Running OCR..."

    # OCR + image extraction
    pages = extract_pages_from_file(pdf_path)
    raw_text = flatten_ocr_result(pages)
    print(raw_text)
    page_images = [page["image"] for page in pages]

    yield "⏳ Running LLM with tool-calling..."

    # Relay each cumulative LLM snapshot to the caller as it arrives.
    for partial in llm_extract_stream(raw_text, page_images, user_input):
        yield partial


#%%
# Gate password for the UI, supplied via the PASSWORD env var (None if unset).
PASSWORD = os.environ.get("PASSWORD")

def gradio_process(pdf_file, user_input):
    """Gradio streaming handler: yields (status_markdown, gallery_update) pairs.

    With an uploaded file, the gallery already set by the upload-preview event
    is preserved (gr.update() = no change). Without one, a bundled example
    image is used and its preview is pushed to the gallery before OCR starts.
    """
    if pdf_file:
        pdf_path = pdf_file.name
        baseline_preview = None  # upload event already populated the gallery
        # BUG FIX: previously yielded THREE values ("...", None, gr.update())
        # for an event wired to TWO outputs [status_box, gallery].
        yield "⏳ Running OCR...", gr.update()
    else:
        # Fall back to the bundled example document.
        pdf_path = 'example_image.jpg'

        # Generate preview manually
        baseline_preview = preview_pdf(pdf_path)

        # Update gallery BEFORE streaming OCR
        yield "⏳ Running OCR...", baseline_preview

    # STREAM THE STEPS
    for output in process_single_pdf_stream(pdf_path, user_input):
        if baseline_preview:
            # baseline mode → keep showing the example preview
            yield output, baseline_preview
        else:
            # upload mode → leave the user's own preview untouched
            yield output, gr.update()


def preview_pdf(pdf_file):
    """Render every page of a PDF to PNG files in a temp dir; return the paths."""
    # Accept either a Gradio file object (has .name) or a plain path string.
    pdf_path = pdf_file.name if hasattr(pdf_file, "name") else pdf_file

    doc = fitz.open(pdf_path)
    out_dir = tempfile.mkdtemp()

    page_paths = []
    for page_index, page in enumerate(doc):
        png_path = os.path.join(out_dir, f"page_{page_index + 1}.png")
        page.get_pixmap(dpi=150).save(png_path)
        page_paths.append(png_path)

    return page_paths

def check_password(pw):
    """Validate the entered password and return Gradio visibility updates.

    Returns (password_block_update, main_app_update, message): on success the
    gate is hidden and the app shown; on failure the gate stays visible with
    an error message.
    """
    if pw != PASSWORD:
        return (
            gr.update(visible=True),
            gr.update(visible=False),
            "❌ Incorrect password, try again."
        )
    return (
        gr.update(visible=False),   # hide password section
        gr.update(visible=True),    # show main app
        ""
    )

with gr.Blocks() as demo:
    with gr.Group(visible=True) as password_block:
        gr.Markdown("### πŸ”’ Enter password to access the chatbot")
        pw_box = gr.Textbox(type="password", placeholder="Enter password...", show_label=False)
        pw_btn = gr.Button("Unlock")
        pw_msg = gr.Markdown("")
    with gr.Group(visible=False) as main_app:
        gr.Markdown("""

            ## πŸ“„ Universal Document Interpreter  

            """)
    
        pdf_input = gr.File(
            label="Upload PDF or Image",
            file_types=[".pdf", ".png", ".jpg", ".jpeg"]
        )
    
        # User input triggers LLM extraction
        user_input = gr.Textbox(
            label="Instructions",
            placeholder="Type desired characteristics to extract..."
        )
        
        run_btn_slow = gr.Button("Run Extraction with Powerful OCR - Will be Slow")
    
        # PDF Preview
        gallery = gr.Gallery(
            label="Preview",
            columns=1,
            height="auto",
            object_fit="contain"
        )
    
        status_box = gr.Markdown()
    
        # PREVIEW ONLY on PDF upload
        pdf_input.upload(preview_pdf, pdf_input, gallery)
    
        # EXTRACTION: runs when user hits ENTER after typing in the box
        
        run_btn_slow.click(
            fn=gradio_process,
            inputs=[pdf_input, user_input],
            outputs=[status_box, gallery]
        )
    
    pw_box.submit(
        fn=check_password,
        inputs=[pw_box],
        outputs=[password_block, main_app, pw_msg],
    )
    pw_btn.click(
        fn=check_password,
        inputs=[pw_box],
        outputs=[password_block, main_app, pw_msg]
    )
   
demo.launch(inbrowser=True)