Biifruu commited on
Commit
bf18e86
·
verified ·
1 Parent(s): fabee1a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +169 -158
app.py CHANGED
@@ -1,197 +1,208 @@
1
- import os
2
- import unicodedata
3
- import fitz
4
- from PIL import Image
5
  import gradio as gr
 
 
6
  import numpy as np
7
  import cv2
8
- from dotenv import load_dotenv
9
- import easyocr
10
  import pytesseract
 
 
 
 
 
 
11
 
12
- load_dotenv()
 
13
 
14
- reader = easyocr.Reader(['es', 'en'])
15
 
16
- def clean_text(text):
17
  text = unicodedata.normalize("NFC", text)
18
  lines = text.splitlines()
19
  cleaned_lines = [line.strip() for line in lines if line.strip()]
20
  return "\n".join(cleaned_lines)
21
 
22
- def clean_ocr_lines(text):
23
- lines = text.splitlines()
24
- cleaned = []
25
- for line in lines:
26
- line = line.strip()
27
- if line:
28
- line = " ".join(line.split())
29
- cleaned.append(line)
30
- return "\n".join(cleaned)
31
-
32
- def preprocess_for_ocr(pil_image):
33
- gray = pil_image.convert('L')
34
- np_img = np.array(gray)
35
- try:
36
- from skimage.filters import threshold_sauvola
37
- window_size = 25
38
- thresh_sauvola = threshold_sauvola(np_img, window_size=window_size)
39
- binary = (np_img > thresh_sauvola).astype("uint8") * 255
40
- except:
41
- binary = cv2.adaptiveThreshold(np_img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
42
- cv2.THRESH_BINARY, 31, 10)
43
- return Image.fromarray(binary)
44
-
45
- def run_easyocr(image_path):
46
- img = Image.open(image_path)
47
- img = preprocess_for_ocr(img)
48
- img.save(image_path)
49
- results = reader.readtext(image_path, detail=0, paragraph=False, decoder='greedy')
50
- text = "\n".join(results)
51
- return clean_ocr_lines(text)
52
-
53
- def run_tesseract_ocr(pil_image):
54
- pil_image = preprocess_for_ocr(pil_image)
55
- config = '--oem 3 --psm 6 -l spa+eng'
56
- text = pytesseract.image_to_string(pil_image, config=config)
57
- return clean_ocr_lines(text)
58
-
59
- def extract_embedded_images(page, page_number, seen_xrefs):
60
- image_paths = []
61
- blocks = []
62
- for img_index, img in enumerate(page.get_images(full=True)):
63
- xref = img[0]
64
- if xref in seen_xrefs:
65
- continue
66
- seen_xrefs.add(xref)
67
- base_image = page.parent.extract_image(xref)
68
- image_bytes = base_image["image"]
69
- ext = base_image["ext"]
70
- image_path = f"/tmp/embedded_p{page_number + 1}_{img_index + 1}.{ext}"
71
- with open(image_path, "wb") as f:
72
- f.write(image_bytes)
73
- image_paths.append(image_path)
74
- blocks.append(f"![Imagen_Embedded]({image_path})\n")
75
- return blocks, image_paths
76
-
77
- def extract_visual_regions(image, page_number):
78
- results = []
79
  np_img = np.array(image.convert("RGB"))
80
  gray = cv2.cvtColor(np_img, cv2.COLOR_RGB2GRAY)
81
  _, binary = cv2.threshold(gray, 220, 255, cv2.THRESH_BINARY_INV)
82
  closed = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_RECT, (15, 15)))
83
- num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(closed, connectivity=8)
84
 
 
 
85
  for i in range(1, num_labels):
86
  x, y, w, h, area = stats[i]
87
- if area > 5000 and h > 50 and w > 50 and 0.3 < (w / float(h)) < 3.5:
88
  bbox = (x, y, x + w, y + h)
89
  crop = image.crop(bbox)
90
- crop_path = f"/tmp/visual_crop_p{page_number + 1}_{i}.jpg"
91
- crop.save(crop_path)
92
- text_crop = run_tesseract_ocr(crop)
93
- word_count = len(text_crop.split())
94
- if 2 < word_count < 20:
95
- results.append(crop_path)
96
  return results
97
 
98
- def is_scanned_page(page):
99
- text = page.get_text("text")
100
- return not text or len(text.strip()) < 30
101
-
102
- def process_document(input_file):
103
- if not input_file:
104
- return None, "No file uploaded", None
105
-
106
- temp_path = input_file.name
107
- ext = os.path.splitext(temp_path)[-1].lower()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
108
  markdown_output = ""
109
- all_images = []
110
  seen_xrefs = set()
111
 
112
- if ext in [".png", ".jpg", ".jpeg"]:
113
- image = Image.open(temp_path)
114
- text = run_tesseract_ocr(image)
115
- markdown_output += f"## Resultado OCR\n\n{clean_text(text)}\n"
116
- return markdown_output, [], None
117
-
118
- doc = fitz.open(temp_path)
119
- for i, page in enumerate(doc):
120
- markdown_output += f"\n## Página {i + 1}\n\n"
121
- text_dict = page.get_text("dict")
122
- lines = []
123
- for block in text_dict["blocks"]:
124
- if "lines" in block:
125
- for l in block["lines"]:
126
- line_parts = [span["text"].strip() for span in l["spans"] if span["text"].strip()]
127
- if line_parts:
128
- lines.append(" ".join(line_parts))
129
- lines.append("")
130
- text = "\n".join(lines).strip()
131
-
132
- if not is_scanned_page(page):
133
- markdown_output += f"{clean_text(text)}\n"
134
  else:
 
 
135
  pix = page.get_pixmap(dpi=300)
136
  img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
137
- image_path = f"/tmp/ocr_page_{i + 1}.jpg"
138
  img.save(image_path)
139
- all_images.append(image_path)
140
- markdown_output += f"![Pagina_Scaneada]({image_path})\n\n"
141
- ocr_text = run_tesseract_ocr(img)
142
- markdown_output += f"{clean_text(ocr_text)}\n"
143
- crops = extract_visual_regions(img, i)
144
- for crop_path in crops:
145
- all_images.append(crop_path)
146
- markdown_output += f"![Region_Detectada]({crop_path})\n"
147
-
148
- blocks, embedded_images = extract_embedded_images(page, i, seen_xrefs)
149
- for block in blocks:
150
- markdown_output += block
151
- all_images.extend(embedded_images)
152
- markdown_output += "\n---\n\n"
 
 
 
 
 
153
 
154
  markdown_path = "/tmp/resultado.md"
155
  with open(markdown_path, "w", encoding="utf-8") as f:
156
  f.write(markdown_output)
157
 
158
- return markdown_output.strip(), all_images, markdown_path
159
 
160
- # UI
161
 
162
- theme = gr.themes.Soft(primary_hue="indigo", secondary_hue="rose", neutral_hue="stone")
 
 
 
163
 
164
- with gr.Blocks(theme=theme) as demo:
165
- gr.Markdown("# OCR Preciso + Extracción Inteligente de Imágenes del PDF")
 
166
 
167
- with gr.Row():
168
- with gr.Column(scale=1):
169
- input_file = gr.File(label="Sube PDF o Imagen", file_types=[".pdf", ".png", ".jpg", ".jpeg"])
170
- run_button = gr.Button("Ejecutar OCR")
171
- with gr.Column(scale=2):
172
- markdown_output = gr.Textbox(
173
- label="Markdown Generado",
174
- lines=25,
175
- max_lines=1000,
176
- interactive=True,
177
- elem_id="markdown_scrollbox"
178
- )
179
- gallery_output = gr.Gallery(label="Imágenes Extraídas", type="file")
180
- download_md = gr.File(label="Descargar Markdown")
181
-
182
- run_button.click(
183
- fn=process_document,
184
- inputs=[input_file],
185
- outputs=[markdown_output, gallery_output, download_md]
186
- )
187
-
188
- demo.css = """
189
- #markdown_scrollbox textarea {
190
- overflow-y: auto !important;
191
- max-height: 600px;
192
- resize: vertical;
193
- font-family: monospace;
194
- }
195
- """
196
-
197
- demo.launch()
 
 
 
 
 
1
  import gradio as gr
2
+ import fitz # PyMuPDF
3
+ from PIL import Image
4
  import numpy as np
5
  import cv2
 
 
6
  import pytesseract
7
+ import base64
8
+ import os
9
+ import unicodedata
10
+
11
# Translation support: English -> Spanish via Hugging Face transformers.
from transformers import pipeline

# Build the EN->ES translation pipeline once at import time: loading the
# Helsinki-NLP model is expensive, so a single shared instance is reused
# for every request.
translator = pipeline("translation_en_to_es", model="Helsinki-NLP/opus-mt-en-es")
16
 
17
+ # ---------- OCR y limpieza de texto ----------
18
 
19
def clean_ocr_text(text):
    """Normalize *text* to NFC and drop blank lines.

    Each remaining line is stripped of leading/trailing whitespace and the
    result is re-joined with single newlines.
    """
    normalized = unicodedata.normalize("NFC", text)
    stripped = (raw.strip() for raw in normalized.splitlines())
    return "\n".join(line for line in stripped if line)
24
 
25
def translate_text(text):
    """Translate *text* from English to Spanish using the shared pipeline.

    Very short inputs (< 5 non-space chars of padding) are returned untouched.
    Longer inputs are split into 500-character windows (the model has a
    limited context), translated window by window, and re-joined with
    newlines. Translation is unconditional; no language detection is done.
    """
    if len(text.strip()) < 5:
        return text
    window = 500
    pieces = []
    for start in range(0, len(text), window):
        outcome = translator(text[start:start + window])
        pieces.append(outcome[0]["translation_text"])
    return "\n".join(pieces)
39
+
40
+ # ---------- Funciones de imagen ----------
41
+
42
def text_area_ratio(image):
    """Return the fraction of the image covered by text-sized components.

    Binarizes the grayscale image (inverse threshold at 150), finds external
    contours, and sums the bounding-box areas of components whose height is
    8-40 px and width 5-100 px — rough glyph/word dimensions at this DPI.
    Returns 0 for a degenerate zero-area image.
    """
    np_img = np.array(image.convert("L"))
    _, thresh = cv2.threshold(np_img, 150, 255, cv2.THRESH_BINARY_INV)
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    text_area = 0
    for contour in contours:
        # Compute the bounding rect once per contour; the original evaluated
        # cv2.boundingRect(c) three times per contour inside a comprehension.
        _, _, w, h = cv2.boundingRect(contour)
        if 8 < h < 40 and 5 < w < 100:
            text_area += w * h
    total_area = np_img.shape[0] * np_img.shape[1]
    return text_area / total_area if total_area > 0 else 0
49
+
50
def has_significant_text(image):
    """True when text-like components cover more than 25% of the image."""
    ratio = text_area_ratio(image)
    return ratio > 0.25
52
+
53
def is_primarily_text(image, ocr_threshold=30):
    """Decide whether *image* is mostly text.

    Runs the cheap area heuristic first; only if it passes is the (slow)
    Tesseract OCR invoked, and the region counts as text when the OCR output
    exceeds *ocr_threshold* characters after stripping.
    """
    if not has_significant_text(image):
        return False
    ocr_result = pytesseract.image_to_string(image, lang="eng+spa")
    return len(ocr_result.strip()) > ocr_threshold
58
+
59
def is_likely_photo(crop):
    """Heuristic photo detector.

    A crop is considered photographic when its grayscale version has both a
    wide tonal spread (std dev > 25) and many distinct gray levels (> 50) —
    line art and flat fills fail one or both checks.
    """
    gray = cv2.cvtColor(np.array(crop), cv2.COLOR_RGB2GRAY)
    return np.std(gray) > 25 and len(np.unique(gray)) > 50
63
+
64
def extract_visual_regions(image):
    """Find photo-like regions on a rendered page and return them as PIL crops.

    Pipeline: inverse-binarize at threshold 220 (page background becomes
    black), close gaps with a 15x15 rectangular kernel so adjacent fragments
    merge, then keep connected components that are large enough (area > 2000),
    roughly rectangular (aspect ratio 0.3-3.5), look photographic, and are not
    dominated by text.
    """
    np_img = np.array(image.convert("RGB"))
    gray = cv2.cvtColor(np_img, cv2.COLOR_RGB2GRAY)
    _, binary = cv2.threshold(gray, 220, 255, cv2.THRESH_BINARY_INV)
    closed = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_RECT, (15, 15)))

    num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(closed, connectivity=8)
    results = []
    for i in range(1, num_labels):  # label 0 is the background component
        x, y, w, h, area = stats[i]
        if area > 2000 and 0.3 < (w / float(h)) < 3.5:
            bbox = (x, y, x + w, y + h)
            crop = image.crop(bbox)
            # Keep only crops that look like photos AND contain little text
            # (cheap ratio check first, expensive OCR check last).
            if is_likely_photo(crop) and text_area_ratio(crop) < 0.25 and not is_primarily_text(crop):
                results.append(crop)
    return results
80
 
81
+ # ---------- Extracción de texto + imágenes ----------
82
+
83
def clean_bullet_line(text):
    """Normalize one line of PDF/OCR text: map bullet glyphs to '-' and collapse whitespace.

    OCR commonly misreads bullets as 'e@' or '@'; PDFs with symbol fonts emit
    the private-use glyph U+F0B7; '*', '\u00b7' and en-dash '\u2013' are also
    treated as bullets.
    """
    text = unicodedata.normalize("NFKC", text)
    text = text.replace("e@", "-")
    text = text.replace("@", "-")
    # BUG FIX: the original called text.replace("", "-"), which inserts '-'
    # between every character of the line. The intended target was almost
    # certainly the private-use bullet glyph U+F0B7.
    text = text.replace("\uf0b7", "-")
    text = text.replace("*", "-")
    text = text.replace("·", "-")
    text = text.replace("–", "-")
    return " ".join(text.split())
93
+
94
def extract_text_markdown(doc, image_paths, page_index, seen_xrefs):
    """Render one PDF page's native text (plus embedded images) as Markdown.

    Collects text lines (with their vertical position and max span font size)
    and embedded images into `elements`, sorts them top-to-bottom, translates
    each text line to Spanish, and emits headers for large-font lines.
    Extracted image files are appended to *image_paths* (mutated in place);
    *seen_xrefs* (mutated in place) deduplicates images across pages.

    NOTE(review): this indexes doc[0] — the caller passes a one-page list
    [page], so *doc* is effectively "the page wrapped in a list". Confirm no
    other caller passes a full document.
    """
    markdown_output = f"\n## Página {page_index + 1}\n\n"
    image_counter = 1
    elements = []  # (sort_key_y, markdown_text, font_size) triples
    page = doc[0]
    blocks = page.get_text("dict")["blocks"]

    for b in blocks:
        y = b["bbox"][1]  # NOTE(review): unused — line_y below is used instead
        if b["type"] == 0:  # type 0 = text block in PyMuPDF's "dict" output
            for line in b["lines"]:
                line_y = line["bbox"][1]
                line_text = " ".join([span["text"] for span in line["spans"]]).strip()
                line_text = clean_bullet_line(line_text)
                # Largest span size on the line decides header status later.
                max_font_size = max([span.get("size", 10) for span in line["spans"]])
                if line_text:
                    elements.append((line_y, line_text, max_font_size))

    images_on_page = page.get_images(full=True)
    for img_index, img in enumerate(images_on_page):
        xref = img[0]
        if xref in seen_xrefs:
            continue
        seen_xrefs.add(xref)
        try:
            base_image = page.parent.extract_image(xref)
            image_bytes = base_image["image"]
            ext = base_image["ext"]
            image_path = f"/tmp/imagen_p{page_index + 1}_{img_index + 1}.{ext}"
            with open(image_path, "wb") as f:
                f.write(image_bytes)
            image_paths.append(image_path)
            # NOTE(review): float("inf") - img_index is still inf for any
            # finite index, so every image gets the same sort key; they land
            # after all text in insertion order only because sort() is stable.
            elements.append((float("inf") - img_index, f"\n\n![imagen_{image_counter}]({image_path})\n", 10))
            image_counter += 1
        except Exception as e:
            elements.append((float("inf"), f"[Error imagen: {e}]", 10))

    # Top-to-bottom reading order by vertical position.
    elements.sort(key=lambda x: x[0])
    previous_y = None

    for y, text, font_size in elements:
        is_header = font_size >= 14  # large fonts become Markdown H3 headers
        # Insert a blank line when there is a visible vertical gap (> 10 pt).
        if previous_y is not None and abs(y - previous_y) > 10:
            markdown_output += "\n"
        translated = translate_text(text.strip())
        markdown_output += f"\n### {translated}\n" if is_header else translated + "\n"
        previous_y = y

    markdown_output += "\n---\n\n"
    return markdown_output.strip()
144
+
145
+ # ---------- Función principal ----------
146
+
147
def convert(pdf_file):
    """Convert an uploaded PDF into Markdown with Spanish translation.

    For pages with native text (> 30 chars) the text layer is used; otherwise
    the page is rendered at 300 dpi, OCR'd with Tesseract, translated, and
    scanned for photo-like regions.

    Returns a 3-tuple: (markdown string, list of image file paths, path to
    the written Markdown file) — matching the Gradio outputs.
    """
    if pdf_file is None:
        # Guard: clicking "Process PDF" with no upload used to crash.
        return "No file uploaded", [], None

    # BUG FIX: gr.File(type="filepath") passes a plain str path, which has no
    # .name attribute. Accept both a path string and a file-like object.
    temp_pdf_path = pdf_file if isinstance(pdf_file, str) else pdf_file.name

    doc = fitz.open(temp_pdf_path)
    try:
        markdown_output = ""
        image_paths = []
        seen_xrefs = set()

        for page_num in range(len(doc)):
            page = doc[page_num]
            text = page.get_text("text").strip()

            if len(text) > 30:
                # Native (selectable) PDF text.
                extracted = extract_text_markdown([page], image_paths, page_num, seen_xrefs)
                markdown_output += extracted + "\n"
            else:
                # "Scanned" page -> render and OCR.
                markdown_output += f"\n## Página {page_num + 1}\n\n"
                pix = page.get_pixmap(dpi=300)
                img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
                image_path = f"/tmp/ocr_page_{page_num + 1}.jpg"
                img.save(image_path)
                image_paths.append(image_path)
                markdown_output += f"![imagen_pagina_{page_num + 1}]({image_path})\n"

                try:
                    ocr_text = pytesseract.image_to_string(img, lang="eng+spa")
                except pytesseract.TesseractError:
                    # Best-effort OCR: a Tesseract failure leaves the page image only.
                    ocr_text = ""
                ocr_text_clean = clean_ocr_text(ocr_text)
                translated_ocr = translate_text(ocr_text_clean)
                markdown_output += translated_ocr + "\n"

                # Photo-like regions detected on the rendered page.
                crops = extract_visual_regions(img)
                for i, crop in enumerate(crops):
                    crop_path = f"/tmp/recorte_p{page_num + 1}_{i + 1}.jpg"
                    crop.save(crop_path)
                    image_paths.append(crop_path)
                    markdown_output += f"\n\n![imagen_detectada]({crop_path})\n"

                markdown_output += "\n---\n\n"
    finally:
        # Release the PyMuPDF document handle (the original leaked it).
        doc.close()

    markdown_path = "/tmp/resultado.md"
    with open(markdown_path, "w", encoding="utf-8") as f:
        f.write(markdown_output)

    return markdown_output.strip(), image_paths, markdown_path
194
 
195
# ---------- Gradio Interface ----------

with gr.Blocks() as demo:
    with gr.Row():
        # type="filepath" delivers the upload to `convert` as a str path.
        pdf_input = gr.File(label="Upload your PDF", type="filepath", file_types=[".pdf"])
        submit_btn = gr.Button("Process PDF")

    # Editable textbox holding the generated Markdown.
    markdown_output = gr.Textbox(label="Generated Markdown", lines=25, interactive=True)
    # NOTE(review): Gallery type="file" is deprecated in recent Gradio —
    # confirm the pinned Gradio version still accepts it.
    gallery_output = gr.Gallery(label="Extracted and Detected Images", type="file")
    download_md = gr.File(label="Download Markdown File")

    # Wire the button to the converter: outputs map 1:1 to convert()'s 3-tuple.
    submit_btn.click(fn=convert, inputs=[pdf_input], outputs=[markdown_output, gallery_output, download_md])

demo.launch()