# pdf_extractor / app.py

import gradio as gr
import fitz  # PyMuPDF
from PIL import Image
import numpy as np
import cv2
import pytesseract
import unicodedata

# Translation: initialize the EN->ES pipeline once at startup
from transformers import pipeline

translator = pipeline("translation_en_to_es", model="Helsinki-NLP/opus-mt-en-es")
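
# The pipeline returns a list with one dict per input, e.g. (illustrative
# output; the exact wording depends on the model):
#   translator("Hello world")  ->  [{'translation_text': 'Hola mundo'}]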


# ---------- OCR and text cleanup ----------
def clean_ocr_text(text):
    """Normalize Unicode and drop empty or whitespace-only lines from OCR output."""
    text = unicodedata.normalize("NFC", text)
    lines = text.splitlines()
    cleaned_lines = [line.strip() for line in lines if line.strip()]
    return "\n".join(cleaned_lines)
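
# Example (illustrative): clean_ocr_text("Hola \n\n  mundo ") -> "Hola\nmundo"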


def translate_text(text):
    """
    Translate text from English to Spanish. It always translates, for
    simplicity; language detection would make this more robust (see the
    commented langdetect sketch below).
    """
    if len(text.strip()) < 5:
        return text
    # Chunk to 500 characters so each piece stays within the model's input
    # limit; note this can split sentences (or even words) mid-stream.
    chunks = [text[i:i + 500] for i in range(0, len(text), 500)]
    translated = []
    for chunk in chunks:
        result = translator(chunk)
        translated.append(result[0]["translation_text"])
    return "\n".join(translated)
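
# Optional refinement (a minimal sketch, not part of the app): skip translation
# when the text is not English, using the third-party langdetect package. Left
# commented out to avoid adding a hard dependency.
#
# from langdetect import detect
#
# def translate_if_english(text):
#     try:
#         if detect(text) != "en":
#             return text  # not English: leave as-is
#     except Exception:
#         return text  # detection failed (e.g. too little text)
#     return translate_text(text)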


# ---------- Image helpers ----------
def text_area_ratio(image):
    """Estimate the fraction of the image covered by text-sized contours."""
    np_img = np.array(image.convert("L"))
    _, thresh = cv2.threshold(np_img, 150, 255, cv2.THRESH_BINARY_INV)
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Sum the areas of bounding boxes whose dimensions are typical of glyphs.
    text_area = 0
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        if 8 < h < 40 and 5 < w < 100:
            text_area += w * h
    total_area = np_img.shape[0] * np_img.shape[1]
    return text_area / total_area if total_area > 0 else 0


def has_significant_text(image):
    return text_area_ratio(image) > 0.25


def is_primarily_text(image, ocr_threshold=30):
    """OCR the image and decide whether it is mostly text."""
    if has_significant_text(image):
        ocr_result = pytesseract.image_to_string(image, lang="eng+spa")
        return len(ocr_result.strip()) > ocr_threshold
    return False


def is_likely_photo(crop):
    """Heuristic: photos have high intensity variance and many distinct gray levels."""
    np_crop = np.array(crop)
    gray = cv2.cvtColor(np_crop, cv2.COLOR_RGB2GRAY)
    return np.std(gray) > 25 and len(np.unique(gray)) > 50
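
# These heuristics work together in extract_visual_regions below: a candidate
# crop is kept only if it looks photographic (is_likely_photo) and is not
# dominated by text (text_area_ratio / is_primarily_text).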


def extract_visual_regions(image):
    """Find photo-like regions on a rendered page via connected components."""
    np_img = np.array(image.convert("RGB"))
    gray = cv2.cvtColor(np_img, cv2.COLOR_RGB2GRAY)
    _, binary = cv2.threshold(gray, 220, 255, cv2.THRESH_BINARY_INV)
    closed = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_RECT, (15, 15)))
    num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(closed, connectivity=8)
    results = []
    for i in range(1, num_labels):  # label 0 is the background
        x, y, w, h, area = stats[i]
        # Keep reasonably large regions with a plausible aspect ratio.
        if area > 2000 and 0.3 < (w / float(h)) < 3.5:
            bbox = (x, y, x + w, y + h)
            crop = image.crop(bbox)
            if is_likely_photo(crop) and text_area_ratio(crop) < 0.25 and not is_primarily_text(crop):
                results.append(crop)
    return results
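
# Quick local test of the region detector (hypothetical path, commented out):
#
#   page_img = Image.open("/tmp/ocr_page_1.jpg")
#   for i, region in enumerate(extract_visual_regions(page_img)):
#       region.save(f"/tmp/region_{i}.jpg")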


# ---------- Text + image extraction ----------
def clean_bullet_line(text):
    """Normalize bullet glyphs (and common OCR confusions) to a plain hyphen."""
    text = unicodedata.normalize("NFKC", text)
    for bullet in ("e@", "@", "•", "*", "·", "–"):
        text = text.replace(bullet, "-")
    text = " ".join(text.split())
    return text
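
# Example (illustrative): clean_bullet_line("•  Item  one") -> "- Item one"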


def extract_text_markdown(page, image_paths, page_index, seen_xrefs):
    """Build Markdown for a single PDF page: text lines plus embedded images."""
    markdown_output = f"\n## Página {page_index + 1}\n\n"
    image_counter = 1
    elements = []
    blocks = page.get_text("dict")["blocks"]
    for b in blocks:
        if b["type"] == 0:  # text block
            for line in b["lines"]:
                line_y = line["bbox"][1]
                line_text = " ".join(span["text"] for span in line["spans"]).strip()
                line_text = clean_bullet_line(line_text)
                max_font_size = max(span.get("size", 10) for span in line["spans"])
                if line_text:
                    elements.append((line_y, line_text, max_font_size))
    images_on_page = page.get_images(full=True)
    for img_index, img in enumerate(images_on_page):
        xref = img[0]
        if xref in seen_xrefs:  # skip images already extracted on earlier pages
            continue
        seen_xrefs.add(xref)
        try:
            base_image = page.parent.extract_image(xref)
            image_bytes = base_image["image"]
            ext = base_image["ext"]
            image_path = f"/tmp/imagen_p{page_index + 1}_{img_index + 1}.{ext}"
            with open(image_path, "wb") as f:
                f.write(image_bytes)
            image_paths.append(image_path)
            # An infinite sort key places images after the text; Python's
            # stable sort keeps them in their original order.
            elements.append((float("inf"), f"\n\n![imagen_{image_counter}]({image_path})\n", 10))
            image_counter += 1
        except Exception as e:
            elements.append((float("inf"), f"[Error imagen: {e}]", 10))
    elements.sort(key=lambda x: x[0])
    previous_y = None
    for y, text, font_size in elements:
        is_header = font_size >= 14  # treat large fonts as headings
        if previous_y is not None and abs(y - previous_y) > 10:
            markdown_output += "\n"
        translated = translate_text(text.strip())
        markdown_output += f"\n### {translated}\n" if is_header else translated + "\n"
        previous_y = y
    markdown_output += "\n---\n\n"
    return markdown_output.strip()


# ---------- Main function ----------
def convert(pdf_file):
    # gr.File(type="filepath") passes a path string; older Gradio versions pass
    # a tempfile-like object with a .name attribute, so handle both.
    temp_pdf_path = pdf_file if isinstance(pdf_file, str) else pdf_file.name
    doc = fitz.open(temp_pdf_path)
    markdown_output = ""
    image_paths = []
    seen_xrefs = set()
    for page_num in range(len(doc)):
        page = doc[page_num]
        text = page.get_text("text").strip()
        if len(text) > 30:
            # The page has native PDF text
            extracted = extract_text_markdown(page, image_paths, page_num, seen_xrefs)
            markdown_output += extracted + "\n"
        else:
            # "Scanned" page -> render it and run OCR
            markdown_output += f"\n## Página {page_num + 1}\n\n"
            pix = page.get_pixmap(dpi=300)
            img = Image.frombytes("RGB", (pix.width, pix.height), pix.samples)
            image_path = f"/tmp/ocr_page_{page_num + 1}.jpg"
            img.save(image_path)
            image_paths.append(image_path)
            markdown_output += f"![imagen_pagina_{page_num + 1}]({image_path})\n"
            try:
                ocr_text = pytesseract.image_to_string(img, lang="eng+spa")
            except pytesseract.TesseractError:
                ocr_text = ""
            ocr_text_clean = clean_ocr_text(ocr_text)
            translated_ocr = translate_text(ocr_text_clean)
            markdown_output += translated_ocr + "\n"
            # Also crop photo-like regions out of the rendered page
            crops = extract_visual_regions(img)
            for i, crop in enumerate(crops):
                crop_path = f"/tmp/recorte_p{page_num + 1}_{i + 1}.jpg"
                crop.save(crop_path)
                image_paths.append(crop_path)
                markdown_output += f"\n\n![imagen_detectada]({crop_path})\n"
            markdown_output += "\n---\n\n"
    markdown_path = "/tmp/resultado.md"
    with open(markdown_path, "w", encoding="utf-8") as f:
        f.write(markdown_output)
    return markdown_output.strip(), image_paths, markdown_path
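
# Standalone usage without the UI (hypothetical path, commented out):
#
#   md, imgs, md_path = convert("/tmp/ejemplo.pdf")
#   print(md[:500])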


# ---------- Gradio Interface ----------
with gr.Blocks() as demo:
    with gr.Row():
        pdf_input = gr.File(label="Upload your PDF", type="filepath", file_types=[".pdf"])
        submit_btn = gr.Button("Process PDF")
    markdown_output = gr.Textbox(label="Generated Markdown", lines=25, interactive=True)
    # Gradio 4 expects "filepath" here ("file" is not an accepted Gallery type).
    gallery_output = gr.Gallery(label="Extracted and Detected Images", type="filepath")
    download_md = gr.File(label="Download Markdown File")
    submit_btn.click(fn=convert, inputs=[pdf_input], outputs=[markdown_output, gallery_output, download_md])

demo.launch()
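
# Assumed dependencies (not pinned in this file): gradio, PyMuPDF, Pillow,
# numpy, opencv-python, pytesseract, transformers (plus a backend such as
# torch), and the Tesseract binary with the "eng" and "spa" language data.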