Update web_scraper_tool.py

web_scraper_tool.py (CHANGED, +165 -172)

@@ -1,22 +1,36 @@
-# web_scraper_tool.py
 # -*- coding: utf-8 -*-
 import requests
 from bs4 import BeautifulSoup
 from fpdf import FPDF, FPDFException
 from urllib.parse import urlparse, urlunparse
 import tempfile
 import os
 import re
+from requests.adapters import HTTPAdapter
+# from requests.packages.urllib3.util.retry import Retry  # for older versions of requests
+from urllib3.util.retry import Retry  # for requests >= 2.26, or if urllib3 is installed globally
 
 class WebScrapperTool:
     def __init__(self):
         self.session = requests.Session()
+
+        # Configure the retry strategy
+        retry_strategy = Retry(
+            total=3,  # total number of retries
+            backoff_factor=1,  # backoff factor (e.g. waits of 1s, 2s, 4s between retries)
+            status_forcelist=[429, 500, 502, 503, 504],  # HTTP status codes that trigger a retry
+            allowed_methods=["HEAD", "GET", "OPTIONS"]  # HTTP methods that will be retried
+        )
+        adapter = HTTPAdapter(max_retries=retry_strategy)
+        self.session.mount("http://", adapter)
+        self.session.mount("https://", adapter)
+
         self.session.headers.update({
             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
         })
 
         self.dejavu_regular_path = self._find_font_file('DejaVuSansCondensed.ttf')
         self.dejavu_bold_path = self._find_font_file('DejaVuSansCondensed-Bold.ttf')
 
         if not self.dejavu_regular_path:
             print("Advertencia: No se encontró 'DejaVuSansCondensed.ttf'. Se usará Arial para el cuerpo de los PDFs (soporte Unicode limitado).")
@@ -24,9 +38,7 @@ class WebScrapperTool:
         if self.dejavu_regular_path and not self.dejavu_bold_path:
             print("Advertencia: No se encontró 'DejaVuSansCondensed-Bold.ttf'. Los títulos en PDF usarán Arial Bold o DejaVu Regular si Arial falla.")
 
-
     def _find_font_file(self, font_filename: str):
-        """Looks for a specific font file."""
         if os.path.exists(font_filename):
             return font_filename
         if os.path.exists(os.path.join('fonts', font_filename)):
@@ -39,33 +51,23 @@
 
         scheme = parsed_url.scheme
         if not scheme:
-            if parsed_url.netloc:
+            if parsed_url.netloc:  # e.g. www.google.com/page
                 parsed_url = parsed_url._replace(scheme="https")
-            elif parsed_url.path and '.' in parsed_url.path.split('/')[0]:
+            elif parsed_url.path and '.' in parsed_url.path.split('/')[0]:  # e.g. google.com/page
                 path_parts = parsed_url.path.split('/')
                 potential_netloc = path_parts[0]
                 new_path = '/'.join(path_parts[1:])
                 parsed_url = parsed_url._replace(scheme="https", netloc=potential_netloc, path=new_path)
-            # For this scraper we assume a URL is meant, so it needs a scheme.
-            else:
+            else:  # e.g. page.html or /page.html
                 parsed_url = parsed_url._replace(scheme="https")
 
-        new_path = '/' + '/'.join(path_parts[1:]) if len(path_parts) > 1 else ''
-        # Only if the scheme was not forced to https and netloc is still empty.
-        # This is tricky; the earlier logic of adding https when there is no scheme should cover it.
-        # The following line is an additional check.
-        if not parsed_url.scheme and not parsed_url.netloc:
-            parsed_url = parsed_url._replace(netloc=new_netloc, path=new_path, scheme="https")
+        if not parsed_url.netloc and parsed_url.path and not parsed_url.path.startswith('/'):
+            # A case like "google.com" ends up as a path without a netloc if there was no "www."
+            if '.' in parsed_url.path and '/' not in parsed_url.path:  # "google.com"
+                parsed_url = parsed_url._replace(netloc=parsed_url.path, path='')
+            elif '.' in parsed_url.path.split('/')[0]:  # "google.com/path"
+                parts = parsed_url.path.split('/', 1)
+                parsed_url = parsed_url._replace(netloc=parts[0], path=f"/{parts[1]}" if len(parts) > 1 else '')
 
 
         return urlunparse(parsed_url)
@@ -75,104 +77,105 @@ class WebScrapperTool:
         try:
             parsed_url = urlparse(url)
             return any(parsed_url.path.lower().endswith(ext) for ext in image_extensions)
         except Exception:
             return False
 
     def _get_content(self, url: str):
         try:
            is_potential_image = self.is_image_url(url)
+            # Timeouts: (connect_timeout, read_timeout) in seconds, applied to each attempt.
+            response = self.session.get(url, timeout=(15, 30), allow_redirects=True, stream=is_potential_image)
+            response.raise_for_status()  # raises HTTPError for 4xx/5xx codes after retries (where applicable)
 
             content_type_header = response.headers.get('content-type', '').lower()
 
-            if 'image' in content_type_header or (is_potential_image and not content_type_header):
-                # If the content type is generic but the URL suggests an image (e.g. octet-stream for a .jpg)
+            if 'image' in content_type_header or (is_potential_image and not content_type_header.startswith('text/')):
                 raw_content = response.content
                 return None, raw_content, content_type_header or "image/unknown"
 
-            # For non-image content, try to decode.
             text_content = None
             try:
+                # Try to decode as UTF-8 first
                 text_content = response.content.decode('utf-8')
             except UnicodeDecodeError:
-                # If UTF-8 fails, use the encoding that requests
-                # detects, or its heuristic.
+                # If UTF-8 fails, use the encoding that 'requests' infers (exposed via response.text)
                 print(f"Advertencia: Falló la decodificación UTF-8 para {url}. Usando response.text (codificación aparente: {response.apparent_encoding}).")
-                text_content = response.text
+                text_content = response.text  # response.text uses the encoding detected by requests
 
             return text_content, response.content, content_type_header
+
+        except requests.exceptions.ConnectTimeout as e:
+            return None, None, f"Error: Timeout de conexión al acceder a {url}. El servidor no respondió a la solicitud de conexión a tiempo (después de reintentos). (Detalle: {str(e)})"
+        except requests.exceptions.ReadTimeout as e:
+            return None, None, f"Error: Timeout de lectura al acceder a {url}. El servidor conectó pero tardó demasiado en enviar datos (después de reintentos). (Detalle: {str(e)})"
+        except requests.exceptions.Timeout as e:  # catches other Timeouts (if any) that are neither Connect nor Read
+            return None, None, f"Error: Timeout general al intentar acceder a la URL: {url} (después de reintentos). (Detalle: {str(e)})"
+        except requests.exceptions.HTTPError as e:  # HTTP errors such as 403, 404, 500 (not retried, or failed after retries)
+            return None, None, f"Error HTTP {e.response.status_code} ({e.response.reason}) para la URL: {url}. (Detalle: {str(e)})"
+        except requests.exceptions.TooManyRedirects as e:
+            return None, None, f"Error: Demasiados redirects para la URL: {url}. (Detalle: {str(e)})"
+        except requests.exceptions.SSLError as e:
+            return None, None, f"Error: Problema de SSL con la URL: {url}. (Detalle: {str(e)})"
+        except requests.exceptions.ConnectionError as e:  # covers other connection problems (DNS, etc.)
+            return None, None, f"Error de conexión al intentar acceder a {url}. (Detalle: {str(e)})"
+        except requests.exceptions.RequestException as e:  # base catch-all for requests errors not covered above
+            return None, None, f"Error de red/petición: {str(e)}"
+        except Exception as e_generic:
+            import traceback
+            tb_str = traceback.format_exc()
+            print(f"Error inesperado en _get_content para URL {url}: {str(e_generic)}\n{tb_str}")
             return None, None, f"Error inesperado obteniendo contenido: {str(e_generic)}"
 
     def scrape_to_text(self, url: str):
         text_content, _, content_type_info = self._get_content(url)
 
-        # If _get_content returned an error message in content_type_info
         if text_content is None and isinstance(content_type_info, str) and content_type_info.startswith("Error:"):
             return {'status': 'error', 'message': content_type_info, 'url': url}
 
         final_text = ""
         if text_content:
-            if 'text/html' in content_type_info:
+            content_type_str = str(content_type_info)  # make sure it is a string
+            if 'text/html' in content_type_str:
                 soup = BeautifulSoup(text_content, 'html.parser')
-                # Remove unwanted elements
                 for element in soup(["script", "style", "nav", "footer", "aside", "form", "button", "input", "header", "noscript", "iframe", "link", "meta"]):
-                    element.decompose()
+                    if element: element.decompose()
 
-                main_content_tags = ['main', 'article', 'div[role="main"]', 'div[class*="content"]', 'div[id*="content"]']
+                main_content_tags = ['main', 'article', 'div[role="main"]', 'div[class*="content"]', 'div[id*="content"]', 'section[class*="content"]']
                 content_holder = None
                 for tag_selector in main_content_tags:
                     try:
+                        candidate = soup.select_one(tag_selector)
+                        if candidate:
+                            content_holder = candidate
                             break
-                    except Exception:
-                        pass
+                    except Exception: pass
 
-                if not content_holder:
-                    content_holder = soup.find('body')
+                if not content_holder: content_holder = soup.find('body')
 
-                if content_holder:
-                    text_items = [s.strip() for s in content_holder.stripped_strings if s.strip()]
-                    final_text = "\n".join(text_items)
-                else:  # fallback if no body or main is found (e.g. an HTML fragment)
-                    text_items = [s.strip() for s in soup.stripped_strings if s.strip()]
-                    final_text = "\n".join(text_items)
+                if content_holder: text_items = [s.strip() for s in content_holder.stripped_strings if s.strip()]
+                else: text_items = [s.strip() for s in soup.stripped_strings if s.strip()]
+                final_text = "\n".join(text_items)
 
-            elif 'text/plain' in content_type_info:
+            elif 'text/plain' in content_type_str:
                 final_text = text_content
-            elif self.is_image_url(url) or ('image' in content_type_info):
+            elif self.is_image_url(url) or ('image' in content_type_str):
                 return {'status': 'error', 'message': f"La URL apunta a una imagen. El formato TXT es para contenido textual. Intente el formato PDF para imágenes.", 'url': url}
             else:
-                # We could try to extract text from JSON/XML here if needed.
-                # For now, just take the content as-is if it is text.
                 final_text = text_content
         else:
             error_message = f"No se pudo obtener contenido textual de la URL (Tipo: {content_type_info})."
             if isinstance(content_type_info, str) and content_type_info.startswith("Error:"):
                 error_message = content_type_info
             return {'status': 'error', 'message': error_message, 'url': url}
 
         if not final_text.strip():
             return {'status': 'error', 'message': "No se encontró contenido textual extraíble o la página está vacía después de la limpieza.", 'url': url}
 
         try:
+            parsed_url_obj = urlparse(url)
+            safe_filename_base = (parsed_url_obj.netloc + parsed_url_obj.path).replace('/', '_').replace(':', '_')
+            safe_filename_prefix = re.sub(r'[^a-zA-Z0-9_-]', '', safe_filename_base)
+            safe_filename_prefix = safe_filename_prefix[:50]
 
             with tempfile.NamedTemporaryFile(delete=False, mode='w', suffix='.txt', encoding='utf-8', prefix=f"scraped_{safe_filename_prefix}_") as tmp_file:
                 tmp_file.write(f"URL: {url}\n\n--- Contenido ---\n\n{final_text}")
@@ -184,33 +187,39 @@ class WebScrapperTool:
     def scrape_to_pdf(self, url: str):
         text_content, raw_content, content_type_info = self._get_content(url)
 
         if text_content is None and raw_content is None:
+            return {'status': 'error', 'message': str(content_type_info), 'url': url}
 
-        is_likely_image = 'image' in content_type_info or (self.is_image_url(url) and 'octet-stream' in content_type_info)
+        content_type_str = str(content_type_info)  # make sure it is a string
+        is_likely_image = 'image' in content_type_str or \
+                          (self.is_image_url(url) and ('octet-stream' in content_type_str or not content_type_str or content_type_str == "application/unknown"))
 
 
         if is_likely_image and raw_content:
             tmp_img_path = None
             try:
                 pdf = FPDF()
                 pdf.add_page()
 
+                img_ext_from_content_type = content_type_str.split('/')[-1].split(';')[0].strip()
+                if img_ext_from_content_type in ["unknown", "octet-stream"] or not img_ext_from_content_type:  # check for a generic or empty type
+                    parsed_url_path = urlparse(url).path
+                    img_ext_from_url = os.path.splitext(parsed_url_path)[1].lower()
+                    img_suffix = img_ext_from_url if img_ext_from_url else '.jpg'  # fallback
+                else:
+                    img_suffix = '.' + img_ext_from_content_type
+
                 valid_img_suffixes = ['.jpeg', '.jpg', '.png']
                 if img_suffix not in valid_img_suffixes:
-                    if 'png' in content_type_info: img_suffix = '.png'
-                    elif 'jpeg' in content_type_info: img_suffix = '.jpg'
+                    if 'png' in content_type_str or img_suffix == '.png': img_suffix = '.png'
+                    elif 'jpeg' in content_type_str or 'jpg' in content_type_str or img_suffix == '.jpg' or img_suffix == '.jpeg': img_suffix = '.jpg'
                     else: img_suffix = '.jpg'
 
                 with tempfile.NamedTemporaryFile(delete=False, suffix=img_suffix) as tmp_img:
                     tmp_img.write(raw_content)
                     tmp_img_path = tmp_img.name
 
                 page_width = pdf.w - 2 * pdf.l_margin
-                # Try to add the image; if it fails due to the format, FPDFException is caught below.
                 pdf.image(tmp_img_path, x=pdf.l_margin, y=pdf.t_margin, w=page_width)
 
                 with tempfile.NamedTemporaryFile(delete=False, mode='wb', suffix='.pdf') as tmp_file:
@@ -218,8 +227,7 @@
                     tmp_file.write(pdf_bytes)
                     filepath = tmp_file.name
                 return {'status': 'success', 'file': filepath, 'url': url}
-            except FPDFException as fpdf_e:  # FPDF-specific errors (e.g. unsupported image format)
+            except FPDFException as fpdf_e:
                 return {'status': 'error', 'message': f"Error de FPDF al procesar imagen (formato {img_suffix} podría no ser compatible o imagen corrupta): {str(fpdf_e)}", 'url': url}
             except Exception as e_img:
                 import traceback
@@ -227,39 +235,35 @@
             finally:
                 if tmp_img_path and os.path.exists(tmp_img_path):
                     os.unlink(tmp_img_path)
-        # Text processing for the PDF
+
         extracted_text_for_pdf = ""
         if text_content:
-            if 'text/html' in content_type_info:
+            if 'text/html' in content_type_str:
                 soup = BeautifulSoup(text_content, 'html.parser')
                 for element in soup(["script", "style", "nav", "footer", "aside", "form", "button", "input", "header", "noscript", "iframe", "link", "meta"]):
-                    element.decompose()
+                    if element: element.decompose()
 
-                main_content_tags = ['main', 'article', 'div[role="main"]', 'div[class*="content"]', 'div[id*="content"]']
+                main_content_tags = ['main', 'article', 'div[role="main"]', 'div[class*="content"]', 'div[id*="content"]', 'section[class*="content"]']
                 content_holder = None
                 for tag_selector in main_content_tags:
                     try:
+                        candidate = soup.select_one(tag_selector)
+                        if candidate:
+                            content_holder = candidate
                             break
-                    except Exception:
-                        pass
-
-                text_items = [s.strip() for s in content_holder.stripped_strings if s.strip()]
-                extracted_text_for_pdf = "\n".join(text_items)
-            else:
-                extracted_text_for_pdf = "\n".join([s.strip() for s in soup.stripped_strings if s.strip()])
+                    except Exception: pass
+                if not content_holder: content_holder = soup.find('body')
+
+                if content_holder: text_items = [s.strip() for s in content_holder.stripped_strings if s.strip()]
+                else: text_items = [s.strip() for s in soup.stripped_strings if s.strip()]
+                extracted_text_for_pdf = "\n".join(text_items)
 
-            elif 'text/plain' in content_type_info:
+            elif 'text/plain' in content_type_str:
                 extracted_text_for_pdf = text_content
             else:
                 extracted_text_for_pdf = text_content
         else:
+            error_message = content_type_str if isinstance(content_type_str, str) and content_type_str.startswith("Error:") else f"Tipo de contenido no soportado o vacío para PDF: {content_type_str}"
             return {'status': 'error', 'message': error_message, 'url': url}
 
         if not extracted_text_for_pdf.strip():
@@ -270,98 +274,87 @@ class WebScrapperTool:
             pdf.add_page()
             pdf.set_auto_page_break(auto=True, margin=15)
 
-            # Prepare the fonts
             title_font_family = 'Arial'
             title_font_style = 'B'
             body_font_family = 'Arial'
             body_font_style = ''
 
+            font_error_occurred = False
            if self.dejavu_regular_path:
-                pdf.set_font('Arial', 'B', 12)
-            else:
-                # Use the DejaVu combination (Regular or Bold), or Arial Bold if DejaVu is missing
-                pdf.set_font(title_font_family, title_font_style, 12)
+                try:
+                    pdf.add_font('DejaVu', '', self.dejavu_regular_path, uni=True)
+                    body_font_family = 'DejaVu'
+                    title_font_family = 'DejaVu'
+                    if self.dejavu_bold_path:
+                        pdf.add_font('DejaVu', 'B', self.dejavu_bold_path, uni=True)
+                        title_font_style = 'B'
+                    else:
+                        title_font_style = ''  # use regular DejaVu if bold was not found
+                except FPDFException as fe:
+                    print(f"Error al añadir fuente DejaVu: {fe}. Usando Arial.")
+                    font_error_occurred = True
+                    title_font_family, body_font_family = 'Arial', 'Arial'
+                    title_font_style = 'B'  # Arial Bold for the title
+
+            if title_font_family == 'DejaVu' and title_font_style == 'B' and (not self.dejavu_bold_path or font_error_occurred):
+                pdf.set_font('Arial', 'B', 12)  # fall back to Arial Bold if DejaVu Bold is missing or failed
+            else:
+                try:
+                    pdf.set_font(title_font_family, title_font_style, 12)
+                except FPDFException:  # if set_font fails even with DejaVu Regular (rare if add_font succeeded)
+                    pdf.set_font('Arial', 'B', 12)  # final fallback to Arial
 
             clean_url_for_pdf = "".join(c for c in url if c.isprintable() or c in ('\n', '\r', '\t'))
             try:
                 pdf.multi_cell(0, 8, f"Contenido de: {clean_url_for_pdf}")
             except FPDFException as e_url_font:
-                pdf.set_font('Arial', 'B', 12)  # retry with safe Arial
+                print(f"Advertencia: Error al escribir URL en PDF: {e_url_font}. Usando placeholder.")
+                pdf.set_font('Arial', 'B', 12)
                 pdf.multi_cell(0, 8, f"Contenido de URL (ver metadatos)")
             pdf.ln(6)
 
-            # Set the body font
-            pdf.set_font(body_font_family, body_font_style, 11)
+            try:
+                pdf.set_font(body_font_family, body_font_style, 11)
+            except FPDFException:  # if the body font fails
+                pdf.set_font('Arial', '', 11)
 
-            # Basic cleanup of common problematic characters
             clean_text = extracted_text_for_pdf.replace('\u2013', '-').replace('\u2014', '--')
             clean_text = clean_text.replace('\u2018', "'").replace('\u2019', "'")
             clean_text = clean_text.replace('\u201c', '"').replace('\u201d', '"')
-            clean_text = clean_text.replace('\u2026', '...')
-            clean_text = clean_text.replace('\u00A0', ' ')  # non-breaking space
+            clean_text = clean_text.replace('\u2026', '...').replace('\u00A0', ' ')
 
-            # Make sure only printable characters or newlines/tabs reach FPDF.
-            # This is crucial to avoid FPDFException "character not in font" errors.
             printable_text = "".join(c for c in clean_text if c.isprintable() or c in ('\n', '\r', '\t'))
 
             paragraphs = printable_text.split('\n')
             for para_idx, para in enumerate(paragraphs):
                 if para.strip():
                     try:
                         pdf.multi_cell(0, 7, para)
                         pdf.ln(2)
                     except FPDFException as e_font_char:
-                        print(f"Advertencia: Carácter no soportado en PDF en párrafo {para_idx+1} (font: {body_font_family}). Caracteres problemáticos (hex): {problem_char_hex}. Párrafo omitido/reemplazado.")
-                        # Option 1: skip the problematic paragraph
-                        # Option 2: try to replace the unsupported characters and retry (more complex)
-                        # Option 3: write a placeholder
+                        problem_chars_hex = [hex(ord(c)) for c in para if not (c.isprintable() or c in ('\n','\r','\t')) and ord(c) > 127]
+                        print(f"Advertencia: Carácter no soportado en PDF en párrafo {para_idx+1} (fuente: {pdf.font_family}). Problemáticos (hex): {problem_chars_hex}. Párrafo reemplazado.")
                        try:
+                            current_body_font = pdf.font_family
+                            current_body_style = pdf.font_style
+                            pdf.set_font('Arial', '', 11)
+                            pdf.multi_cell(0, 7, "[Párrafo con caracteres no soportados por la fuente. Contenido original en TXT si se generó.]")
                             pdf.ln(2)
-                            pdf.set_font(
-                        except:
-                            pass  # just skip it
+                            pdf.set_font(current_body_font, current_body_style, 11)
+                        except: pass
                 else:
                     pdf.ln(5)
 
            with tempfile.NamedTemporaryFile(delete=False, mode='wb', suffix='.pdf') as tmp_file:
                 pdf_output_bytes = pdf.output(dest='S')
                 tmp_file.write(pdf_output_bytes)
                 filepath = tmp_file.name
             return {'status': 'success', 'file': filepath, 'url': url}
         except FPDFException as e_fpdf_text:
             import traceback
-            if len(error_message) > 500: error_message = error_message[:497] + "..."
-            return {'status': 'error', 'message': error_message, 'url': url}
+            return {'status': 'error', 'message': f"Error FPDF generando PDF de texto: {str(e_fpdf_text)}\n{traceback.format_exc()[:300]}", 'url': url}
-        except Exception as e:  # other general errors
+        except Exception as e:
             import traceback
-            error_message = f"Error general al generar PDF de texto: {str(e)}\nDetalles: {tb_str}"
-            if len(error_message) > 500: error_message = error_message[:497] + "..."
-            return {'status': 'error', 'message': error_message, 'url': url}
+            return {'status': 'error', 'message': f"Error general generando PDF de texto: {str(e)}\n{traceback.format_exc()[:300]}", 'url': url}
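
For orientation, here is a minimal usage sketch of the class this commit touches. It is illustrative only (the script name and URLs are hypothetical) and relies just on what the diff shows: scrape_to_text and scrape_to_pdf return a dict with 'status', 'url', and either 'file' (the path of a generated temp file) or 'message' (an error description).

# usage_sketch.py -- illustrative example, not part of the commit
from web_scraper_tool import WebScrapperTool

scraper = WebScrapperTool()  # sets up the retrying session and looks for the DejaVu fonts

# Text extraction: writes a .txt temp file on success.
result = scraper.scrape_to_text("https://example.com")
if result['status'] == 'success':
    print("Text saved to:", result['file'])
else:
    print("Scrape failed:", result['message'])

# scrape_to_text rejects image URLs; scrape_to_pdf embeds the image in a PDF instead.
pdf_result = scraper.scrape_to_pdf("https://example.com/picture.jpg")
print(pdf_result['status'], pdf_result.get('file', pdf_result.get('message')))

Note how errors come back as result dicts rather than raised exceptions: the new except chain in _get_content folds timeouts, HTTP errors, SSL and connection failures into the 'message' field, so a caller only has to branch on 'status'.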