import gradio as gr
from datetime import datetime, timedelta
import requests
import json
import re

# Headers to make requests look like they come from Firefox
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:71.0) Gecko/20100101 Firefox/71.0'
}


def get_google_cache_date(url):
    cache_url = f"https://webcache.googleusercontent.com/search?q=cache:{url}"
    try:
        resp = requests.get(cache_url, headers=headers)
        if resp.status_code == 200:
            # Look for a date such as "Jan 1, 2024" in the cached page
            getcache = re.search(r"[a-zA-Z]{3}\s[0-9]{1,2},\s[0-9]{4}", resp.text)
            if getcache:
                g_cache = getcache.group(0)
                cache_date = datetime.strptime(g_cache, '%b %d, %Y')
                today = datetime.now()
                days_ago = (today - cache_date).days
                # Singular/plural handling for "día/días"
                day_word = "día" if days_ago == 1 else "días"
                # Pick an emoji based on how old the cached copy is
                if days_ago <= 3:
                    emoji = "😎"
                elif days_ago <= 30:
                    emoji = "🙂"
                elif days_ago <= 90:
                    emoji = "😐"
                else:
                    emoji = "😭"
                formatted_date = cache_date.strftime('%d/%m/%Y')
                return f"<p>👁 Cacheada por Google hace {days_ago} {day_word} ({formatted_date}) {emoji}</p>"
            else:
                return "<p>URL no cacheada por Google ⛔</p>"
        else:
            return "<p>Error al acceder a la caché de Google ⛔</p>"
    except Exception as e:
        return str(e)


def wayback(website):
    if not website:
        return "<p>😭 Error: introduce una URL correcta</p>"

    google_cache_info = get_google_cache_date(website)

    # Try the Wayback CDX Server API first (snapshots from the last 365 days)
    end_date = datetime.now()
    start_date = end_date - timedelta(days=365)
    datefrom = start_date.strftime('%Y%m%d')
    dateto = end_date.strftime('%Y%m%d')
    cdx_api_url = f"http://web.archive.org/cdx/search/cdx?url={website}&output=json&from={datefrom}&to={dateto}&limit=3000"
    try:
        response = requests.get(cdx_api_url, headers=headers)
        if response.status_code == 200:
            content = json.loads(response.text)
            # The first row is the CDX header, so real snapshots start at index 1
            if len(content) > 1:
                # Sort the snapshots by timestamp, newest first
                sorted_content = sorted(content[1:], key=lambda x: x[1], reverse=True)
                # Build the HTML table
                results = [f"{google_cache_info}<table><tr><th>Fecha</th><th>URL</th></tr>"]
                for row in sorted_content:
                    date, page, status = [row[i] for i in [1, 2, 4]]
                    formatted_date = datetime.strptime(date, '%Y%m%d%H%M%S').strftime('%d/%m/%Y')
                    formatted_wayback_url = f"https://web.archive.org/web/{date}/{page}"
                    results.append(f"<tr><td>{formatted_date}</td><td><a href='{formatted_wayback_url}'>{formatted_wayback_url}</a></td></tr>")
                results.append("</table>")
                return "".join(results)
    except Exception:
        pass  # Fail silently and try the next method

    # If that fails, try the Wayback Availability JSON API
    availability_api_url = f"http://archive.org/wayback/available?url={website}"
    try:
        response = requests.get(availability_api_url, headers=headers)
        if response.status_code == 200:
            data = json.loads(response.text)
            if data["archived_snapshots"]:
                closest_snapshot = data["archived_snapshots"]["closest"]
                if closest_snapshot and closest_snapshot["available"]:
                    snapshot_url = closest_snapshot["url"]
                    timestamp = closest_snapshot["timestamp"]
                    formatted_date = datetime.strptime(timestamp, '%Y%m%d%H%M%S').strftime('%d/%m/%Y')
                    return (f"{google_cache_info}<table><tr><th>Fecha</th><th>URL</th></tr>"
                            f"<tr><td>{formatted_date}</td><td><a href='{snapshot_url}'>{snapshot_url}</a></td></tr></table>")
    except Exception as e:
        return f"{google_cache_info}<p>😭 Error: {e}</p>"

    return f"{google_cache_info}<p>😭 Error: No se encontraron datos archivados para esta URL.</p>"


def archive_now(website):
    if not website:
        return "<p>😭 Error: Por favor, introduce una URL válida.</p>"
    archive_url = f"https://web.archive.org/save/{website}"
    try:
        # 60-second timeout: Save Page Now can take a while to respond
        response = requests.get(archive_url, headers=headers, timeout=60)
        if response.status_code == 200:
            return "<p>👌 URL archivada con éxito.</p>"
        else:
            # On an unsuccessful response, look up the latest snapshot instead
            return check_last_snapshot(website)
    except requests.exceptions.Timeout:
        # On timeout, look up the latest snapshot instead
        return check_last_snapshot(website)
    except Exception as e:
        return f"<p>Error al archivar la URL: {e}</p>"


def check_last_snapshot(website):
    availability_api_url = f"http://archive.org/wayback/available?url={website}"
    try:
        response = requests.get(availability_api_url, headers=headers)
        if response.status_code == 200:
            data = json.loads(response.text)
            if data["archived_snapshots"]:
                closest_snapshot = data["archived_snapshots"]["closest"]
                if closest_snapshot and closest_snapshot["available"]:
                    snapshot_url = closest_snapshot["url"]
                    timestamp = closest_snapshot["timestamp"]
                    formatted_date = datetime.strptime(timestamp, '%Y%m%d%H%M%S').strftime('%d/%m/%Y')
                    return f"<p>Última instantánea disponible: <a href='{snapshot_url}'>{formatted_date}</a></p>"
    except Exception as e:
        return f"<p>Error al buscar la última instantánea: {e}</p>"
    return "<p>😭 No se encontraron datos archivados para esta URL.</p>"

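# For reference only: minimal sketches of the JSON payloads the two Internet
# Archive endpoints above are expected to return. Neither constant is used by
# the app; the names and values are illustrative, not part of the original code.
#
# The CDX Server API (used in wayback) returns a list of rows whose first entry
# is the header, which is why the code skips content[0] and reads the timestamp,
# original URL and status code at indices 1, 2 and 4.
_EXAMPLE_CDX_RESPONSE = [
    ["urlkey", "timestamp", "original", "mimetype", "statuscode", "digest", "length"],
    ["com,example)/", "20240101000000", "https://example.com/", "text/html", "200", "ABCD1234", "1234"],
]

# The Availability API (used in wayback and check_last_snapshot) nests the most
# recent capture under archived_snapshots["closest"].
_EXAMPLE_AVAILABILITY_RESPONSE = {
    "url": "example.com",
    "archived_snapshots": {
        "closest": {
            "status": "200",
            "available": True,
            "url": "http://web.archive.org/web/20240101000000/https://example.com/",
            "timestamp": "20240101000000",
        }
    },
}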

# Gradio interface for the wayback function
wayback_interface = gr.Interface(
    fn=wayback,
    inputs="text",
    outputs="html",
    title="Wayback Machine",
    description="Busca instantáneas de una página web en Wayback Machine y guarda la página actual simplemente introduciendo la URL.",
    article="Desarrollada por © Artxe Web",
)

# Gradio interface for the archive_now function
archive_interface = gr.Interface(
    fn=archive_now,
    inputs="text",
    outputs="html",
    title="Guardar en Wayback Machine",
    description="Guarda la página web actual en Wayback Machine.",
    article="Desarrollada por © Artxe Web",
)

# Combine both interfaces into a single tabbed app
iface = gr.TabbedInterface(
    [wayback_interface, archive_interface],
    ["Buscar Instantáneas", "Archivar URL"],
    title="Wayback Fast",
)

# Launch the app
iface.launch()