# app.py
"""
Coppermine Originalbild-Downloader – Gradio Edition
Sicherheitsforschung / Bug-Bounty Tool
Nur für legale Tests mit Erlaubnis des Betreibers nutzen!
"""
import os
import threading
import time
import queue
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
from pathlib import Path
import gradio as gr
# ────────────────────────────────────────────────
# Global configuration – adjust as needed
# ────────────────────────────────────────────────
SESSION = requests.Session()
SESSION.headers.update({
"User-Agent": (
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/128.0.0.0 Safari/537.36 "
"Coppermine-Research-Downloader/1.1 "
"(Security-Research; responsible-disclosure)"
),
"Accept": "image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
"Accept-Language": "de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7",
"Referer": "https://www.google.com/"
})
REQUEST_TIMEOUT = 12
DOWNLOAD_DELAY = 0.40  # anti-rate-limit / anti-DoS pause between downloads
MAX_THREADS_DEFAULT = 4
MAX_PAGES_TO_SCAN = 400  # guard against endless pagination loops / self-DoS
# ────────────────────────────────────────────────
# Helper functions
# ────────────────────────────────────────────────
def correct_image_url(url: str) -> str:
    """Strips common Coppermine thumbnail prefixes from the file name."""
    # NOTE: pathlib would collapse the "//" after the URL scheme
    # (Path("https://x") -> "https:/x"), so plain string handling is used
    # here to keep the URL intact.
    clean = url.split("?")[0].split("#")[0]
    head, _, filename = clean.rpartition("/")
    prefixes = ("thumb_", "normal_", "medium_", "small_", "preview_", "mini_")
    for prefix in prefixes:
        if filename.startswith(prefix):
            return f"{head}/{filename[len(prefix):]}"
    # No prefix -> probably already the original
    return clean
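# Illustrative example (the path layout is an assumption, typical for
# Coppermine's albums/userpics tree): the prefix stripping maps a thumbnail
# URL to its presumed full-size sibling, e.g.
#   correct_image_url("https://example.com/albums/userpics/thumb_IMG_0001.jpg")
#   -> "https://example.com/albums/userpics/IMG_0001.jpg"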
def is_likely_image_url(url: str) -> bool:
    exts = (".jpg", ".jpeg", ".png", ".gif", ".webp", ".bmp")
    return any(url.lower().endswith(ext) for ext in exts)
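# Note on behaviour: endswith() means a URL with a query string never matches,
# e.g. (hypothetical URLs)
#   is_likely_image_url("https://example.com/IMG_1.jpg")      -> True
#   is_likely_image_url("https://example.com/IMG_1.jpg?v=2")  -> False
# so download_image() silently skips dynamically served images.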
def download_image(img_url: str, folder: str, progress_queue: queue.Queue) -> bool:
    if not is_likely_image_url(img_url):
        return False
    filename = img_url.split("/")[-1].split("?")[0].split("#")[0]
    if not filename:
        return False
    filepath = os.path.join(folder, filename)
    if os.path.exists(filepath):
        progress_queue.put(("skip", f"already present → {filename}"))
        return False
    try:
        # HEAD request first → saves bandwidth on large files
        head = SESSION.head(img_url, timeout=6, allow_redirects=True)
        if head.status_code != 200:
            return False
        ct = head.headers.get("Content-Type", "").lower()
        if "image" not in ct and "octet-stream" not in ct:
            return False
        size = int(head.headers.get("Content-Length", 0))
        if size < 20_000:  # < ~20 KB → usually a thumbnail or an error page
            return False
        # Only now perform the actual download
        r = SESSION.get(img_url, timeout=REQUEST_TIMEOUT, stream=True)
        r.raise_for_status()
        with open(filepath, "wb") as f:
            for chunk in r.iter_content(chunk_size=8192):
                if chunk:
                    f.write(chunk)
        progress_queue.put(("success", f"✓ {filename} ({size // 1024:,} KB)"))
        return True
    except Exception as e:
        progress_queue.put(("error", f"× {filename} → {e}"))
        return False
def scrape_album_page(page_url: str) -> list[str]:
    """Extracts image URLs from one album page (?page=...)"""
    try:
        r = SESSION.get(page_url, timeout=REQUEST_TIMEOUT)
        r.raise_for_status()
        soup = BeautifulSoup(r.text, "html.parser")
        candidates = set()
        # 1. <img> tags – usually thumbnails
        for img in soup.find_all("img"):
            src = img.get("src") or img.get("data-src") or img.get("data-lazy-src")
            if src and is_likely_image_url(src):
                full = urljoin(page_url, src)
                candidates.add(correct_image_url(full))
        # 2. <a> links that point directly at images
        for a in soup.find_all("a", href=True):
            href = a["href"]
            if is_likely_image_url(href):
                full = urljoin(page_url, href)
                candidates.add(full)
        # 3. data-fancybox / lightbox attributes (common in newer themes)
        for elem in soup.find_all(attrs={"data-fancybox": True, "href": True}):
            href = elem.get("href")
            if href and is_likely_image_url(href):
                candidates.add(urljoin(page_url, href))
        return list(candidates)
    except Exception as e:
        print(f"Scrape error {page_url}: {e}")
        return []
def worker(album_url: str, folder: str, stop_event: threading.Event,
           progress_queue: queue.Queue, thread_id: int, num_threads: int):
    # Each thread scans a disjoint stripe of pages (thread_id, thread_id +
    # num_threads, ...) so the workers don't all fetch the same pages and
    # race over the same files.
    page = thread_id
    count = 0
    while not stop_event.is_set() and page <= MAX_PAGES_TO_SCAN:
        page_url = f"{album_url.rstrip('/')}?page={page}"
        progress_queue.put(("info", f"Thread {thread_id} → page {page}"))
        image_urls = scrape_album_page(page_url)
        if not image_urls:
            progress_queue.put(("info", f"Thread {thread_id} → end reached (page {page})"))
            break
        for url in image_urls:
            if stop_event.is_set():
                break
            if download_image(url, folder, progress_queue):
                count += 1
            time.sleep(DOWNLOAD_DELAY)
        page += num_threads
    progress_queue.put(("done", f"Thread {thread_id} finished – {count} images"))
def start_download(album_url: str, folder: str, threads: int):
    # stop_event / threads_list live at module level so stop_download() can
    # signal the running workers.
    global stop_event, threads_list
    if not album_url.strip():
        yield "❌ Album URL missing", "", 0, gr.update(interactive=False)
        return
    folder = folder.strip() or "downloads_coppermine"
    Path(folder).mkdir(parents=True, exist_ok=True)
    stop_event = threading.Event()
    progress_queue = queue.Queue()
    def progress_loop():
        total = 0
        lines = []
        while True:
            try:
                typ, msg = progress_queue.get(timeout=1.2)
                if typ == "success":
                    total += 1
                if typ in ("info", "success", "skip", "error", "warn", "done"):
                    lines.append(msg)
                    lines = lines[-30:]
                status = f"**Download running** – {total} images"
                if typ == "done" and total > 0:
                    status = f"**Finished** – {total} images downloaded"
                yield (
                    status,
                    "\n".join(lines),
                    total,
                    gr.update(value="Stop", interactive=not stop_event.is_set())
                )
                if typ == "done" and all(not t.is_alive() for t in threads_list):
                    break
            except queue.Empty:
                if all(not t.is_alive() for t in threads_list):
                    break
        final_log = "\n".join(lines) + "\n\n→ Download finished or stopped."
        yield "Download ended", final_log, total, gr.update(value="Start", interactive=True)
    # Start the worker threads
    num_threads = max(1, int(threads))
    threads_list = []
    for i in range(1, num_threads + 1):
        t = threading.Thread(
            target=worker,
            args=(album_url, folder, stop_event, progress_queue, i, num_threads),
            daemon=True
        )
        threads_list.append(t)
        t.start()
    yield from progress_loop()
def stop_download():
    if "stop_event" in globals():
        stop_event.set()
        return "Stop signal sent … threads are shutting down", gr.update(value="Stop sent", interactive=False)
    return "No download running", gr.update()
# ────────────────────────────────────────────────
# Gradio Interface
# ────────────────────────────────────────────────
css = """
.gradio-container { max-width: 960px; margin: auto; font-family: system-ui, sans-serif; }
.logbox { font-family: 'Consolas', 'Courier New', monospace !important;
background: #0d1117; color: #c9d1d9; padding: 14px;
border-radius: 8px; white-space: pre-wrap; overflow-y: auto;
max-height: 380px; line-height: 1.45; }
.status { font-weight: 600; }
"""
with gr.Blocks(css=css, title="Coppermine Original Downloader – Research Edition") as demo:
gr.Markdown("""
# Coppermine Originalbild-Downloader
**Sicherheitsforschung / Bug-Bounty Tool** – 2025/2026 Edition
Nur mit ausdrücklicher Erlaubnis des Website-Betreibers nutzen!
""")
    with gr.Row():
        url_input = gr.Textbox(
            label="Album base URL",
            placeholder="https://example.com/gallery/index.php?album=123",
            value="https://example.com/index.php?album=1",
            scale=5
        )
        folder_input = gr.Textbox(
            label="Target folder",
            value="coppermine_originals",
            scale=3
        )
    threads_slider = gr.Slider(
        1, 12, value=MAX_THREADS_DEFAULT, step=1,
        label="Number of parallel threads (beware of rate limits / IP bans)"
    )
    status_md = gr.Markdown("**Ready …**", elem_classes=["status"])
    log_box = gr.Textbox(label="Live log", lines=14, max_lines=40, interactive=False, elem_classes=["logbox"])
    count_num = gr.Number(label="Images downloaded", value=0, interactive=False)
    with gr.Row():
        start_btn = gr.Button("Start download", variant="primary")
        stop_btn = gr.Button("Stop download", variant="stop", interactive=False)
gr.Markdown("""
**Wichtige Hinweise**
• **Rechtlich**: Massen-Downloads können gegen AGB / Strafgesetze verstoßen
• **Technisch**: Viele Coppermine-Instanzen haben schwachen Schutz → IDOR, Directory Listing, offene Alben häufig
• **Bug Bounty**: Finde Schwachstellen? → Responsible Disclosure!
• **Tipp**: Teste zuerst mit HEAD-Requests & niedriger Thread-Anzahl
""")
    # ─── Events ─────────────────────────────────────────────
    start_btn.click(
        start_download,
        inputs=[url_input, folder_input, threads_slider],
        outputs=[status_md, log_box, count_num, stop_btn]
    )
    stop_btn.click(
        stop_download,
        outputs=[status_md, stop_btn]
    )
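# Usage sketch: run `python app.py` and open http://localhost:7860 in a
# browser; server_name="0.0.0.0" below also exposes the UI on the local network.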
if __name__ == "__main__":
    demo.queue(max_size=8).launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        debug=False
    )