| """ | |
| id: pdf | |
| title: PDF Finder & Downloader | |
| author: admin | |
| description: Find PDF links on a page and download to /data/adaptai/web/pdf. | |
| version: 0.1.0 | |
| license: Proprietary | |
| """ | |
import hashlib
import os
import re
import urllib.parse
import urllib.request
| PDF_DIR = "/data/adaptai/web/pdf" | |
| os.makedirs(PDF_DIR, exist_ok=True) | |
class Tools:
    """Find PDF links on a web page and download them into PDF_DIR."""

    # Matches href="...pdf" or href='...pdf' (extension match is
    # case-insensitive). Compiled once instead of on every call.
    _PDF_HREF_RE = re.compile(r"""href=["']([^"']+\.pdf)["']""", re.I)

    def find_and_download(self, url: str, max_pdfs: int = 5) -> dict:
        """Fetch *url*, collect up to *max_pdfs* distinct PDF links, download each.

        Args:
            url: Page to scan for ``href`` attributes ending in ``.pdf``.
            max_pdfs: Upper bound on the number of distinct links collected.

        Returns:
            ``{"found": <links collected>, "saved": [{"url": ..., "file": ...}]}``
            where ``saved`` lists only the downloads that succeeded.
        """
        # Close the response promptly rather than leaking the socket.
        with urllib.request.urlopen(url, timeout=20) as resp:
            html = resp.read().decode("utf-8", errors="replace")

        links: list[str] = []
        seen: set[str] = set()
        for href in self._PDF_HREF_RE.findall(html):
            absolute = urllib.parse.urljoin(url, href)
            if absolute in seen:
                # Dedupe so repeated links don't eat max_pdfs slots or
                # trigger redundant downloads to the same target file.
                continue
            seen.add(absolute)
            links.append(absolute)
            if len(links) >= max_pdfs:
                break

        saved = []
        for link in links:
            try:
                with urllib.request.urlopen(link, timeout=30) as resp:
                    data = resp.read()
                # Stable, filesystem-safe name derived from the link itself.
                digest = hashlib.sha1(link.encode()).hexdigest()[:12]
                path = f"{PDF_DIR}/{digest}.pdf"
                with open(path, "wb") as fh:  # no leaked file handle
                    fh.write(data)
                saved.append({"url": link, "file": path})
            except Exception:
                # Best-effort: a failed fetch/write skips that link only.
                continue
        return {"found": len(links), "saved": saved}