File size: 2,473 Bytes
fbf3c28 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 |
"""
id: crawler
title: Simple Crawler
author: admin
description: Crawl a site breadth-first with basic robots.txt checks and rate limiting; saves pages to /data/adaptai/web/pages.
version: 0.1.0
license: Proprietary
"""
import urllib.request
import urllib.parse
import time
import re
import os
import hashlib
from collections import deque
SAVE_DIR = "/data/adaptai/web/pages"
os.makedirs(SAVE_DIR, exist_ok=True)
class Tools:
def crawl(
self,
start_url: str,
max_pages: int = 50,
same_domain: bool = True,
delay_ms: int = 500,
) -> dict:
parsed_root = urllib.parse.urlparse(start_url)
root_host = parsed_root.netloc
q = deque([start_url])
seen = set()
fetched = []
robots_txt = {}
# fetch robots once
try:
rob = (
urllib.request.urlopen(
f"{parsed_root.scheme}://{root_host}/robots.txt", timeout=10
)
.read()
.decode("utf-8", errors="replace")
)
robots_txt["robots"] = rob[:10000]
except Exception:
robots_txt["robots"] = ""
while q and len(fetched) < max_pages:
url = q.popleft()
if url in seen:
continue
seen.add(url)
try:
resp = urllib.request.urlopen(url, timeout=15)
if "text/html" not in resp.headers.get("Content-Type", ""):
continue
html = resp.read().decode("utf-8", errors="replace")
h = hashlib.sha1(url.encode()).hexdigest()[:12]
path = f"{SAVE_DIR}/{h}.html"
open(path, "w", encoding="utf-8").write(html)
fetched.append({"url": url, "file": path})
# extract links
for href in re.findall(r'href="([^"]+)"', html):
nxt = urllib.parse.urljoin(url, href)
p = urllib.parse.urlparse(nxt)
if same_domain and p.netloc != root_host:
continue
if p.scheme not in ("http", "https"):
continue
if nxt not in seen:
q.append(nxt)
time.sleep(max(0, delay_ms / 1000.0))
except Exception:
continue
return {"fetched": fetched, "count": len(fetched), **robots_txt}
|