# ADAPT-Chase's picture
# Add files using upload-large-folder tool
# fbf3c28 verified
"""
id: crawler
title: Simple Crawler
author: admin
description: Crawl a site breadth-first with basic robots.txt checks and rate limiting; saves pages to /data/adaptai/web/pages.
version: 0.1.0
license: Proprietary
"""
import hashlib
import os
import re
import time
import urllib.parse
import urllib.request
import urllib.robotparser
from collections import deque
# Destination for saved HTML pages; created eagerly at import time so every
# crawl can assume the directory exists.
SAVE_DIR = "/data/adaptai/web/pages"
os.makedirs(SAVE_DIR, exist_ok=True)
class Tools:
    """Breadth-first web crawler with robots.txt gating and rate limiting."""

    def crawl(
        self,
        start_url: str,
        max_pages: int = 50,
        same_domain: bool = True,
        delay_ms: int = 500,
    ) -> dict:
        """Crawl breadth-first from ``start_url`` and save HTML pages to SAVE_DIR.

        Args:
            start_url: Seed URL; its host defines the crawl's root domain.
            max_pages: Stop after this many pages have been saved.
            same_domain: If True, only follow links whose host matches the seed's.
            delay_ms: Pause between request attempts, in milliseconds.

        Returns:
            dict with "fetched" (list of {"url", "file"} entries), "count"
            (len of fetched), and "robots" (first 10 kB of the site's
            robots.txt, or "" if it could not be retrieved).
        """
        parsed_root = urllib.parse.urlparse(start_url)
        root_host = parsed_root.netloc
        q = deque([start_url])
        seen = set()
        fetched = []
        robots_txt = {}

        # Fetch robots.txt once, and actually parse it so can_fetch() gates
        # requests below (previously the text was fetched but never consulted).
        rp = urllib.robotparser.RobotFileParser()
        try:
            with urllib.request.urlopen(
                f"{parsed_root.scheme}://{root_host}/robots.txt", timeout=10
            ) as resp:
                rob = resp.read().decode("utf-8", errors="replace")
            robots_txt["robots"] = rob[:10000]
            rp.parse(rob.splitlines())
        except Exception:
            # Best-effort: if robots.txt is unreachable, allow everything
            # (parse of an empty ruleset permits all paths).
            robots_txt["robots"] = ""
            rp.parse([])

        while q and len(fetched) < max_pages:
            # Drop fragments so "page#a" and "page#b" count as one page.
            url = urllib.parse.urldefrag(q.popleft())[0]
            if url in seen:
                continue
            seen.add(url)
            if not rp.can_fetch("*", url):
                continue
            try:
                # Context manager closes the response deterministically
                # instead of leaking the connection.
                with urllib.request.urlopen(url, timeout=15) as resp:
                    if "text/html" not in resp.headers.get("Content-Type", ""):
                        continue
                    html = resp.read().decode("utf-8", errors="replace")
                # Short, stable filename derived from the URL.
                h = hashlib.sha1(url.encode()).hexdigest()[:12]
                path = f"{SAVE_DIR}/{h}.html"
                with open(path, "w", encoding="utf-8") as f:
                    f.write(html)
                fetched.append({"url": url, "file": path})
                # Enqueue http(s) links, optionally restricted to the root host.
                for href in re.findall(r'href="([^"]+)"', html):
                    nxt = urllib.parse.urldefrag(
                        urllib.parse.urljoin(url, href)
                    )[0]
                    p = urllib.parse.urlparse(nxt)
                    if same_domain and p.netloc != root_host:
                        continue
                    if p.scheme not in ("http", "https"):
                        continue
                    if nxt not in seen:
                        q.append(nxt)
            except Exception:
                # Best-effort crawl: skip pages that fail to fetch or decode.
                continue
            finally:
                # Rate-limit every request attempt, not only successful ones.
                time.sleep(max(0, delay_ms / 1000.0))
        return {"fetched": fetched, "count": len(fetched), **robots_txt}