#!/usr/bin/env python3
"""
OpenTransformers Web Crawler v1.0
Your data. Your company. No apologies.

Usage:
    python scraper.py --seeds seeds.txt --output /workspace/crawl --workers 100 --max-pages 50000
    python scraper.py --seed "https://news.ycombinator.com" --same-domain
"""
import asyncio
import aiohttp
import argparse
import hashlib
import json
import os
import re
import time
import random
from dataclasses import dataclass, asdict
from datetime import datetime
from pathlib import Path
from typing import Optional, Set, Dict, List
from urllib.parse import urljoin, urlparse
from collections import defaultdict
import gzip
class BoilerplateRemover:
    """Extract article text, strip nav/footer/ads"""
    IGNORE_TAGS = {'script', 'style', 'nav', 'header', 'footer', 'aside',
                   'noscript', 'iframe', 'svg', 'form', 'button', 'input',
                   'meta', 'link', 'head'}
    BLOCK_TAGS = {'p', 'div', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
                  'li', 'tr', 'article', 'section', 'blockquote', 'pre', 'code'}

    @staticmethod
    def extract(html: str) -> str:
        if not html:
            return ""
        for tag in BoilerplateRemover.IGNORE_TAGS:
            html = re.sub(f'<{tag}[^>]*>.*?</{tag}>', ' ', html, flags=re.DOTALL | re.IGNORECASE)
        html = re.sub(r'<!--.*?-->', '', html, flags=re.DOTALL)
        for tag in BoilerplateRemover.BLOCK_TAGS:
            html = re.sub(f'</?{tag}[^>]*>', '\n', html, flags=re.IGNORECASE)
        text = re.sub(r'<[^>]+>', ' ', html)
        text = text.replace('&nbsp;', ' ').replace('&amp;', '&').replace('&lt;', '<')
        text = text.replace('&gt;', '>').replace('&quot;', '"').replace('&#39;', "'")
        text = re.sub(r'[ \t]+', ' ', text)
        text = re.sub(r'\n\s*\n', '\n\n', text)
        text = re.sub(r'\n{3,}', '\n\n', text)
        return text.strip()
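
# Illustrative example (not part of the crawler's control flow):
# BoilerplateRemover.extract('<nav>menu</nav><p>Hello <b>world</b></p>')
# returns "Hello world" -- the <nav> subtree is dropped, block tags become
# newlines, remaining inline tags become spaces, then whitespace is collapsed.
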
def extract_links(html: str, base_url: str) -> Set[str]:
    links = set()
    for match in re.finditer(r'href=["\']([^"\']+)["\']', html, re.I):
        href = match.group(1)
        if href.startswith(('#', 'javascript:', 'mailto:', 'tel:', 'data:')):
            continue
        try:
            absolute = urljoin(base_url, href)
            if absolute.startswith(('http://', 'https://')):
                absolute = absolute.split('#')[0]
                if len(absolute) < 500:
                    links.add(absolute)
        except Exception:
            pass
    return links
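
# Illustrative example (not used by the crawler itself):
# extract_links('<a href="/about#team">About</a>', 'https://example.com/')
# returns {'https://example.com/about'} -- relative hrefs are resolved against
# the base URL and URL fragments are stripped.
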
def get_domain(url: str) -> str:
    try:
        return urlparse(url).netloc.lower()
    except Exception:
        return ""
@dataclass
class CrawledPage:
    url: str
    domain: str
    timestamp: str
    status: int
    text: str
    text_len: int
    html_len: int
    links: int
    fetch_ms: int
    hash: str
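
# Each CrawledPage is serialized with dataclasses.asdict() and written as one
# JSON object per line (JSONL) to the crawl_<timestamp>.jsonl output file.
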
class Stats:
    def __init__(self):
        self.pages = 0
        self.bytes = 0
        self.errors = 0
        self.start = time.time()
        self.by_domain: Dict[str, int] = defaultdict(int)

    def log(self, page: Optional[CrawledPage] = None, error: bool = False):
        if error:
            self.errors += 1
        elif page:
            self.pages += 1
            self.bytes += page.text_len
            self.by_domain[page.domain] += 1

    def report(self) -> str:
        elapsed = time.time() - self.start
        rate = self.pages / elapsed if elapsed > 0 else 0
        mb = self.bytes / (1024 * 1024)
        return f"Pages: {self.pages} | {mb:.1f} MB | {rate:.1f} p/s | Err: {self.errors} | Domains: {len(self.by_domain)}"
class Crawler:
    USER_AGENTS = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 Chrome/120.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 Chrome/120.0.0.0 Safari/537.36',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 Chrome/120.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:121.0) Gecko/20100101 Firefox/121.0',
    ]

    def __init__(self, output_dir: str = 'crawl_data', max_workers: int = 100,
                 max_pages: int = 50000, max_bytes: int = 10*1024**3,
                 timeout: int = 30, same_domain: bool = False,
                 min_text_len: int = 200, max_text_len: int = 500000,
                 politeness: float = 0.1):
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)
        self.max_workers = max_workers
        self.max_pages = max_pages
        self.max_bytes = max_bytes
        self.timeout = timeout
        self.same_domain = same_domain
        self.min_text_len = min_text_len
        self.max_text_len = max_text_len
        self.politeness = politeness
        self.queue: asyncio.Queue = asyncio.Queue()
        self.seen: Set[str] = set()
        self.seen_hashes: Set[str] = set()
        self.domain_last_hit: Dict[str, float] = {}
        self.seed_domains: Set[str] = set()
        self.stats = Stats()
        self.running = True
        self.output_file = None
        self.output_lock = asyncio.Lock()
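
    # Workers share these structures without explicit locks: asyncio schedules
    # them cooperatively in a single thread, so the set/dict updates here never
    # interleave mid-operation. Only writes to the output file are serialized
    # through output_lock.
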
    def _should_crawl(self, url: str) -> bool:
        if url in self.seen:
            return False
        skip_ext = {'.jpg', '.jpeg', '.png', '.gif', '.pdf', '.zip', '.mp3', '.mp4',
                    '.avi', '.mov', '.exe', '.dmg', '.iso', '.tar', '.gz', '.css', '.js',
                    '.woff', '.woff2', '.ttf', '.ico', '.svg', '.webp'}
        lower = url.lower()
        if any(lower.endswith(ext) for ext in skip_ext):
            return False
        if self.same_domain:
            if get_domain(url) not in self.seed_domains:
                return False
        return True
    async def _fetch(self, session: aiohttp.ClientSession, url: str) -> Optional[CrawledPage]:
        domain = get_domain(url)
        # Per-domain politeness: wait until at least `politeness` seconds have
        # passed since the last request to this domain.
        if domain in self.domain_last_hit:
            since = time.time() - self.domain_last_hit[domain]
            if since < self.politeness:
                await asyncio.sleep(self.politeness - since)
        self.domain_last_hit[domain] = time.time()
        start = time.time()
        try:
            headers = {'User-Agent': random.choice(self.USER_AGENTS)}
            async with session.get(url, headers=headers, timeout=self.timeout,
                                   ssl=False, allow_redirects=True) as resp:
                ct = resp.headers.get('content-type', '').lower()
                if 'text/html' not in ct and 'text/plain' not in ct:
                    return None
                html = await resp.text(errors='ignore')
                fetch_ms = int((time.time() - start) * 1000)
                text = BoilerplateRemover.extract(html)
                if len(text) < self.min_text_len or len(text) > self.max_text_len:
                    return None
                # Exact-duplicate suppression by content hash.
                content_hash = hashlib.md5(text.encode()).hexdigest()[:16]
                if content_hash in self.seen_hashes:
                    return None
                self.seen_hashes.add(content_hash)
                links = extract_links(html, url)
                for link in links:
                    if self._should_crawl(link) and self.stats.pages < self.max_pages:
                        self.seen.add(link)
                        await self.queue.put(link)
                return CrawledPage(url=url, domain=domain, timestamp=datetime.utcnow().isoformat(),
                                   status=resp.status, text=text, text_len=len(text),
                                   html_len=len(html), links=len(links), fetch_ms=fetch_ms, hash=content_hash)
        except Exception:
            # Any fetch/parse failure (timeouts, DNS errors, bad encodings) is
            # treated as a skipped page rather than crashing the worker.
            return None
    async def _worker(self, session: aiohttp.ClientSession, worker_id: int):
        while self.running:
            try:
                url = await asyncio.wait_for(self.queue.get(), timeout=10.0)
            except asyncio.TimeoutError:
                if self.queue.empty():
                    break
                continue
            if self.stats.pages >= self.max_pages or self.stats.bytes >= self.max_bytes:
                self.running = False
                break
            page = await self._fetch(session, url)
            if page:
                self.stats.log(page)
                async with self.output_lock:
                    self.output_file.write(json.dumps(asdict(page)) + '\n')
                if self.stats.pages % 100 == 0:
                    print(f"[{datetime.now().strftime('%H:%M:%S')}] {self.stats.report()}")
            else:
                self.stats.log(error=True)
            self.queue.task_done()
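
    # crawl() below seeds the queue, opens the JSONL output file, and runs
    # max_workers _worker() coroutines until the page/byte limits are hit or
    # the queue stays empty for 10 seconds.
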
    async def crawl(self, seeds: List[str]):
        print("OpenTransformers Crawler starting")
        print(f"Seeds: {len(seeds)} | Workers: {self.max_workers} | Target: {self.max_pages} pages / {self.max_bytes/1024**3:.1f} GB")
        print("-" * 60)
        for seed in seeds:
            self.seed_domains.add(get_domain(seed))
            self.seen.add(seed)
            await self.queue.put(seed)
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        output_path = self.output_dir / f'crawl_{timestamp}.jsonl'
        self.output_file = open(output_path, 'w', buffering=1)
        connector = aiohttp.TCPConnector(limit=self.max_workers, ssl=False)
        async with aiohttp.ClientSession(connector=connector) as session:
            workers = [asyncio.create_task(self._worker(session, i)) for i in range(self.max_workers)]
            await asyncio.gather(*workers)
        self.output_file.close()
        print("-" * 60)
        print(f"DONE! {self.stats.report()}")
        print(f"Output: {output_path} ({output_path.stat().st_size / 1024**2:.1f} MB)")
        print("Compressing...")
        with open(output_path, 'rb') as f_in:
            with gzip.open(str(output_path) + '.gz', 'wb') as f_out:
                f_out.writelines(f_in)
        gz_size = Path(str(output_path) + '.gz').stat().st_size
        print(f"Compressed: {gz_size / 1024**2:.1f} MB")
def main():
    p = argparse.ArgumentParser(description='OpenTransformers Web Crawler')
    p.add_argument('--seeds', type=str, help='File with seed URLs')
    p.add_argument('--seed', type=str, action='append', help='Single seed URL')
    p.add_argument('--output', type=str, default='crawl_data')
    p.add_argument('--workers', type=int, default=100)
    p.add_argument('--max-pages', type=int, default=50000)
    p.add_argument('--max-gb', type=float, default=10.0)
    p.add_argument('--timeout', type=int, default=30)
    p.add_argument('--same-domain', action='store_true')
    p.add_argument('--min-text', type=int, default=200)
    p.add_argument('--politeness', type=float, default=0.1)
    args = p.parse_args()
    seeds = []
    if args.seeds:
        with open(args.seeds) as f:
            seeds.extend(line.strip() for line in f if line.strip())
    if args.seed:
        seeds.extend(args.seed)
    if not seeds:
        print("ERROR: Provide --seeds file or --seed URLs")
        return
    crawler = Crawler(output_dir=args.output, max_workers=args.workers,
                      max_pages=args.max_pages, max_bytes=int(args.max_gb * 1024**3),
                      timeout=args.timeout, same_domain=args.same_domain,
                      min_text_len=args.min_text, politeness=args.politeness)
    asyncio.run(crawler.crawl(seeds))


if __name__ == '__main__':
    main()