OpenTransformer committed on
Commit
4188404
·
verified ·
1 Parent(s): ef86d33

Upload scraper.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. scraper.py +281 -0
scraper.py ADDED
@@ -0,0 +1,281 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ OpenTransformers Web Crawler v1.0
4
+ Your data. Your company. No apologies.
5
+
6
+ Usage:
7
+ python scraper.py --seeds seeds.txt --output /workspace/crawl --workers 100 --max-pages 50000
8
+ python scraper.py --seed "https://news.ycombinator.com" --depth 3 --same-domain
9
+ """
10
+
11
import argparse
import asyncio
import gzip
import hashlib
import json
import os
import random
import re
import time
from collections import defaultdict
from dataclasses import dataclass, asdict
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Optional, Set
from urllib.parse import urljoin, urlparse

import aiohttp
27
+
28
+
29
class BoilerplateRemover:
    """Extract readable article text from raw HTML, stripping nav/footer/ads.

    Regex-based (no HTML-parser dependency): removes the contents of
    non-content tags, turns block-level tags into newlines to preserve
    paragraph structure, drops remaining tags, then decodes a handful of
    common HTML entities.
    """

    # Tags whose entire contents are boilerplate; removed outright.
    IGNORE_TAGS = {'script', 'style', 'nav', 'header', 'footer', 'aside',
                   'noscript', 'iframe', 'svg', 'form', 'button', 'input',
                   'meta', 'link', 'head'}
    # Tags that delimit blocks of text; replaced by newlines.
    BLOCK_TAGS = {'p', 'div', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
                  'li', 'tr', 'article', 'section', 'blockquote', 'pre', 'code'}

    @staticmethod
    def extract(html: str) -> str:
        """Return plain text extracted from *html* ('' for falsy input)."""
        if not html:
            return ""
        # Drop boilerplate tags together with everything inside them.
        for tag in BoilerplateRemover.IGNORE_TAGS:
            html = re.sub(f'<{tag}[^>]*>.*?</{tag}>', ' ', html,
                          flags=re.DOTALL | re.IGNORECASE)
        html = re.sub(r'<!--.*?-->', '', html, flags=re.DOTALL)
        # Preserve block structure as newlines before stripping tags.
        for tag in BoilerplateRemover.BLOCK_TAGS:
            html = re.sub(f'</?{tag}[^>]*>', '\n', html, flags=re.IGNORECASE)
        text = re.sub(r'<[^>]+>', ' ', html)
        # Decode common entities. '&amp;' MUST be decoded last: decoding it
        # first turned e.g. '&amp;lt;' into '&lt;' and then into '<'
        # (double decoding), corrupting text that legitimately contains
        # escaped entities.
        text = text.replace('&nbsp;', ' ').replace('&lt;', '<')
        text = text.replace('&gt;', '>').replace('&quot;', '"').replace('&#39;', "'")
        text = text.replace('&amp;', '&')
        # Collapse runs of spaces/tabs and excess blank lines.
        text = re.sub(r'[ \t]+', ' ', text)
        text = re.sub(r'\n\s*\n', '\n\n', text)
        text = re.sub(r'\n{3,}', '\n\n', text)
        return text.strip()
54
+
55
+
56
def extract_links(html: str, base_url: str) -> Set[str]:
    """Collect absolute http(s) links from href attributes in *html*.

    Skips fragments and javascript:/mailto:/tel:/data: pseudo-links,
    resolves relative URLs against *base_url*, strips fragment
    identifiers, and ignores absurdly long URLs (>= 500 chars).
    """
    links: Set[str] = set()
    for match in re.finditer(r'href=["\']([^"\']+)["\']', html, re.I):
        # Strip stray whitespace some pages embed inside href values.
        href = match.group(1).strip()
        if href.startswith(('#', 'javascript:', 'mailto:', 'tel:', 'data:')):
            continue
        try:
            absolute = urljoin(base_url, href)
        except ValueError:
            # Malformed URL: skip it rather than abort the whole page.
            # (Was a bare `except: pass`, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            continue
        if absolute.startswith(('http://', 'https://')):
            absolute = absolute.split('#')[0]  # drop fragment
            if len(absolute) < 500:
                links.add(absolute)
    return links
71
+
72
+
73
def get_domain(url: str) -> str:
    """Return the lowercased netloc of *url*, or '' if it cannot be parsed."""
    try:
        return urlparse(url).netloc.lower()
    except ValueError:
        # urlparse raises ValueError on e.g. invalid ports; narrowed from a
        # bare `except`, which also swallowed KeyboardInterrupt/SystemExit.
        return ""
78
+
79
+
80
@dataclass
class CrawledPage:
    """One successfully crawled page; serialized with asdict() to JSONL."""

    url: str        # final URL that was fetched
    domain: str     # lowercased netloc of url
    timestamp: str  # ISO-8601 fetch time
    status: int     # HTTP response status code
    text: str       # extracted text (boilerplate removed)
    text_len: int   # len(text)
    html_len: int   # length of the raw HTML response
    links: int      # number of outgoing links found on the page
    fetch_ms: int   # wall-clock fetch duration in milliseconds
    hash: str       # 16-hex-char MD5 prefix of text, used for dedup
92
+
93
+
94
class Stats:
    """Running crawl counters plus a one-line progress summary."""

    def __init__(self):
        self.pages = 0                                   # pages accepted
        self.bytes = 0                                   # total extracted text bytes
        self.errors = 0                                  # failed/filtered fetches
        self.start = time.time()                         # crawl start (for rate)
        self.by_domain: Dict[str, int] = defaultdict(int)  # pages per domain

    def log(self, page: Optional[CrawledPage] = None, error: bool = False):
        """Record either one error or one accepted page."""
        if error:
            self.errors += 1
            return
        if not page:
            return
        self.pages += 1
        self.bytes += page.text_len
        self.by_domain[page.domain] += 1

    def report(self) -> str:
        """Build a single-line snapshot of throughput so far."""
        elapsed = time.time() - self.start
        rate = self.pages / elapsed if elapsed > 0 else 0
        mb = self.bytes / (1024 * 1024)
        return (f"Pages: {self.pages} | {mb:.1f} MB | {rate:.1f} p/s | "
                f"Err: {self.errors} | Domains: {len(self.by_domain)}")
    
115
+
116
+
117
class Crawler:
    """Async breadth-first web crawler that writes JSONL records to disk.

    A fixed pool of asyncio worker tasks pulls URLs from a shared queue,
    fetches them with aiohttp, extracts article text, deduplicates pages
    by content hash, enqueues newly discovered links, and appends one
    JSON object per page to a .jsonl file (gzipped when the crawl ends).
    """

    # Rotated per request so traffic resembles ordinary browsers.
    USER_AGENTS = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 Chrome/120.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 Chrome/120.0.0.0 Safari/537.36',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 Chrome/120.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:121.0) Gecko/20100101 Firefox/121.0',
    ]

    # URL suffixes that are almost certainly not HTML. Class-level so the
    # set is built once instead of on every _should_crawl call.
    SKIP_EXT = {'.jpg', '.jpeg', '.png', '.gif', '.pdf', '.zip', '.mp3', '.mp4',
                '.avi', '.mov', '.exe', '.dmg', '.iso', '.tar', '.gz', '.css', '.js',
                '.woff', '.woff2', '.ttf', '.ico', '.svg', '.webp'}

    def __init__(self, output_dir: str = 'crawl_data', max_workers: int = 100,
                 max_pages: int = 50000, max_bytes: int = 10*1024**3,
                 timeout: int = 30, same_domain: bool = False,
                 min_text_len: int = 200, max_text_len: int = 500000,
                 politeness: float = 0.1):
        """Configure crawl limits and create the output directory.

        *politeness* is the minimum delay in seconds between two requests
        to the same domain; *timeout* is the per-request total timeout.
        Text shorter than *min_text_len* or longer than *max_text_len*
        is discarded.
        """
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)
        self.max_workers = max_workers
        self.max_pages = max_pages
        self.max_bytes = max_bytes
        self.timeout = timeout
        self.same_domain = same_domain
        self.min_text_len = min_text_len
        self.max_text_len = max_text_len
        self.politeness = politeness
        self.queue: asyncio.Queue = asyncio.Queue()      # URL frontier
        self.seen: Set[str] = set()                      # URLs already queued
        self.seen_hashes: Set[str] = set()               # content-hash dedup
        self.domain_last_hit: Dict[str, float] = {}      # politeness bookkeeping
        self.seed_domains: Set[str] = set()
        self.stats = Stats()
        self.running = True
        self.output_file = None                          # opened in crawl()
        self.output_lock = asyncio.Lock()                # serializes JSONL writes

    def _should_crawl(self, url: str) -> bool:
        """Cheap pre-filter: unseen URL, not an asset file, domain allowed."""
        if url in self.seen:
            return False
        lower = url.lower()
        if any(lower.endswith(ext) for ext in self.SKIP_EXT):
            return False
        if self.same_domain and get_domain(url) not in self.seed_domains:
            return False
        return True

    async def _fetch(self, session: aiohttp.ClientSession, url: str) -> Optional[CrawledPage]:
        """Fetch one URL; return a CrawledPage, or None if skipped/failed.

        Enforces the per-domain politeness delay, rejects non-HTML
        responses and too-short/too-long/duplicate text, and enqueues any
        newly discovered links.
        """
        domain = get_domain(url)
        # Per-domain rate limit: sleep out the remainder of the politeness
        # window. NOTE(review): with many concurrent workers two requests
        # to the same domain can still race past this check; best-effort.
        if domain in self.domain_last_hit:
            since = time.time() - self.domain_last_hit[domain]
            if since < self.politeness:
                await asyncio.sleep(self.politeness - since)
        self.domain_last_hit[domain] = time.time()

        start = time.time()
        try:
            headers = {'User-Agent': random.choice(self.USER_AGENTS)}
            # ClientTimeout instead of a bare number: passing an int/float
            # to `timeout=` is deprecated in aiohttp.
            async with session.get(url, headers=headers,
                                   timeout=aiohttp.ClientTimeout(total=self.timeout),
                                   ssl=False,  # WARNING: disables cert verification
                                   allow_redirects=True) as resp:
                ct = resp.headers.get('content-type', '').lower()
                if 'text/html' not in ct and 'text/plain' not in ct:
                    return None
                html = await resp.text(errors='ignore')
                fetch_ms = int((time.time() - start) * 1000)
                text = BoilerplateRemover.extract(html)
                if len(text) < self.min_text_len or len(text) > self.max_text_len:
                    return None
                content_hash = hashlib.md5(text.encode()).hexdigest()[:16]
                if content_hash in self.seen_hashes:
                    return None  # near-duplicate content
                self.seen_hashes.add(content_hash)
                links = extract_links(html, url)
                for link in links:
                    if self._should_crawl(link) and self.stats.pages < self.max_pages:
                        self.seen.add(link)
                        await self.queue.put(link)
                return CrawledPage(url=url, domain=domain,
                                   timestamp=datetime.now(timezone.utc).isoformat(),
                                   status=resp.status, text=text, text_len=len(text),
                                   html_len=len(html), links=len(links),
                                   fetch_ms=fetch_ms, hash=content_hash)
        except asyncio.CancelledError:
            # Must propagate: the previous bare `except` swallowed task
            # cancellation and could hang shutdown.
            raise
        except Exception:
            # Network/decode errors are expected at crawl scale; the
            # caller counts them via Stats.
            return None

    async def _worker(self, session: aiohttp.ClientSession, worker_id: int):
        """Worker loop: pull URLs until limits are hit or the queue drains."""
        while self.running:
            try:
                url = await asyncio.wait_for(self.queue.get(), timeout=10.0)
            except asyncio.TimeoutError:
                if self.queue.empty():
                    break  # assume the frontier is exhausted
                continue
            if self.stats.pages >= self.max_pages or self.stats.bytes >= self.max_bytes:
                self.running = False
                break
            page = await self._fetch(session, url)
            if page:
                self.stats.log(page)
                async with self.output_lock:
                    self.output_file.write(json.dumps(asdict(page)) + '\n')
                if self.stats.pages % 100 == 0:
                    print(f"[{datetime.now().strftime('%H:%M:%S')}] {self.stats.report()}")
            else:
                # Counts both hard errors and filtered/duplicate pages.
                self.stats.log(error=True)
            self.queue.task_done()

    async def crawl(self, seeds: List[str]):
        """Run the crawl from *seeds*, then gzip the resulting JSONL file."""
        print("OpenTransformers Crawler starting")
        print(f"Seeds: {len(seeds)} | Workers: {self.max_workers} | Target: {self.max_pages} pages / {self.max_bytes/1024**3:.1f} GB")
        print("-" * 60)
        for seed in seeds:
            self.seed_domains.add(get_domain(seed))
            self.seen.add(seed)
            await self.queue.put(seed)
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        output_path = self.output_dir / f'crawl_{timestamp}.jsonl'
        # Line-buffered so records survive a crash mid-crawl.
        self.output_file = open(output_path, 'w', buffering=1)
        try:
            connector = aiohttp.TCPConnector(limit=self.max_workers, ssl=False)
            async with aiohttp.ClientSession(connector=connector) as session:
                workers = [asyncio.create_task(self._worker(session, i))
                           for i in range(self.max_workers)]
                await asyncio.gather(*workers)
        finally:
            # Close even if a worker raised, so buffered records are flushed
            # (previously the file leaked on any exception).
            self.output_file.close()
        print("-" * 60)
        print(f"DONE! {self.stats.report()}")
        print(f"Output: {output_path} ({output_path.stat().st_size / 1024**2:.1f} MB)")
        print("Compressing...")
        with open(output_path, 'rb') as f_in:
            with gzip.open(str(output_path) + '.gz', 'wb') as f_out:
                f_out.writelines(f_in)
        gz_size = Path(str(output_path) + '.gz').stat().st_size
        print(f"Compressed: {gz_size / 1024**2:.1f} MB")
247
+
248
+
249
def main():
    """CLI entry point: parse arguments, load seed URLs, run the crawler."""
    p = argparse.ArgumentParser(description='OpenTransformers Web Crawler')
    p.add_argument('--seeds', type=str, help='File with seed URLs')
    p.add_argument('--seed', type=str, action='append', help='Single seed URL')
    p.add_argument('--output', type=str, default='crawl_data')
    p.add_argument('--workers', type=int, default=100)
    p.add_argument('--max-pages', type=int, default=50000)
    p.add_argument('--max-gb', type=float, default=10.0)
    p.add_argument('--timeout', type=int, default=30)
    p.add_argument('--same-domain', action='store_true')
    p.add_argument('--min-text', type=int, default=200)
    p.add_argument('--politeness', type=float, default=0.1)
    args = p.parse_args()

    # Seeds come from a file (one URL per line, blanks ignored), repeated
    # --seed flags, or both.
    seeds: List[str] = []
    if args.seeds:
        with open(args.seeds) as f:
            seeds.extend(line.strip() for line in f if line.strip())
    if args.seed:
        seeds.extend(args.seed)
    if not seeds:
        # parser.error prints to stderr and exits with status 2; the old
        # code printed to stdout and returned exit code 0 on failure.
        p.error('Provide --seeds file or --seed URLs')

    crawler = Crawler(output_dir=args.output, max_workers=args.workers,
                      max_pages=args.max_pages, max_bytes=int(args.max_gb * 1024**3),
                      timeout=args.timeout, same_domain=args.same_domain,
                      min_text_len=args.min_text, politeness=args.politeness)
    asyncio.run(crawler.crawl(seeds))


if __name__ == '__main__':
    main()