riazmo commited on
Commit
f1ff736
·
verified ·
1 Parent(s): 48de618

Delete agents/crawler.py

Browse files
Files changed (1) hide show
  1. agents/crawler.py +0 -350
agents/crawler.py DELETED
@@ -1,350 +0,0 @@
1
- """
2
- Agent 1: Website Crawler
3
- Design System Extractor v2
4
-
5
- Persona: Meticulous Design Archaeologist
6
-
7
- Responsibilities:
8
- - Auto-discover pages from base URL
9
- - Classify page types (homepage, listing, detail, etc.)
10
- - Prepare page list for user confirmation
11
- """
12
-
13
- import asyncio
14
- import re
15
- from urllib.parse import urljoin, urlparse
16
- from typing import Optional, Callable
17
- from datetime import datetime
18
-
19
- from playwright.async_api import async_playwright, Browser, Page, BrowserContext
20
-
21
- from core.token_schema import DiscoveredPage, PageType, Viewport
22
- from config.settings import get_settings
23
-
24
-
25
class PageDiscoverer:
    """
    Discovers pages from a website for design system extraction.

    This is the first part of Agent 1's job — finding pages before
    the human confirms which ones to crawl.

    Usage: either ``async with PageDiscoverer() as d: ...`` or simply call
    :meth:`discover`, which manages the browser lifecycle itself.
    """

    def __init__(self):
        self.settings = get_settings()
        # Playwright driver handle; kept so _close_browser can stop() it
        # (previously the driver was started but never stopped — process leak).
        self.playwright = None
        self.browser: Optional[Browser] = None
        self.context: Optional[BrowserContext] = None
        self.visited_urls: set[str] = set()
        self.discovered_pages: list[DiscoveredPage] = []

    async def __aenter__(self):
        """Async context manager entry: launch the browser."""
        await self._init_browser()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit: tear the browser down."""
        await self._close_browser()

    async def _init_browser(self):
        """Start Playwright and open a Chromium context with a desktop viewport."""
        self.playwright = await async_playwright().start()
        self.browser = await self.playwright.chromium.launch(
            headless=self.settings.browser.headless
        )
        self.context = await self.browser.new_context(
            viewport={
                "width": self.settings.viewport.desktop_width,
                "height": self.settings.viewport.desktop_height,
            },
            user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36"
        )

    async def _close_browser(self):
        """Close context and browser, then stop the Playwright driver."""
        if self.context:
            await self.context.close()
            self.context = None
        if self.browser:
            await self.browser.close()
            self.browser = None
        if self.playwright:
            # Fix: stop the driver process, which was previously leaked.
            await self.playwright.stop()
            self.playwright = None

    def _normalize_url(self, url: str, base_url: str) -> Optional[str]:
        """
        Normalize *url* relative to *base_url*.

        Returns a fragment-free, query-free, same-domain absolute URL with a
        trailing slash stripped (except for the bare root), or ``None`` when
        the URL points off-domain.
        """
        # Resolve relative URLs against the base.
        if not url.startswith(('http://', 'https://')):
            url = urljoin(base_url, url)

        parsed = urlparse(url)
        base_parsed = urlparse(base_url)

        # Only crawl within the same domain.
        if parsed.netloc != base_parsed.netloc:
            return None

        # Drop fragments and query strings.
        normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}"

        # Strip the trailing slash for consistency, but keep it on the root URL.
        if normalized.endswith('/') and len(normalized) > len(f"{parsed.scheme}://{parsed.netloc}/"):
            normalized = normalized.rstrip('/')

        return normalized

    def _classify_page_type(self, url: str, title: str = "") -> PageType:
        """
        Classify page type based on URL patterns and title.

        This is a heuristic — not perfect, but good enough for discovery.
        URL patterns are checked first (in dict order, so e.g. '/contact'
        matches FORM before CONTACT), then title keywords; falls back to OTHER.
        """
        url_lower = url.lower()
        title_lower = title.lower() if title else ""

        # URL-pattern heuristics, checked in priority (insertion) order.
        patterns = {
            PageType.HOMEPAGE: [r'/$', r'/home$', r'/index'],
            PageType.LISTING: [r'/products', r'/catalog', r'/list', r'/category', r'/collection', r'/search'],
            PageType.DETAIL: [r'/product/', r'/item/', r'/detail/', r'/p/', r'/[a-z-]+/\d+'],
            PageType.FORM: [r'/contact', r'/form', r'/apply', r'/submit', r'/register'],
            PageType.AUTH: [r'/login', r'/signin', r'/signup', r'/auth', r'/account'],
            PageType.CHECKOUT: [r'/cart', r'/checkout', r'/basket', r'/payment'],
            PageType.MARKETING: [r'/landing', r'/promo', r'/campaign', r'/offer'],
            PageType.ABOUT: [r'/about', r'/team', r'/company', r'/story'],
            PageType.CONTACT: [r'/contact', r'/support', r'/help'],
        }

        for page_type, url_patterns in patterns.items():
            for pattern in url_patterns:
                if re.search(pattern, url_lower):
                    return page_type

        # Fall back to title keywords when the URL gave no signal.
        title_patterns = {
            PageType.HOMEPAGE: ['home', 'welcome'],
            PageType.LISTING: ['products', 'catalog', 'collection', 'browse'],
            PageType.DETAIL: ['product', 'item'],
            PageType.AUTH: ['login', 'sign in', 'sign up', 'register'],
            PageType.ABOUT: ['about', 'our story', 'team'],
            PageType.CONTACT: ['contact', 'get in touch', 'support'],
        }

        for page_type, keywords in title_patterns.items():
            for keyword in keywords:
                if keyword in title_lower:
                    return page_type

        return PageType.OTHER

    async def _extract_links(self, page: Page, base_url: str) -> list[str]:
        """Extract all unvisited, normalized, same-domain links from *page*."""
        links = await page.evaluate("""
            () => {
                const links = Array.from(document.querySelectorAll('a[href]'));
                return links.map(a => a.href).filter(href =>
                    href &&
                    !href.startsWith('javascript:') &&
                    !href.startsWith('mailto:') &&
                    !href.startsWith('tel:') &&
                    !href.includes('#')
                );
            }
        """)

        # Normalize, drop off-domain/visited links, and dedupe.
        valid_links = []
        for link in links:
            normalized = self._normalize_url(link, base_url)
            if normalized and normalized not in self.visited_urls:
                valid_links.append(normalized)

        return list(set(valid_links))

    async def _get_page_title(self, page: Page) -> str:
        """Get the page title, or '' if the page is in a bad state."""
        try:
            return await page.title()
        except Exception:
            return ""

    async def discover(
        self,
        base_url: str,
        max_pages: Optional[int] = None,
        progress_callback: Optional[Callable[[float], None]] = None
    ) -> list[DiscoveredPage]:
        """
        Discover pages from a website via a breadth-first crawl.

        Args:
            base_url: The starting URL
            max_pages: Maximum pages to discover (default from settings)
            progress_callback: Optional callback for progress updates (0.0-1.0)

        Returns:
            List of discovered pages (failed pages are included with an error set)

        Raises:
            ValueError: if *base_url* cannot be normalized (e.g. empty/off-domain)
        """
        # NOTE: `or` (not `is None`) preserves the original behavior where a
        # falsy max_pages (0) also falls back to the settings default.
        max_pages = max_pages or self.settings.crawl.max_pages

        async with self:
            # Seed the crawl with the normalized homepage.
            normalized_base = self._normalize_url(base_url, base_url)
            if not normalized_base:
                raise ValueError(f"Invalid base URL: {base_url}")

            queue = [normalized_base]
            # Every URL ever put in the queue; O(1) membership instead of the
            # previous O(n) `link not in queue` list scan (same semantics,
            # since visited URLs were always ex-queue members).
            enqueued: set[str] = {normalized_base}
            self.visited_urls = set()
            self.discovered_pages = []

            while queue and len(self.discovered_pages) < max_pages:
                current_url = queue.pop(0)

                if current_url in self.visited_urls:
                    continue

                self.visited_urls.add(current_url)

                try:
                    page = await self.context.new_page()
                    try:
                        # Navigate to the page.
                        await page.goto(
                            current_url,
                            wait_until="networkidle",
                            timeout=self.settings.browser.timeout
                        )

                        # Gather page info.
                        title = await self._get_page_title(page)
                        page_type = self._classify_page_type(current_url, title)
                        depth = len(urlparse(current_url).path.split('/')) - 1

                        # Record the discovered page.
                        discovered = DiscoveredPage(
                            url=current_url,
                            title=title,
                            page_type=page_type,
                            depth=depth,
                            selected=True,
                        )
                        self.discovered_pages.append(discovered)

                        # Extract links for further crawling.
                        new_links = await self._extract_links(page, base_url)

                        # Crawl design-relevant templates first.
                        priority_patterns = ['/product', '/listing', '/category', '/about', '/contact']
                        priority_links = [l for l in new_links if any(p in l.lower() for p in priority_patterns)]
                        other_links = [l for l in new_links if l not in priority_links]

                        # Enqueue priority links ahead of the rest.
                        for link in priority_links + other_links:
                            if link not in self.visited_urls and link not in enqueued:
                                queue.append(link)
                                enqueued.add(link)
                    finally:
                        # Fix: always release the tab, even when goto() raises
                        # (previously a failed navigation leaked the page).
                        await page.close()

                    # Report progress after each successful page.
                    if progress_callback:
                        progress = len(self.discovered_pages) / max_pages
                        progress_callback(min(progress, 1.0))

                    # Rate limiting between requests.
                    await asyncio.sleep(self.settings.crawl.crawl_delay_ms / 1000)

                except Exception as e:
                    # Record the failure but keep crawling the rest of the queue.
                    discovered = DiscoveredPage(
                        url=current_url,
                        title="",
                        page_type=PageType.OTHER,
                        depth=0,
                        selected=False,
                        error=str(e),
                    )
                    self.discovered_pages.append(discovered)

            return self.discovered_pages

    def get_pages_by_type(self) -> dict[PageType, list[DiscoveredPage]]:
        """Group discovered pages by their classified type."""
        grouped: dict[PageType, list[DiscoveredPage]] = {}
        for page in self.discovered_pages:
            grouped.setdefault(page.page_type, []).append(page)
        return grouped

    def get_suggested_pages(self, min_pages: Optional[int] = None) -> list[DiscoveredPage]:
        """
        Get suggested pages for extraction.

        Ensures diversity of page types (at most one per type in priority
        order), then pads with the shallowest remaining error-free pages up
        to *min_pages*. Marks every suggestion as selected.
        """
        min_pages = min_pages or self.settings.crawl.min_pages

        # Priority order for page types.
        priority_types = [
            PageType.HOMEPAGE,
            PageType.LISTING,
            PageType.DETAIL,
            PageType.FORM,
            PageType.MARKETING,
            PageType.AUTH,
            PageType.ABOUT,
            PageType.CONTACT,
            PageType.OTHER,
        ]

        selected = []
        grouped = self.get_pages_by_type()

        # First pass: one representative (shallowest) page per priority type.
        for page_type in priority_types:
            if page_type in grouped and grouped[page_type]:
                page = min(grouped[page_type], key=lambda p: p.depth)
                if page not in selected:
                    selected.append(page)

        # Second pass: fill up to min_pages with remaining error-free pages.
        remaining = [p for p in self.discovered_pages if p not in selected and not p.error]
        remaining.sort(key=lambda p: p.depth)

        while len(selected) < min_pages and remaining:
            selected.append(remaining.pop(0))

        # Mark the suggestions as selected.
        for page in selected:
            page.selected = True

        return selected
319
-
320
-
321
- # =============================================================================
322
- # CONVENIENCE FUNCTIONS
323
- # =============================================================================
324
-
325
async def discover_pages(base_url: str, max_pages: int = 20) -> list[DiscoveredPage]:
    """Convenience wrapper: run a one-shot page discovery against *base_url*."""
    return await PageDiscoverer().discover(base_url, max_pages)
329
-
330
-
331
async def quick_discover(base_url: str) -> dict:
    """Run discovery against *base_url* and return a plain-dict summary."""
    pages = await discover_pages(base_url)

    # Count discovered pages per page type.
    counts_by_type = {}
    for pt in PageType:
        counts_by_type[pt.value] = sum(1 for p in pages if p.page_type == pt)

    # Flatten each page into a small serializable record.
    page_summaries = []
    for p in pages:
        page_summaries.append({
            "url": p.url,
            "title": p.title,
            "type": p.page_type.value,
            "selected": p.selected,
        })

    return {
        "total_found": len(pages),
        "by_type": counts_by_type,
        "pages": page_summaries,
    }