Upload agent_2_website_inspector.py
agents/agent_2_website_inspector.py
ADDED
@@ -0,0 +1,181 @@
"""
Agent 2: Website Inspector
Captures screenshots AND extracts DOM elements from the live website.
Gets computed styles for each element for comparison with Figma.
"""

from typing import Dict, Any
import sys
import os
import asyncio

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from utils.dom_element_extractor import DOMElementExtractor


async def _capture_and_extract_async(
    website_url: str,
    output_dir: str,
    execution_id: str,
    desktop_width: int = 1440,
    mobile_width: int = 375
) -> tuple:
    """Async function to capture screenshots and extract elements."""
    from playwright.async_api import async_playwright
    from pathlib import Path

    Path(output_dir).mkdir(parents=True, exist_ok=True)

    screenshots = {}
    dimensions = {}
    all_elements = {}
    all_summaries = {}

    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=True)

        try:
            # Desktop capture and extraction
            print(f"\n  Processing desktop ({desktop_width}px)...")
            page = await browser.new_page(viewport={"width": desktop_width, "height": 1080})
            await page.goto(website_url, wait_until="networkidle", timeout=60000)

            # Wait longer for CSS/JS to fully load and apply styles
            await page.wait_for_timeout(3000)

            # Scroll to trigger any lazy-loaded styles
            await page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
            await page.wait_for_timeout(500)
            await page.evaluate("window.scrollTo(0, 0)")
            await page.wait_for_timeout(500)

            # Get full page height
            desktop_height = await page.evaluate("() => document.documentElement.scrollHeight")
            await page.set_viewport_size({"width": desktop_width, "height": desktop_height})

            # Screenshot
            desktop_path = f"{output_dir}/desktop_{execution_id}.png"
            await page.screenshot(path=desktop_path, full_page=True)
            screenshots["desktop"] = desktop_path
            dimensions["desktop"] = {"width": desktop_width, "height": desktop_height}
            print(f"  Screenshot: {desktop_width}x{desktop_height}px")

            # Extract elements
            extractor = DOMElementExtractor()
            elements = await extractor.extract_from_page_async(page, "desktop")
            all_elements["desktop"] = [e.to_dict() for e in elements]
            all_summaries["desktop"] = extractor.summarize()
            print(f"  Elements: {len(elements)} extracted")

            await page.close()

            # Mobile capture and extraction
            print(f"\n  Processing mobile ({mobile_width}px)...")
            page = await browser.new_page(viewport={"width": mobile_width, "height": 812})
            await page.set_extra_http_headers({
                "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 15_0 like Mac OS X) AppleWebKit/605.1.15"
            })
            await page.goto(website_url, wait_until="networkidle", timeout=60000)
            await page.wait_for_timeout(2000)

            mobile_height = await page.evaluate("() => document.documentElement.scrollHeight")
            await page.set_viewport_size({"width": mobile_width, "height": mobile_height})

            # Screenshot
            mobile_path = f"{output_dir}/mobile_{execution_id}.png"
            await page.screenshot(path=mobile_path, full_page=True)
            screenshots["mobile"] = mobile_path
            dimensions["mobile"] = {"width": mobile_width, "height": mobile_height}
            print(f"  Screenshot: {mobile_width}x{mobile_height}px")

            # Extract elements
            extractor = DOMElementExtractor()
            elements = await extractor.extract_from_page_async(page, "mobile")
            all_elements["mobile"] = [e.to_dict() for e in elements]
            all_summaries["mobile"] = extractor.summarize()
            print(f"  Elements: {len(elements)} extracted")

            await page.close()

        finally:
            await browser.close()

    return screenshots, dimensions, all_elements, all_summaries

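# NOTE: The DOMElementExtractor interface relied on above is a sketch inferred
# from how it is called in this file; the actual utils.dom_element_extractor
# implementation may differ:
#
#   extractor = DOMElementExtractor()
#   elements = await extractor.extract_from_page_async(page, "desktop")
#   elements[0].to_dict()   # serializable dict of the element and its computed styles
#   extractor.summarize()   # summary dict including a "total_elements" count
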
def agent_2_node(state: Dict[str, Any]) -> Dict[str, Any]:
    """
    Capture screenshots AND extract DOM elements from website.

    This agent:
    1. Uses Playwright to load the website
    2. Captures full-page screenshots at Desktop and Mobile viewports
    3. Extracts ALL DOM elements with computed CSS styles
    4. Stores paths, dimensions, and elements in state
    """
    print("\n" + "="*60)
    print("Agent 2: Website Inspector - Website Analysis")
    print("="*60)

    website_url = state.get("website_url", "")
    execution_id = state.get("execution_id", "")
    figma_dims = state.get("figma_dimensions", {})
    logs = state.get("logs", [])

    try:
        # Get desktop width from Figma (if available) for consistent comparison
        desktop_width = 1440
        if "desktop" in figma_dims:
            desktop_width = figma_dims["desktop"].get("width", 1440)

        print(f"  URL: {website_url}")
        print(f"  Desktop width: {desktop_width}px (matching Figma)")

        # Capture and extract
        screenshots, dimensions, all_elements, all_summaries = asyncio.run(
            _capture_and_extract_async(
                website_url=website_url,
                output_dir="data/website",
                execution_id=execution_id,
                desktop_width=desktop_width,
                mobile_width=375
            )
        )

        if not screenshots:
            raise ValueError("Failed to capture any screenshots")

        # Log results
        print("\n  Website analysis complete")
        for viewport in screenshots:
            dims = dimensions.get(viewport, {})
            summary = all_summaries.get(viewport, {})
            logs.append(f"Website {viewport}: {dims.get('width', '?')}x{dims.get('height', '?')}px")
            logs.append(f"Website {viewport}: {summary.get('total_elements', 0)} elements")

        total_elements = sum(s.get('total_elements', 0) for s in all_summaries.values())
        print(f"  Total: {len(screenshots)} screenshots, {total_elements} elements")

        return {
            "website_screenshots": screenshots,
            "website_dimensions": dimensions,
            "website_elements": all_elements,
            "website_element_summary": all_summaries,
            "status": "website_captured",
            "logs": logs
        }

    except Exception as e:
        error_msg = f"Failed to analyze website: {str(e)}"
        print(f"\n  {error_msg}")
        logs.append(error_msg)

        import traceback
        traceback.print_exc()

        return {
            "status": "website_capture_failed",
            "error_message": error_msg,
            "logs": logs
        }
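

# Minimal manual smoke test (a sketch, not part of the pipeline): the URL and
# execution_id below are placeholders, and figma_dimensions is left empty so the
# agent falls back to its default 1440px desktop viewport.
if __name__ == "__main__":
    demo_state = {
        "website_url": "https://example.com",  # placeholder URL
        "execution_id": "local-test",          # placeholder run id
        "figma_dimensions": {},                # no Figma data: default widths apply
        "logs": [],
    }
    result = agent_2_node(demo_state)
    print(result.get("status"), result.get("website_dimensions"))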