| | """ |
| | Design System Extractor v2 — Main Application |
| | ============================================== |
| | |
| | Flow: |
| | 1. User enters URL |
| | 2. Agent 1 discovers pages → User confirms |
| | 3. Agent 1 extracts tokens (Desktop + Mobile) |
| | 4. Agent 2 normalizes tokens |
| | 5. Stage 1 UI: User reviews tokens (accept/reject, Desktop↔Mobile toggle) |
| | 6. Agent 3 proposes upgrades |
| | 7. Stage 2 UI: User selects options with live preview |
| | 8. Agent 4 generates JSON |
| | 9. Stage 3 UI: User exports |
| | """ |
| |
|
| | import os |
| | import asyncio |
| | import json |
| | import gradio as gr |
| | from datetime import datetime |
| | from typing import Optional |
| |
|
| | |
# Read once at import time; the Configuration accordion's save handler can
# override it later via os.environ.
HF_TOKEN_FROM_ENV = os.getenv("HF_TOKEN", "")
| |
|
| | |
| | |
| | |
| |
|
class AppState:
    """Global application state shared by every Gradio callback.

    Holds the discovered pages, raw and normalized token sets for both
    viewports, the user's upgrade selections, and a rolling log buffer.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Return every field to its pristine, pre-discovery value."""
        self.discovered_pages = []          # PageInfo objects from the crawler
        self.base_url = ""                  # site being analyzed
        self.desktop_raw = self.mobile_raw = None
        self.desktop_normalized = self.mobile_normalized = None
        self.upgrade_recommendations = None  # Agent 3 output
        self.selected_upgrades = {}          # user's Stage 2 choices
        self.logs = []                       # rolling, capped at 100 entries

    def log(self, message: str):
        """Append a timestamped line to the log, dropping the oldest past 100."""
        stamp = datetime.now().strftime("%H:%M:%S")
        self.logs.append(f"[{stamp}] {message}")
        while len(self.logs) > 100:
            self.logs.pop(0)

    def get_logs(self) -> str:
        """Render the log buffer as one newline-joined string."""
        return "\n".join(self.logs)
| |
|
# Module-level singleton mutated by all handlers below.
state = AppState()
| |
|
| |
|
| | |
| | |
| | |
| |
|
def get_crawler():
    """Lazily import and return the page-discovery agent module."""
    from agents import crawler
    return crawler
| |
|
def get_extractor():
    """Lazily import and return the token-extraction agent module."""
    from agents import extractor
    return extractor
| |
|
def get_normalizer():
    """Lazily import and return the token-normalization agent module."""
    from agents import normalizer
    return normalizer
| |
|
def get_advisor():
    """Lazily import and return the upgrade-advisor agent module."""
    from agents import advisor
    return advisor
| |
|
def get_schema():
    """Lazily import and return the token schema module."""
    from core import token_schema
    return token_schema
| |
|
| |
|
| | |
| | |
| | |
| |
|
async def discover_pages(url: str, progress=gr.Progress()):
    """Discover pages from URL.

    Resets global state, runs the crawler's PageDiscoverer on *url*, and
    returns a (status_markdown, log_text, pages_rows) tuple for the Gradio
    outputs. pages_rows is a list of [selected, url, title, type, status]
    rows (pre-checked for extraction), or None on failure.
    """
    state.reset()

    if not url or not url.startswith(("http://", "https://")):
        return "❌ Please enter a valid URL", "", None

    state.log(f"🚀 Starting discovery for: {url}")
    progress(0.1, desc="🔍 Discovering pages...")

    try:
        crawler = get_crawler()
        discoverer = crawler.PageDiscoverer()

        pages = await discoverer.discover(url)

        state.discovered_pages = pages
        state.base_url = url

        state.log(f"✅ Found {len(pages)} pages")

        # One table row per page, matching the pages_table headers.
        pages_data = []
        for page in pages:
            pages_data.append([
                True,
                page.url,
                page.title if page.title else "(No title)",
                page.page_type.value,
                "✓" if not page.error else f"⚠ {page.error}"
            ])

        progress(1.0, desc="✅ Discovery complete!")

        status = f"✅ Found {len(pages)} pages. Review and click 'Extract Tokens' to continue."

        return status, state.get_logs(), pages_data

    except Exception as e:
        import traceback
        state.log(f"❌ Error: {str(e)}")
        # FIX: traceback was imported but never used — record the stack so
        # failures are debuggable from the UI log (matches extract_tokens).
        state.log(traceback.format_exc())
        return f"❌ Error: {str(e)}", state.get_logs(), None
| |
|
| |
|
| | |
| | |
| | |
| |
|
async def extract_tokens(pages_data, progress=gr.Progress()):
    """Extract tokens from selected pages (both viewports).

    ``pages_data`` may arrive from Gradio as a pandas DataFrame, a dict
    payload (``{"data": [...]}``), or a plain list of rows depending on the
    Gradio version; all three forms are handled below. Returns a
    (status_markdown, log_text, desktop_display, mobile_display) tuple where
    the display payloads come from format_tokens_for_display, or None on
    failure.
    """

    state.log(f"📥 Received pages_data type: {type(pages_data)}")

    if pages_data is None:
        return "❌ Please discover pages first", state.get_logs(), None, None

    # -- Parse the selection table into a list of URLs ------------------------
    selected_urls = []

    try:
        # pandas DataFrame — duck-typed via iterrows to avoid importing pandas.
        if hasattr(pages_data, 'iterrows'):
            state.log(f"📥 DataFrame with {len(pages_data)} rows, columns: {list(pages_data.columns)}")

            for idx, row in pages_data.iterrows():
                try:
                    # Prefer named columns; fall back to positional access.
                    is_selected = row.get('Select', row.iloc[0] if len(row) > 0 else False)
                    url = row.get('URL', row.iloc[1] if len(row) > 1 else '')
                except Exception:
                    # FIX: was a bare `except:` which would also swallow
                    # KeyboardInterrupt/SystemExit; narrowed to Exception.
                    is_selected = row.iloc[0] if len(row) > 0 else False
                    url = row.iloc[1] if len(row) > 1 else ''

                if is_selected and url:
                    selected_urls.append(url)

        # Gradio dict payload: {"data": [[selected, url, ...], ...]}
        elif isinstance(pages_data, dict):
            state.log(f"📥 Dict with keys: {list(pages_data.keys())}")
            data = pages_data.get('data', [])
            for row in data:
                if isinstance(row, (list, tuple)) and len(row) >= 2 and row[0]:
                    selected_urls.append(row[1])

        # Plain list of rows.
        elif isinstance(pages_data, (list, tuple)):
            state.log(f"📥 List with {len(pages_data)} items")
            for row in pages_data:
                if isinstance(row, (list, tuple)) and len(row) >= 2 and row[0]:
                    selected_urls.append(row[1])

    except Exception as e:
        state.log(f"❌ Error parsing pages_data: {str(e)}")
        import traceback
        state.log(traceback.format_exc())

    state.log(f"📋 Found {len(selected_urls)} selected URLs")

    # Fallback: if parsing produced nothing, extract from everything discovered.
    if not selected_urls and state.discovered_pages:
        state.log("⚠️ No URLs from table, using all discovered pages")
        selected_urls = [p.url for p in state.discovered_pages if not p.error][:10]

    if not selected_urls:
        return "❌ No pages selected. Please select pages or rediscover.", state.get_logs(), None, None

    # Hard cap to keep extraction time bounded.
    selected_urls = selected_urls[:10]

    state.log(f"📋 Extracting from {len(selected_urls)} pages:")
    for url in selected_urls[:3]:
        state.log(f" • {url}")
    if len(selected_urls) > 3:
        state.log(f" ... and {len(selected_urls) - 3} more")

    progress(0.05, desc="🚀 Starting extraction...")

    try:
        schema = get_schema()
        extractor_mod = get_extractor()
        normalizer_mod = get_normalizer()

        # -- Desktop pass (progress 0.10 → 0.45) ------------------------------
        state.log("")
        state.log("🖥️ DESKTOP EXTRACTION (1440px)")
        progress(0.1, desc="🖥️ Extracting desktop tokens...")

        desktop_extractor = extractor_mod.TokenExtractor(viewport=schema.Viewport.DESKTOP)

        def desktop_progress(p):
            progress(0.1 + (p * 0.35), desc=f"🖥️ Desktop... {int(p*100)}%")

        state.desktop_raw = await desktop_extractor.extract(selected_urls, progress_callback=desktop_progress)

        state.log(f" Raw: {len(state.desktop_raw.colors)} colors, {len(state.desktop_raw.typography)} typography, {len(state.desktop_raw.spacing)} spacing")

        state.log(" Normalizing...")
        state.desktop_normalized = normalizer_mod.normalize_tokens(state.desktop_raw)
        state.log(f" Normalized: {len(state.desktop_normalized.colors)} colors, {len(state.desktop_normalized.typography)} typography, {len(state.desktop_normalized.spacing)} spacing")

        # -- Mobile pass (progress 0.50 → 0.85) -------------------------------
        state.log("")
        state.log("📱 MOBILE EXTRACTION (375px)")
        progress(0.5, desc="📱 Extracting mobile tokens...")

        mobile_extractor = extractor_mod.TokenExtractor(viewport=schema.Viewport.MOBILE)

        def mobile_progress(p):
            progress(0.5 + (p * 0.35), desc=f"📱 Mobile... {int(p*100)}%")

        state.mobile_raw = await mobile_extractor.extract(selected_urls, progress_callback=mobile_progress)

        state.log(f" Raw: {len(state.mobile_raw.colors)} colors, {len(state.mobile_raw.typography)} typography, {len(state.mobile_raw.spacing)} spacing")

        state.log(" Normalizing...")
        state.mobile_normalized = normalizer_mod.normalize_tokens(state.mobile_raw)
        state.log(f" Normalized: {len(state.mobile_normalized.colors)} colors, {len(state.mobile_normalized.typography)} typography, {len(state.mobile_normalized.spacing)} spacing")

        progress(0.95, desc="📊 Preparing results...")

        # Convert both token sets into Gradio table payloads.
        desktop_data = format_tokens_for_display(state.desktop_normalized)
        mobile_data = format_tokens_for_display(state.mobile_normalized)

        state.log("")
        state.log("=" * 50)
        state.log("✅ EXTRACTION COMPLETE!")
        state.log("=" * 50)

        progress(1.0, desc="✅ Complete!")

        status = f"""## ✅ Extraction Complete!

| Viewport | Colors | Typography | Spacing |
|----------|--------|------------|---------|
| Desktop | {len(state.desktop_normalized.colors)} | {len(state.desktop_normalized.typography)} | {len(state.desktop_normalized.spacing)} |
| Mobile | {len(state.mobile_normalized.colors)} | {len(state.mobile_normalized.typography)} | {len(state.mobile_normalized.spacing)} |

**Next:** Review the tokens below. Accept or reject, then proceed to Stage 2.
"""

        return status, state.get_logs(), desktop_data, mobile_data

    except Exception as e:
        import traceback
        state.log(f"❌ Error: {str(e)}")
        state.log(traceback.format_exc())
        return f"❌ Error: {str(e)}", state.get_logs(), None, None
| |
|
| |
|
def format_tokens_for_display(normalized) -> dict:
    """Convert a normalized token bundle into row lists for the Gradio tables.

    Returns a dict with "colors", "typography" and "spacing" keys whose values
    are lists of rows matching the corresponding Dataframe headers. A None
    input yields empty row lists.
    """
    if normalized is None:
        return {"colors": [], "typography": [], "spacing": []}

    def _as_list(collection):
        # Normalized collections may be dicts keyed by name or plain lists.
        return list(collection.values()) if isinstance(collection, dict) else collection

    # Colors: top 50 by frequency, most frequent first.
    color_rows = []
    for tok in sorted(_as_list(normalized.colors), key=lambda c: c.frequency, reverse=True)[:50]:
        color_rows.append([
            True,
            tok.value,
            tok.suggested_name or "",
            tok.frequency,
            tok.confidence.value if tok.confidence else "medium",
            f"{tok.contrast_white:.1f}:1" if tok.contrast_white else "N/A",
            "✓" if tok.wcag_aa_small_text else "✗",
            ", ".join(tok.contexts[:2]) if tok.contexts else "",
        ])

    # Typography: top 30 by frequency.
    typo_rows = []
    for tok in sorted(_as_list(normalized.typography), key=lambda t: t.frequency, reverse=True)[:30]:
        typo_rows.append([
            True,
            tok.font_family,
            tok.font_size,
            str(tok.font_weight),
            tok.line_height or "",
            tok.suggested_name or "",
            tok.frequency,
            tok.confidence.value if tok.confidence else "medium",
        ])

    # Spacing: first 20, ascending by pixel value.
    spacing_rows = []
    for tok in sorted(_as_list(normalized.spacing), key=lambda s: s.value_px)[:20]:
        spacing_rows.append([
            True,
            tok.value,
            f"{tok.value_px}px",
            tok.suggested_name or "",
            tok.frequency,
            "✓" if tok.fits_base_8 else "",
            tok.confidence.value if tok.confidence else "medium",
        ])

    return {
        "colors": color_rows,
        "typography": typo_rows,
        "spacing": spacing_rows,
    }
| |
|
| |
|
def switch_viewport(viewport: str):
    """Swap the Stage 1 tables between desktop and mobile token sets.

    Returns the (colors, typography, spacing) row lists for the chosen
    viewport.
    """
    source = (state.desktop_normalized
              if viewport == "Desktop (1440px)"
              else state.mobile_normalized)
    tables = format_tokens_for_display(source)
    return tables["colors"], tables["typography"], tables["spacing"]
| |
|
| |
|
| | |
| | |
| | |
| |
|
async def run_stage2_analysis(progress=gr.Progress()):
    """Run Agent 3 analysis on extracted tokens.

    Returns a 7-tuple matching the Gradio outputs: (status markdown,
    log text, typography comparison rows, spacing comparison rows,
    color-ramps markdown, radius markdown, shadows markdown).
    """
    # Stage 2 needs both viewports from Stage 1.
    if not state.desktop_normalized or not state.mobile_normalized:
        return ("❌ Please complete Stage 1 first", "", None, None, "", "", "")

    state.log("")
    state.log("=" * 50)
    state.log("🧠 STAGE 2: AI-POWERED ANALYSIS")
    state.log("=" * 50)

    progress(0.1, desc="🤖 Starting AI analysis...")

    try:
        advisor_mod = get_advisor()

        state.log("🔍 Analyzing design patterns...")
        progress(0.3, desc="🔍 Analyzing patterns...")

        # Agent 3 streams progress back through state.log.
        recommendations = await advisor_mod.analyze_design_system(
            desktop_tokens=state.desktop_normalized,
            mobile_tokens=state.mobile_normalized,
            log_callback=state.log,
        )

        # Kept for apply_selected_upgrades / later export.
        state.upgrade_recommendations = recommendations

        progress(0.9, desc="📊 Preparing recommendations...")

        # Build each Stage 2 UI section from the recommendations and
        # the current (desktop) tokens.
        typography_data = format_typography_comparison(recommendations)

        spacing_data = format_spacing_comparison(recommendations)

        color_ramps_md = format_color_ramps_display(recommendations)

        radius_md = format_radius_display()

        shadows_md = format_shadows_display()

        state.log("✅ Analysis complete!")
        progress(1.0, desc="✅ Complete!")

        # chr(10) is '\n' — f-string expressions cannot contain backslashes
        # on older Python versions, hence the workaround.
        status = f"""## 🧠 AI Analysis Complete!

### LLM Recommendation
{recommendations.llm_rationale if recommendations.llm_rationale else "Using rule-based recommendations."}

### Detected Patterns
{chr(10).join(['• ' + p for p in recommendations.detected_patterns]) if recommendations.detected_patterns else '• Standard design patterns detected'}

**Review the options below and select your preferences.**
"""

        return (status, state.get_logs(), typography_data, spacing_data,
                color_ramps_md, radius_md, shadows_md)

    except Exception as e:
        import traceback
        state.log(f"❌ Error: {str(e)}")
        state.log(traceback.format_exc())
        return (f"❌ Analysis failed: {str(e)}", state.get_logs(), None, None, "", "", "")
| |
|
| |
|
def format_typography_comparison(recommendations) -> list:
    """Build rows comparing current desktop type sizes to three scale options.

    Each row is [element, current, scale-1.2, scale-1.25, scale-1.333];
    missing slots are rendered as an em dash. Returns [] before Stage 1 runs.
    """
    if not state.desktop_normalized:
        return []

    def _px(style) -> float:
        # Strip common units; empty string falls back to 16 (browser default).
        raw = style.font_size.replace('px', '').replace('rem', '').replace('em', '')
        return float(raw or 16)

    styles = sorted(state.desktop_normalized.typography.values(), key=_px, reverse=True)
    sizes = [_px(style) for style in styles]

    # Body base: first size in the 14–18px range, else 16.
    base = next((size for size in sizes if 14 <= size <= 18), 16)

    # Candidate modular scales (minor third, major third, perfect fourth).
    candidates = [generate_type_scale_values(base, ratio) for ratio in (1.2, 1.25, 1.333)]

    rows = []
    for i, label in enumerate(["Display", "H1", "H2", "H3", "Body", "Caption", "Small"]):
        row = [label, f"{int(sizes[i])}px" if i < len(sizes) else "—"]
        for scale in candidates:
            row.append(f"{scale[i]}px" if i < len(scale) else "—")
        rows.append(row)

    return rows
| |
|
| |
|
def generate_type_scale_values(base: float, ratio: float) -> list:
    """Generate a 7-step modular type scale around *base*, largest first.

    Steps are base*ratio^4 .. base*ratio^1, then base itself, then
    base/ratio^1 and base/ratio^2 — one value per display element
    (Display, H1, H2, H3, Body, Caption, Small). All values are truncated
    to ints.

    FIX: the loop previously ran ``range(4, -1, -1)``, which includes i=0
    (= base) *and* then appended ``int(base)`` again — an 8-element list
    with a duplicated base, making Caption render the same size as Body.
    """
    scales = []
    # Four up-steps above the base.
    for i in range(4, 0, -1):
        scales.append(int(base * (ratio ** i)))
    # The base itself (body text).
    scales.append(int(base))
    # Two down-steps below the base (caption, small).
    for i in range(1, 3):
        scales.append(int(base / (ratio ** i)))
    return scales
| |
|
| |
|
def format_spacing_comparison(recommendations) -> list:
    """Build rows comparing current spacing values to 8px/4px grid snapping.

    Each row is [current, 8px-grid, 4px-grid]; a snapped cell that equals
    the current value is marked with a check. Returns [] before Stage 1 runs.
    """
    if not state.desktop_normalized:
        return []

    tokens = sorted(state.desktop_normalized.spacing.values(), key=lambda tok: tok.value_px)

    rows = []
    for tok in tokens[:10]:
        row = [f"{tok.value_px}px"]
        for grid in (8, 4):
            snapped = snap_to_grid(tok.value_px, grid)
            cell = f"{snapped}px"
            if snapped == tok.value_px:
                cell += " ✓"  # already grid-aligned
            row.append(cell)
        rows.append(row)

    return rows
| |
|
| |
|
def snap_to_grid(value: float, base: int) -> int:
    """Round *value* to the nearest multiple of *base*.

    Midpoints follow Python's round() (banker's rounding on the quotient).
    """
    return base * round(value / base)
| |
|
| |
|
def format_color_ramps_display(recommendations) -> str:
    """Render the proposed color ramps as markdown.

    Shows a sample of five shades (50/200/500/700/900) per ramp; shades
    missing from the ramp dict render as '?'. Falls back to a placeholder
    message when there is nothing to show.
    """
    if not recommendations or not recommendations.color_ramps:
        return "No color ramps to generate."

    chunks = []
    for option in recommendations.color_ramps:
        vals = option.values
        role = vals.get("role", "unknown")
        ramp = vals.get("ramp", {})

        # Sample five representative shades from the ramp.
        swatches = " → ".join(
            f"`{ramp.get(f'{role}.{step}', '?')}`"
            for step in ("50", "200", "500", "700", "900")
        )

        chunks.append(f"**{role.title()}** (base: {vals.get('base_color', '')})")
        chunks.append(f" {swatches}")
        chunks.append("")

    return "\n".join(chunks) if chunks else "No color ramps to generate."
| |
|
| |
|
def format_radius_display() -> str:
    """Render the detected border-radius values (up to five) as markdown."""
    normalized = state.desktop_normalized
    if not normalized or not normalized.radius:
        return "*No border radius values detected.*"

    sample = [tok.value for tok in list(normalized.radius.values())[:5]]
    return f"**Detected:** {', '.join(sample)}\n\n*Radius values will be preserved as-is.*"
| |
|
| |
|
def format_shadows_display() -> str:
    """Render a one-line summary of how many shadow styles were detected."""
    normalized = state.desktop_normalized
    if not normalized or not normalized.shadows:
        return "*No shadow values detected.*"

    total = len(normalized.shadows)
    return f"**Detected:** {total} shadow style(s)\n\n*Shadow values will be preserved as-is.*"
| |
|
| |
|
def apply_selected_upgrades(type_choice: str, spacing_choice: str, apply_ramps: bool):
    """Record the user's Stage 2 choices on the global state.

    Returns (status markdown, log text) for the Gradio outputs. Requires
    run_stage2_analysis to have populated state.upgrade_recommendations.
    """
    if not state.upgrade_recommendations:
        return "❌ Run analysis first", ""

    state.log("✨ Applying selected upgrades...")

    # The export step reads these selections later.
    state.selected_upgrades = {
        "type_scale": type_choice,
        "spacing": spacing_choice,
        "color_ramps": apply_ramps,
    }

    for line in (
        f" Type Scale: {type_choice}",
        f" Spacing: {spacing_choice}",
        f" Color Ramps: {'Yes' if apply_ramps else 'No'}",
        "✅ Upgrades applied! Proceed to Stage 3 for export.",
    ):
        state.log(line)

    return "✅ Upgrades applied! Proceed to Stage 3 to export.", state.get_logs()
| |
|
| |
|
def export_stage1_json():
    """Serialize the Stage 1 (as-extracted) tokens to a pretty-printed JSON string.

    Colors and radius come from the desktop set only; typography and spacing
    are exported per viewport. default=str keeps any non-JSON-native values
    (e.g. enums) serializable.
    """
    result = {
        "metadata": {
            "source_url": state.base_url,
            "extracted_at": datetime.now().isoformat(),
            "version": "v1-stage1-extracted",
            "stage": "extraction",
        },
        "colors": {},
        "typography": {
            "desktop": {},
            "mobile": {},
        },
        "spacing": {
            "desktop": {},
            "mobile": {},
        },
        "radius": {},
    }

    # Colors and radius: desktop only, keyed by suggested name (or raw value/key).
    if state.desktop_normalized:
        for c in state.desktop_normalized.colors.values():
            result["colors"][c.suggested_name or c.value] = {
                "value": c.value,
                "frequency": c.frequency,
                "confidence": c.confidence.value if c.confidence else "medium",
                "contexts": c.contexts[:3],
            }
        for radius_name, r in state.desktop_normalized.radius.items():
            result["radius"][radius_name] = {
                "value": r.value,
                "frequency": r.frequency,
            }

    # Typography and spacing: per viewport.
    for viewport, normalized in (("desktop", state.desktop_normalized),
                                 ("mobile", state.mobile_normalized)):
        if not normalized:
            continue

        for t in normalized.typography.values():
            key = t.suggested_name or f"{t.font_family}-{t.font_size}"
            result["typography"][viewport][key] = {
                "font_family": t.font_family,
                "font_size": t.font_size,
                "font_weight": t.font_weight,
                "line_height": t.line_height,
                "frequency": t.frequency,
            }

        for s in normalized.spacing.values():
            key = s.suggested_name or s.value
            result["spacing"][viewport][key] = {
                "value": s.value,
                "value_px": s.value_px,
                "fits_base_8": s.fits_base_8,
                "frequency": s.frequency,
            }

    return json.dumps(result, indent=2, default=str)
| |
|
| |
|
def export_tokens_json():
    """Serialize the final (upgraded) tokens to a pretty-printed JSON string.

    Produces {"metadata": ..., "desktop": ..., "mobile": ...} with per-viewport
    color/typography/spacing lists; a missing viewport stays None.

    FIX: the normalized token collections are dicts keyed by name elsewhere in
    this file (export_stage1_json iterates .items(); format_tokens_for_display
    coerces via .values()), but this function iterated them directly — yielding
    key strings and crashing on attribute access. Coerce to value lists the
    same way format_tokens_for_display does.
    """

    def _tokens(collection):
        # Accept both dict-of-tokens and plain list of tokens.
        return list(collection.values()) if isinstance(collection, dict) else collection

    def _viewport_payload(normalized):
        # Flatten one viewport's normalized tokens into plain JSON-able dicts.
        return {
            "colors": [
                {"value": c.value, "name": c.suggested_name, "frequency": c.frequency,
                 "confidence": c.confidence.value if c.confidence else "medium"}
                for c in _tokens(normalized.colors)
            ],
            "typography": [
                {"font_family": t.font_family, "font_size": t.font_size,
                 "font_weight": t.font_weight, "line_height": t.line_height,
                 "name": t.suggested_name, "frequency": t.frequency}
                for t in _tokens(normalized.typography)
            ],
            "spacing": [
                {"value": s.value, "value_px": s.value_px, "name": s.suggested_name,
                 "frequency": s.frequency, "fits_base_8": s.fits_base_8}
                for s in _tokens(normalized.spacing)
            ],
        }

    result = {
        "metadata": {
            "source_url": state.base_url,
            "extracted_at": datetime.now().isoformat(),
            "version": "v1-extracted",
        },
        "desktop": None,
        "mobile": None,
    }

    if state.desktop_normalized:
        result["desktop"] = _viewport_payload(state.desktop_normalized)

    if state.mobile_normalized:
        result["mobile"] = _viewport_payload(state.mobile_normalized)

    return json.dumps(result, indent=2, default=str)
| |
|
| |
|
| | |
| | |
| | |
| |
|
def create_ui():
    """Create the Gradio interface.

    Builds the full Blocks app — configuration accordion, page discovery,
    Stage 1 (token review), Stage 2 (AI upgrades), Stage 3 (export) — then
    wires the event handlers at the bottom. Returns the gr.Blocks app.
    """

    with gr.Blocks(
        title="Design System Extractor v2",
        theme=gr.themes.Soft(),
        css="""
        .color-swatch { display: inline-block; width: 24px; height: 24px; border-radius: 4px; margin-right: 8px; vertical-align: middle; }
        """
    ) as app:

        gr.Markdown("""
        # 🎨 Design System Extractor v2

        **Reverse-engineer design systems from live websites.**

        A semi-automated, human-in-the-loop system that extracts, normalizes, and upgrades design tokens.

        ---
        """)

        # ---- Configuration --------------------------------------------------
        # Opens automatically only when no HF token was found in the env.
        with gr.Accordion("⚙️ Configuration", open=not bool(HF_TOKEN_FROM_ENV)):
            gr.Markdown("**HuggingFace Token** — Required for Stage 2 (AI upgrades)")
            with gr.Row():
                hf_token_input = gr.Textbox(
                    label="HF Token", placeholder="hf_xxxx", type="password",
                    scale=4, value=HF_TOKEN_FROM_ENV,
                )
                save_token_btn = gr.Button("💾 Save", scale=1)
            token_status = gr.Markdown("✅ Token loaded" if HF_TOKEN_FROM_ENV else "⏳ Enter token")

            def save_token(token):
                # Persist into the process env so downstream agents pick it up.
                if token and len(token) > 10:
                    os.environ["HF_TOKEN"] = token.strip()
                    return "✅ Token saved!"
                return "❌ Invalid token"

            save_token_btn.click(save_token, [hf_token_input], [token_status])

        # ---- Step 1: Discover pages ----------------------------------------
        with gr.Accordion("🔍 Step 1: Discover Pages", open=True):
            gr.Markdown("Enter your website URL to discover pages for extraction.")

            with gr.Row():
                url_input = gr.Textbox(label="Website URL", placeholder="https://example.com", scale=4)
                discover_btn = gr.Button("🔍 Discover Pages", variant="primary", scale=1)

            discover_status = gr.Markdown("")

            with gr.Row():
                log_output = gr.Textbox(label="📋 Log", lines=8, interactive=False)

            # Hidden until discovery succeeds (revealed in discover_btn.click).
            pages_table = gr.Dataframe(
                headers=["Select", "URL", "Title", "Type", "Status"],
                datatype=["bool", "str", "str", "str", "str"],
                label="Discovered Pages",
                interactive=True,
                visible=False,
            )

            extract_btn = gr.Button("🚀 Extract Tokens (Desktop + Mobile)", variant="primary", visible=False)

        # ---- Stage 1: Review extracted tokens ------------------------------
        with gr.Accordion("📊 Stage 1: Review Extracted Tokens", open=False) as stage1_accordion:

            extraction_status = gr.Markdown("")

            gr.Markdown("""
            **Review the extracted tokens.** Toggle between Desktop and Mobile viewports.
            Accept or reject tokens, then proceed to Stage 2 for AI-powered upgrades.
            """)

            viewport_toggle = gr.Radio(
                choices=["Desktop (1440px)", "Mobile (375px)"],
                value="Desktop (1440px)",
                label="Viewport",
            )

            with gr.Tabs():
                with gr.Tab("🎨 Colors"):
                    colors_table = gr.Dataframe(
                        headers=["Accept", "Color", "Suggested Name", "Frequency", "Confidence", "Contrast", "AA", "Context"],
                        datatype=["bool", "str", "str", "number", "str", "str", "str", "str"],
                        label="Colors",
                        interactive=True,
                    )

                with gr.Tab("📝 Typography"):
                    typography_table = gr.Dataframe(
                        headers=["Accept", "Font", "Size", "Weight", "Line Height", "Suggested Name", "Frequency", "Confidence"],
                        datatype=["bool", "str", "str", "str", "str", "str", "number", "str"],
                        label="Typography",
                        interactive=True,
                    )

                with gr.Tab("📏 Spacing"):
                    spacing_table = gr.Dataframe(
                        headers=["Accept", "Value", "Pixels", "Suggested Name", "Frequency", "Base 8", "Confidence"],
                        datatype=["bool", "str", "str", "str", "number", "str", "str"],
                        label="Spacing",
                        interactive=True,
                    )

                with gr.Tab("🔘 Radius"):
                    # NOTE(review): no handler currently populates this table.
                    radius_table = gr.Dataframe(
                        headers=["Accept", "Value", "Frequency", "Context"],
                        datatype=["bool", "str", "number", "str"],
                        label="Border Radius",
                        interactive=True,
                    )

            with gr.Row():
                proceed_stage2_btn = gr.Button("➡️ Proceed to Stage 2: AI Upgrades", variant="primary")
                # NOTE(review): download_stage1_btn has no click handler wired;
                # Stage 1 export is reachable via the Stage 3 accordion instead.
                download_stage1_btn = gr.Button("📥 Download Stage 1 JSON", variant="secondary")

        # ---- Stage 2: AI-powered upgrades ----------------------------------
        with gr.Accordion("🧠 Stage 2: AI-Powered Upgrades", open=False) as stage2_accordion:

            stage2_status = gr.Markdown("Click 'Analyze' to start AI-powered design system analysis.")

            analyze_btn = gr.Button("🤖 Analyze Design System", variant="primary")

            with gr.Accordion("📋 AI Analysis Log", open=False):
                stage2_log = gr.Textbox(label="Log", lines=8, interactive=False)

            # Typography comparison + scale choice.
            gr.Markdown("---")
            gr.Markdown("## 📐 Typography")

            with gr.Row():
                with gr.Column(scale=2):
                    gr.Markdown("### Current vs Recommended Type Scales")
                    typography_comparison = gr.Dataframe(
                        headers=["Element", "Current", "Scale 1.2", "Scale 1.25 ⭐", "Scale 1.333"],
                        datatype=["str", "str", "str", "str", "str"],
                        label="Typography Comparison",
                        interactive=False,
                    )

                with gr.Column(scale=1):
                    gr.Markdown("### Select Option")
                    type_scale_radio = gr.Radio(
                        choices=["Keep Current", "Scale 1.2 (Minor Third)", "Scale 1.25 (Major Third) ⭐", "Scale 1.333 (Perfect Fourth)"],
                        value="Keep Current",
                        label="Type Scale",
                        interactive=True,
                    )
                    gr.Markdown("*Font family will be preserved*")

            # Color ramp opt-in.
            gr.Markdown("---")
            gr.Markdown("## 🎨 Colors")

            gr.Markdown("Generate full color ramps (50-900 shades) from detected base colors:")
            color_ramps_display = gr.Markdown("")

            color_ramps_checkbox = gr.Checkbox(
                label="✓ Generate color ramps (base colors preserved, adds tints/shades)",
                value=True,
            )

            # Spacing comparison + grid choice.
            gr.Markdown("---")
            gr.Markdown("## 📏 Spacing")

            with gr.Row():
                with gr.Column(scale=2):
                    gr.Markdown("### Current vs Grid-Aligned")
                    spacing_comparison = gr.Dataframe(
                        headers=["Current", "8px Grid", "4px Grid"],
                        datatype=["str", "str", "str"],
                        label="Spacing Comparison",
                        interactive=False,
                    )

                with gr.Column(scale=1):
                    gr.Markdown("### Select Option")
                    spacing_radio = gr.Radio(
                        choices=["Keep Current", "8px Base Grid ⭐", "4px Base Grid"],
                        value="Keep Current",
                        label="Spacing System",
                        interactive=True,
                    )

            # Radius and shadows are informational only (no options).
            gr.Markdown("---")
            gr.Markdown("## 🔘 Border Radius")

            radius_display = gr.Markdown("*Radius values detected. No changes recommended.*")

            gr.Markdown("---")
            gr.Markdown("## 🌫️ Shadows")

            shadows_display = gr.Markdown("*Shadow values detected. No changes recommended.*")

            gr.Markdown("---")

            with gr.Row():
                apply_upgrades_btn = gr.Button("✨ Apply Selected Upgrades", variant="primary", scale=2)
                # NOTE(review): reset_btn has no click handler wired yet.
                reset_btn = gr.Button("↩️ Reset to Original", variant="secondary", scale=1)

            apply_status = gr.Markdown("")

        # ---- Stage 3: Export -----------------------------------------------
        with gr.Accordion("📦 Stage 3: Export", open=False):
            gr.Markdown("""
            Export your design tokens to JSON (compatible with Figma Tokens Studio).

            - **Stage 1 JSON**: Raw extracted tokens (as-is)
            - **Final JSON**: Upgraded tokens with selected improvements
            """)

            with gr.Row():
                export_stage1_btn = gr.Button("📥 Export Stage 1 (As-Is)", variant="secondary")
                export_final_btn = gr.Button("📥 Export Final (Upgraded)", variant="primary")

            export_output = gr.Code(label="Tokens JSON", language="json", lines=25)

            export_stage1_btn.click(export_stage1_json, outputs=[export_output])
            export_final_btn.click(export_tokens_json, outputs=[export_output])

        # ---- Event wiring ---------------------------------------------------

        # Per-viewport table payloads passed between handler chains.
        desktop_data = gr.State({})
        mobile_data = gr.State({})

        # Discovery: run, then reveal the pages table and the extract button.
        discover_btn.click(
            fn=discover_pages,
            inputs=[url_input],
            outputs=[discover_status, log_output, pages_table],
        ).then(
            fn=lambda: (gr.update(visible=True), gr.update(visible=True)),
            outputs=[pages_table, extract_btn],
        )

        # Extraction: run, populate Stage 1 tables from the desktop payload,
        # then open the Stage 1 accordion.
        extract_btn.click(
            fn=extract_tokens,
            inputs=[pages_table],
            outputs=[extraction_status, log_output, desktop_data, mobile_data],
        ).then(
            fn=lambda d: (d.get("colors", []), d.get("typography", []), d.get("spacing", [])),
            inputs=[desktop_data],
            outputs=[colors_table, typography_table, spacing_table],
        ).then(
            fn=lambda: gr.update(open=True),
            outputs=[stage1_accordion],
        )

        # Desktop/Mobile toggle swaps the Stage 1 tables in place.
        viewport_toggle.change(
            fn=switch_viewport,
            inputs=[viewport_toggle],
            outputs=[colors_table, typography_table, spacing_table],
        )

        # Stage 2: analysis populates all comparison/summary widgets.
        analyze_btn.click(
            fn=run_stage2_analysis,
            outputs=[stage2_status, stage2_log, typography_comparison, spacing_comparison,
                     color_ramps_display, radius_display, shadows_display],
        )

        # Stage 2: record the user's upgrade selections.
        apply_upgrades_btn.click(
            fn=apply_selected_upgrades,
            inputs=[type_scale_radio, spacing_radio, color_ramps_checkbox],
            outputs=[apply_status, stage2_log],
        )

        # Stage 1 → Stage 2 navigation just opens the accordion.
        proceed_stage2_btn.click(
            fn=lambda: gr.update(open=True),
            outputs=[stage2_accordion],
        )

        gr.Markdown("""
        ---
        **Design System Extractor v2** | Built with Playwright + Gradio + LangGraph + HuggingFace

        *A semi-automated co-pilot for design system recovery and modernization.*
        """)

    return app
| |
|
| |
|
| | |
| | |
| | |
| |
|
if __name__ == "__main__":
    # Bind to all interfaces on 7860 (the standard HuggingFace Spaces port).
    app = create_ui()
    app.launch(server_name="0.0.0.0", server_port=7860)
| |
|