Spaces:
Sleeping
Sleeping
| """ | |
| Design System Extractor v2 — Main Application | |
| ============================================== | |
| Flow: | |
| 1. User enters URL | |
| 2. Agent 1 discovers pages → User confirms | |
| 3. Agent 1 extracts tokens (Desktop + Mobile) | |
| 4. Agent 2 normalizes tokens | |
| 5. Stage 1 UI: User reviews tokens (accept/reject, Desktop↔Mobile toggle) | |
| 6. Agent 3 proposes upgrades | |
| 7. Stage 2 UI: User selects options with live preview | |
| 8. Agent 4 generates JSON | |
| 9. Stage 3 UI: User exports | |
| """ | |
| import os | |
| import asyncio | |
| import json | |
| import gradio as gr | |
| from datetime import datetime | |
| from typing import Optional | |
| # Get HF token from environment | |
| HF_TOKEN_FROM_ENV = os.getenv("HF_TOKEN", "") | |
| # ============================================================================= | |
| # GLOBAL STATE | |
| # ============================================================================= | |
class AppState:
    """Global mutable state shared by every Gradio callback in the app."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Restore every field to its initial, empty value."""
        self.discovered_pages = []           # pages found by Agent 1 discovery
        self.base_url = ""
        self.desktop_raw = None              # ExtractedTokens
        self.mobile_raw = None               # ExtractedTokens
        self.desktop_normalized = None       # NormalizedTokens
        self.mobile_normalized = None        # NormalizedTokens
        self.upgrade_recommendations = None  # UpgradeRecommendations
        self.selected_upgrades = {}          # User selections
        self.logs = []

    def log(self, message: str):
        """Append a timestamped message, retaining at most the last 100 lines."""
        stamp = datetime.now().strftime("%H:%M:%S")
        self.logs.append(f"[{stamp}] {message}")
        while len(self.logs) > 100:
            del self.logs[0]

    def get_logs(self) -> str:
        """Return all retained log lines joined with newlines."""
        return "\n".join(self.logs)


state = AppState()
| # ============================================================================= | |
| # LAZY IMPORTS | |
| # ============================================================================= | |
def get_crawler():
    """Lazily import and return the page-discovery agent module."""
    from agents import crawler
    return crawler
def get_extractor():
    """Lazily import and return the token-extraction agent module."""
    from agents import extractor
    return extractor
def get_normalizer():
    """Lazily import and return the token-normalization agent module."""
    from agents import normalizer
    return normalizer
def get_advisor():
    """Lazily import and return the upgrade-advisor agent module."""
    from agents import advisor
    return advisor
def get_schema():
    """Lazily import and return the shared token schema module."""
    from core import token_schema
    return token_schema
| # ============================================================================= | |
| # PHASE 1: DISCOVER PAGES | |
| # ============================================================================= | |
async def discover_pages(url: str, progress=gr.Progress()):
    """Discover pages from *url* and format them for the review table.

    Args:
        url: Absolute http(s) URL entered by the user.
        progress: Gradio progress reporter.

    Returns:
        A 3-tuple of (status message, log text, table rows), where each row
        is [selected, url, title, page_type, status], or rows=None on error.
    """
    state.reset()
    # Only absolute http(s) URLs are accepted.
    if not url or not url.startswith(("http://", "https://")):
        return "❌ Please enter a valid URL", "", None
    state.log(f"🚀 Starting discovery for: {url}")
    progress(0.1, desc="🔍 Discovering pages...")
    try:
        crawler = get_crawler()
        discoverer = crawler.PageDiscoverer()
        pages = await discoverer.discover(url)
        state.discovered_pages = pages
        state.base_url = url
        state.log(f"✅ Found {len(pages)} pages")
        # Format for display in the Gradio dataframe.
        pages_data = []
        for page in pages:
            pages_data.append([
                True,  # Selected by default
                page.url,
                page.title if page.title else "(No title)",
                page.page_type.value,
                "✓" if not page.error else f"⚠ {page.error}"
            ])
        progress(1.0, desc="✅ Discovery complete!")
        status = f"✅ Found {len(pages)} pages. Review and click 'Extract Tokens' to continue."
        return status, state.get_logs(), pages_data
    except Exception as e:
        # Fix: traceback was imported here but never used — log the full
        # traceback so failures are diagnosable from the UI log panel,
        # matching the error handling of the other phase handlers.
        import traceback
        state.log(f"❌ Error: {str(e)}")
        state.log(traceback.format_exc())
        return f"❌ Error: {str(e)}", state.get_logs(), None
| # ============================================================================= | |
| # PHASE 2: EXTRACT TOKENS | |
| # ============================================================================= | |
async def extract_tokens(pages_data, progress=gr.Progress()):
    """Extract design tokens from the selected pages at desktop and mobile widths.

    Args:
        pages_data: Rows from the discovery table. Gradio may deliver this as
            a pandas DataFrame, a dict with a "data" key, or a list of rows;
            each row starts with [selected, url, ...].
        progress: Gradio progress reporter.

    Returns:
        A 10-tuple: (status markdown, log text, desktop table data, mobile
        table data, typography preview HTML, colors preview HTML, semantic
        preview HTML, spacing preview HTML, radius preview HTML, shadows
        preview HTML). Every path returns this same arity.
    """
    # Placeholders for the six preview-HTML outputs, so early returns match
    # the 10-output arity of the success/error paths. (Previously the early
    # returns emitted only 4 values, which breaks the Gradio output bindings.)
    _EMPTY_PREVIEWS = ("", "", "", "", "", "")
    state.log(f"📥 Received pages_data type: {type(pages_data)}")
    if pages_data is None:
        return ("❌ Please discover pages first", state.get_logs(), None, None) + _EMPTY_PREVIEWS
    # Get selected URLs - handle pandas DataFrame
    selected_urls = []
    try:
        # Check if it's a pandas DataFrame
        if hasattr(pages_data, 'iterrows'):
            state.log(f"📥 DataFrame with {len(pages_data)} rows, columns: {list(pages_data.columns)}")
            for idx, row in pages_data.iterrows():
                # Get values by column name or position
                try:
                    # Try column names first
                    is_selected = row.get('Select', row.iloc[0] if len(row) > 0 else False)
                    url = row.get('URL', row.iloc[1] if len(row) > 1 else '')
                except Exception:  # was a bare except; don't swallow SystemExit/KeyboardInterrupt
                    # Fallback to positional
                    is_selected = row.iloc[0] if len(row) > 0 else False
                    url = row.iloc[1] if len(row) > 1 else ''
                if is_selected and url:
                    selected_urls.append(url)
        # If it's a dict (Gradio sometimes sends this)
        elif isinstance(pages_data, dict):
            state.log(f"📥 Dict with keys: {list(pages_data.keys())}")
            data = pages_data.get('data', [])
            for row in data:
                if isinstance(row, (list, tuple)) and len(row) >= 2 and row[0]:
                    selected_urls.append(row[1])
        # If it's a list
        elif isinstance(pages_data, (list, tuple)):
            state.log(f"📥 List with {len(pages_data)} items")
            for row in pages_data:
                if isinstance(row, (list, tuple)) and len(row) >= 2 and row[0]:
                    selected_urls.append(row[1])
    except Exception as e:
        state.log(f"❌ Error parsing pages_data: {str(e)}")
        import traceback
        state.log(traceback.format_exc())
    state.log(f"📋 Found {len(selected_urls)} selected URLs")
    # If still no URLs, try using stored discovered pages
    if not selected_urls and state.discovered_pages:
        state.log("⚠️ No URLs from table, using all discovered pages")
        selected_urls = [p.url for p in state.discovered_pages if not p.error][:10]
    if not selected_urls:
        return ("❌ No pages selected. Please select pages or rediscover.", state.get_logs(), None, None) + _EMPTY_PREVIEWS
    # Limit to 10 pages for performance
    selected_urls = selected_urls[:10]
    state.log(f"📋 Extracting from {len(selected_urls)} pages:")
    for url in selected_urls[:3]:
        state.log(f" • {url}")
    if len(selected_urls) > 3:
        state.log(f" ... and {len(selected_urls) - 3} more")
    progress(0.05, desc="🚀 Starting extraction...")
    try:
        schema = get_schema()
        extractor_mod = get_extractor()
        normalizer_mod = get_normalizer()
        # === DESKTOP EXTRACTION ===
        state.log("")
        state.log("=" * 60)
        state.log("🖥️ DESKTOP EXTRACTION (1440px)")
        state.log("=" * 60)
        state.log("")
        state.log("📡 Enhanced extraction from 7 sources:")
        state.log(" 1. DOM computed styles (getComputedStyle)")
        state.log(" 2. CSS variables (:root { --color: })")
        state.log(" 3. SVG colors (fill, stroke)")
        state.log(" 4. Inline styles (style='color:')")
        state.log(" 5. Stylesheet rules (CSS files)")
        state.log(" 6. External CSS files (fetch & parse)")
        state.log(" 7. Page content scan (brute-force)")
        state.log("")
        progress(0.1, desc="🖥️ Extracting desktop tokens...")
        desktop_extractor = extractor_mod.TokenExtractor(viewport=schema.Viewport.DESKTOP)

        def desktop_progress(p):
            # Map extractor progress into the 0.10-0.45 band of the bar.
            progress(0.1 + (p * 0.35), desc=f"🖥️ Desktop... {int(p*100)}%")

        state.desktop_raw = await desktop_extractor.extract(selected_urls, progress_callback=desktop_progress)
        # Log extraction details
        state.log("📊 EXTRACTION RESULTS:")
        state.log(f" Colors: {len(state.desktop_raw.colors)} unique")
        state.log(f" Typography: {len(state.desktop_raw.typography)} styles")
        state.log(f" Spacing: {len(state.desktop_raw.spacing)} values")
        state.log(f" Radius: {len(state.desktop_raw.radius)} values")
        state.log(f" Shadows: {len(state.desktop_raw.shadows)} values")
        # Log CSS variables if found
        if hasattr(desktop_extractor, 'css_variables') and desktop_extractor.css_variables:
            state.log("")
            state.log(f"🎨 CSS Variables found: {len(desktop_extractor.css_variables)}")
            for var_name, var_value in list(desktop_extractor.css_variables.items())[:5]:
                state.log(f" {var_name}: {var_value}")
            if len(desktop_extractor.css_variables) > 5:
                state.log(f" ... and {len(desktop_extractor.css_variables) - 5} more")
        # Log warnings if any
        if desktop_extractor.warnings:
            state.log("")
            state.log("⚠️ Warnings:")
            for w in desktop_extractor.warnings[:3]:
                state.log(f" {w}")
        # Normalize desktop
        state.log("")
        state.log("🔄 Normalizing (deduping, naming)...")
        state.desktop_normalized = normalizer_mod.normalize_tokens(state.desktop_raw)
        state.log(f" ✅ Normalized: {len(state.desktop_normalized.colors)} colors, {len(state.desktop_normalized.typography)} typography, {len(state.desktop_normalized.spacing)} spacing")
        # === MOBILE EXTRACTION ===
        state.log("")
        state.log("=" * 60)
        state.log("📱 MOBILE EXTRACTION (375px)")
        state.log("=" * 60)
        state.log("")
        progress(0.5, desc="📱 Extracting mobile tokens...")
        mobile_extractor = extractor_mod.TokenExtractor(viewport=schema.Viewport.MOBILE)

        def mobile_progress(p):
            # Map extractor progress into the 0.50-0.85 band of the bar.
            progress(0.5 + (p * 0.35), desc=f"📱 Mobile... {int(p*100)}%")

        state.mobile_raw = await mobile_extractor.extract(selected_urls, progress_callback=mobile_progress)
        # Log extraction details
        state.log("📊 EXTRACTION RESULTS:")
        state.log(f" Colors: {len(state.mobile_raw.colors)} unique")
        state.log(f" Typography: {len(state.mobile_raw.typography)} styles")
        state.log(f" Spacing: {len(state.mobile_raw.spacing)} values")
        state.log(f" Radius: {len(state.mobile_raw.radius)} values")
        state.log(f" Shadows: {len(state.mobile_raw.shadows)} values")
        # Normalize mobile
        state.log("")
        state.log("🔄 Normalizing...")
        state.mobile_normalized = normalizer_mod.normalize_tokens(state.mobile_raw)
        state.log(f" ✅ Normalized: {len(state.mobile_normalized.colors)} colors, {len(state.mobile_normalized.typography)} typography, {len(state.mobile_normalized.spacing)} spacing")
        # === FIRECRAWL CSS EXTRACTION (Agent 1B) ===
        # Best-effort second pass over the raw CSS; failures are logged and
        # skipped so the main extraction result is never lost.
        progress(0.88, desc="🔥 Firecrawl CSS analysis...")
        try:
            from agents.firecrawl_extractor import extract_css_colors
            # Get base URL for Firecrawl
            base_url = selected_urls[0] if selected_urls else state.base_url
            # Extract CSS colors using Firecrawl
            firecrawl_result = await extract_css_colors(
                url=base_url,
                api_key=None,  # Will use fallback method
                log_callback=state.log
            )
            # Merge Firecrawl colors into desktop normalized
            firecrawl_colors = firecrawl_result.get("colors", {})
            if firecrawl_colors:
                state.log("")
                state.log("🔀 Merging Firecrawl colors with Playwright extraction...")
                # Count new colors
                new_colors_count = 0
                for hex_val, color_data in firecrawl_colors.items():
                    # Check if this color already exists (case-insensitive hex match)
                    existing = False
                    for name, existing_color in state.desktop_normalized.colors.items():
                        if existing_color.value.lower() == hex_val.lower():
                            existing = True
                            # Update frequency
                            existing_color.frequency += color_data.get("frequency", 1)
                            if "firecrawl" not in existing_color.contexts:
                                existing_color.contexts.append("firecrawl")
                            break
                    if not existing:
                        # Add new color from Firecrawl
                        from core.token_schema import ColorToken, TokenSource, Confidence
                        new_token = ColorToken(
                            value=hex_val,
                            frequency=color_data.get("frequency", 1),
                            contexts=["firecrawl"] + color_data.get("contexts", []),
                            elements=["css-file"],
                            css_properties=color_data.get("sources", []),
                            contrast_white=color_data.get("contrast_white", 0),
                            contrast_black=color_data.get("contrast_black", 0),
                            source=TokenSource.DETECTED,
                            confidence=Confidence.MEDIUM,
                        )
                        # Generate name
                        new_token.suggested_name = f"color.firecrawl.{len(state.desktop_normalized.colors)}"
                        state.desktop_normalized.colors[hex_val] = new_token
                        new_colors_count += 1
                state.log(f" ✅ Added {new_colors_count} new colors from Firecrawl")
                state.log(f" 📊 Total colors now: {len(state.desktop_normalized.colors)}")
        except Exception as e:
            state.log(f" ⚠️ Firecrawl extraction skipped: {str(e)}")
        # === SEMANTIC COLOR ANALYSIS (Agent 1C) ===
        # Also best-effort: on failure we fall back to the empty defaults.
        progress(0.92, desc="🧠 Semantic color analysis...")
        semantic_result = {}
        semantic_preview_html = ""
        try:
            from agents.semantic_analyzer import SemanticColorAnalyzer, generate_semantic_preview_html
            # Create analyzer (using rule-based for now, can add LLM later)
            semantic_analyzer = SemanticColorAnalyzer(llm_provider=None)
            # Run analysis
            semantic_result = semantic_analyzer.analyze_sync(
                colors=state.desktop_normalized.colors,
                log_callback=state.log
            )
            # Store in state for Stage 2
            state.semantic_analysis = semantic_result
            # Generate preview HTML
            semantic_preview_html = generate_semantic_preview_html(semantic_result)
        except Exception as e:
            state.log(f" ⚠️ Semantic analysis skipped: {str(e)}")
            import traceback
            state.log(traceback.format_exc())
        progress(0.95, desc="📊 Preparing results...")
        # Format results for Stage 1 UI
        desktop_data = format_tokens_for_display(state.desktop_normalized)
        mobile_data = format_tokens_for_display(state.mobile_normalized)
        # Generate visual previews - AS-IS for Stage 1 (no ramps, no enhancements)
        state.log("")
        state.log("🎨 Generating AS-IS visual previews...")
        from core.preview_generator import (
            generate_typography_preview_html,
            generate_colors_asis_preview_html,
            generate_spacing_asis_preview_html,
            generate_radius_asis_preview_html,
            generate_shadows_asis_preview_html,
        )
        # Get detected font
        fonts = get_detected_fonts()
        primary_font = fonts.get("primary", "Open Sans")
        # Convert typography tokens to dict format for preview
        typo_dict = {}
        for name, t in state.desktop_normalized.typography.items():
            typo_dict[name] = {
                "font_size": t.font_size,
                "font_weight": t.font_weight,
                "line_height": t.line_height or "1.5",
                "letter_spacing": "0",
            }
        # Convert color tokens to dict format for preview (with full metadata)
        color_dict = {}
        for name, c in state.desktop_normalized.colors.items():
            color_dict[name] = {
                "value": c.value,
                "frequency": c.frequency,
                "contexts": c.contexts[:3] if c.contexts else [],
                "elements": c.elements[:3] if c.elements else [],
                "css_properties": c.css_properties[:3] if c.css_properties else [],
                "contrast_white": c.contrast_white,
                "contrast_black": getattr(c, 'contrast_black', 0),
            }
        # Convert spacing tokens to dict format
        spacing_dict = {}
        for name, s in state.desktop_normalized.spacing.items():
            spacing_dict[name] = {
                "value": s.value,
                "value_px": s.value_px,
            }
        # Convert radius tokens to dict format
        radius_dict = {}
        for name, r in state.desktop_normalized.radius.items():
            radius_dict[name] = {"value": r.value}
        # Convert shadow tokens to dict format
        shadow_dict = {}
        for name, s in state.desktop_normalized.shadows.items():
            shadow_dict[name] = {"value": s.value}
        # Generate AS-IS previews (Stage 1 - raw extracted values)
        typography_preview_html = generate_typography_preview_html(
            typography_tokens=typo_dict,
            font_family=primary_font,
            sample_text="The quick brown fox jumps over the lazy dog",
        )
        # AS-IS color preview (no ramps)
        colors_asis_preview_html = generate_colors_asis_preview_html(
            color_tokens=color_dict,
        )
        # AS-IS spacing preview
        spacing_asis_preview_html = generate_spacing_asis_preview_html(
            spacing_tokens=spacing_dict,
        )
        # AS-IS radius preview
        radius_asis_preview_html = generate_radius_asis_preview_html(
            radius_tokens=radius_dict,
        )
        # AS-IS shadows preview
        shadows_asis_preview_html = generate_shadows_asis_preview_html(
            shadow_tokens=shadow_dict,
        )
        state.log(" ✅ Typography preview generated")
        state.log(" ✅ Colors AS-IS preview generated (no ramps)")
        state.log(" ✅ Semantic color analysis preview generated")
        state.log(" ✅ Spacing AS-IS preview generated")
        state.log(" ✅ Radius AS-IS preview generated")
        state.log(" ✅ Shadows AS-IS preview generated")
        # Get semantic summary for status
        brand_count = len(semantic_result.get("brand", {}))
        text_count = len(semantic_result.get("text", {}))
        bg_count = len(semantic_result.get("background", {}))
        state.log("")
        state.log("=" * 50)
        state.log("✅ EXTRACTION COMPLETE!")
        state.log(f" Enhanced extraction captured:")
        state.log(f" • {len(state.desktop_normalized.colors)} colors (DOM + CSS vars + SVG + inline)")
        state.log(f" • {len(state.desktop_normalized.typography)} typography styles")
        state.log(f" • {len(state.desktop_normalized.spacing)} spacing values")
        state.log(f" • {len(state.desktop_normalized.radius)} radius values")
        state.log(f" • {len(state.desktop_normalized.shadows)} shadow values")
        state.log(f" Semantic Analysis:")
        state.log(f" • {brand_count} brand colors identified")
        state.log(f" • {text_count} text colors identified")
        state.log(f" • {bg_count} background colors identified")
        state.log("=" * 50)
        progress(1.0, desc="✅ Complete!")
        status = f"""## ✅ Extraction Complete!
| Viewport | Colors | Typography | Spacing | Radius | Shadows |
|----------|--------|------------|---------|--------|---------|
| Desktop | {len(state.desktop_normalized.colors)} | {len(state.desktop_normalized.typography)} | {len(state.desktop_normalized.spacing)} | {len(state.desktop_normalized.radius)} | {len(state.desktop_normalized.shadows)} |
| Mobile | {len(state.mobile_normalized.colors)} | {len(state.mobile_normalized.typography)} | {len(state.mobile_normalized.spacing)} | {len(state.mobile_normalized.radius)} | {len(state.mobile_normalized.shadows)} |
**Primary Font:** {primary_font}
**Semantic Analysis:** {brand_count} brand, {text_count} text, {bg_count} background colors
**Enhanced Extraction:** DOM + CSS Variables + SVG + Inline + Stylesheets + Firecrawl
**Next:** Review the tokens below. Accept or reject, then proceed to Stage 2.
"""
        # Return all AS-IS previews including semantic
        return (
            status,
            state.get_logs(),
            desktop_data,
            mobile_data,
            typography_preview_html,
            colors_asis_preview_html,
            semantic_preview_html,
            spacing_asis_preview_html,
            radius_asis_preview_html,
            shadows_asis_preview_html,
        )
    except Exception as e:
        import traceback
        state.log(f"❌ Error: {str(e)}")
        state.log(traceback.format_exc())
        return f"❌ Error: {str(e)}", state.get_logs(), None, None, "", "", "", "", "", ""
def format_tokens_for_display(normalized) -> dict:
    """Format normalized tokens into row lists for the Gradio review tables.

    Returns a dict with "colors", "typography" and "spacing" keys, each a
    list of rows whose first column is the pre-ticked accept checkbox.
    """
    if normalized is None:
        return {"colors": [], "typography": [], "spacing": []}

    def _values(group):
        # Token groups may be dicts (new schema) or plain lists (legacy).
        return list(group.values()) if isinstance(group, dict) else group

    # Colors: most frequent first, capped at 50 rows.
    color_rows = [
        [
            True,  # accept checkbox
            c.value,
            c.suggested_name or "",
            c.frequency,
            c.confidence.value if c.confidence else "medium",
            f"{c.contrast_white:.1f}:1" if c.contrast_white else "N/A",
            "✓" if c.wcag_aa_small_text else "✗",
            ", ".join(c.contexts[:2]) if c.contexts else "",
        ]
        for c in sorted(_values(normalized.colors), key=lambda x: -x.frequency)[:50]
    ]
    # Typography: most frequent first, capped at 30 rows.
    type_rows = [
        [
            True,  # accept checkbox
            t.font_family,
            t.font_size,
            str(t.font_weight),
            t.line_height or "",
            t.suggested_name or "",
            t.frequency,
            t.confidence.value if t.confidence else "medium",
        ]
        for t in sorted(_values(normalized.typography), key=lambda x: -x.frequency)[:30]
    ]
    # Spacing: smallest pixel value first, capped at 20 rows.
    space_rows = [
        [
            True,  # accept checkbox
            s.value,
            f"{s.value_px}px",
            s.suggested_name or "",
            s.frequency,
            "✓" if s.fits_base_8 else "",
            s.confidence.value if s.confidence else "medium",
        ]
        for s in sorted(_values(normalized.spacing), key=lambda x: x.value_px)[:20]
    ]
    return {
        "colors": color_rows,
        "typography": type_rows,
        "spacing": space_rows,
    }
def switch_viewport(viewport: str):
    """Return (colors, typography, spacing) table rows for the chosen viewport."""
    source = (
        state.desktop_normalized
        if viewport == "Desktop (1440px)"
        else state.mobile_normalized
    )
    formatted = format_tokens_for_display(source)
    return formatted["colors"], formatted["typography"], formatted["spacing"]
| # ============================================================================= | |
| # STAGE 2: AI ANALYSIS (Multi-Agent) | |
| # ============================================================================= | |
async def run_stage2_analysis(competitors_str: str = "", progress=gr.Progress()):
    """Run the multi-agent LLM analysis on the Stage 1 extracted tokens.

    Args:
        competitors_str: Optional comma-separated list of design systems to
            benchmark against; falls back to a default set when blank.
        progress: Gradio progress reporter.

    Returns:
        A 15-tuple of UI outputs: (status, logs, brand comparison md, font
        families md, desktop typography rows, mobile typography rows,
        spacing rows, base colors md, color ramps md, radius md, shadows md,
        typography preview HTML, color ramps preview HTML, LLM recs HTML,
        LLM recs table). Every path returns this same arity.
    """
    if not state.desktop_normalized or not state.mobile_normalized:
        # Fix: this guard previously returned only 11 values while the
        # success/error paths return 15, breaking the Gradio output bindings.
        return ("❌ Please complete Stage 1 first", state.get_logs(), "", "",
                None, None, None, "", "", "", "", "", "", "", [])
    # Parse competitors from input
    default_competitors = [
        "Material Design 3",
        "Apple Human Interface Guidelines",
        "Shopify Polaris",
        "IBM Carbon",
        "Atlassian Design System"
    ]
    if competitors_str and competitors_str.strip():
        competitors = [c.strip() for c in competitors_str.split(",") if c.strip()]
    else:
        competitors = default_competitors
    progress(0.05, desc="🤖 Initializing multi-agent analysis...")
    try:
        # Import the multi-agent workflow lazily to keep start-up fast.
        from agents.stage2_graph import run_stage2_multi_agent
        # Convert normalized tokens to dict for the workflow
        desktop_dict = normalized_to_dict(state.desktop_normalized)
        mobile_dict = normalized_to_dict(state.mobile_normalized)
        # Run multi-agent analysis with semantic context
        progress(0.1, desc="🚀 Running parallel LLM analysis...")
        result = await run_stage2_multi_agent(
            desktop_tokens=desktop_dict,
            mobile_tokens=mobile_dict,
            competitors=competitors,
            log_callback=state.log,
            semantic_analysis=getattr(state, 'semantic_analysis', None),  # Pass semantic context!
        )
        progress(0.8, desc="📊 Processing results...")
        # Extract results
        final_recs = result.get("final_recommendations", {})
        llm1_analysis = result.get("llm1_analysis", {})
        llm2_analysis = result.get("llm2_analysis", {})
        rule_calculations = result.get("rule_calculations", {})
        cost_tracking = result.get("cost_tracking", {})
        # Store for later use
        state.upgrade_recommendations = final_recs
        state.multi_agent_result = result
        # Get font info
        fonts = get_detected_fonts()
        base_size = get_base_font_size()
        progress(0.9, desc="📊 Formatting results...")
        # Build status markdown
        status = build_analysis_status(final_recs, cost_tracking, result.get("errors", []))
        # Format brand/competitor comparison from LLM analyses
        brand_md = format_multi_agent_comparison(llm1_analysis, llm2_analysis, final_recs)
        # Format font families display
        font_families_md = format_font_families_display(fonts)
        # Format typography with BOTH desktop and mobile
        typography_desktop_data = format_typography_comparison_viewport(
            state.desktop_normalized, base_size, "desktop"
        )
        typography_mobile_data = format_typography_comparison_viewport(
            state.mobile_normalized, base_size, "mobile"
        )
        # Format spacing comparison table
        spacing_data = format_spacing_comparison_from_rules(rule_calculations)
        # Format color display: BASE colors + ramps separately
        base_colors_md = format_base_colors()
        color_ramps_md = format_color_ramps_from_rules(rule_calculations)
        # Format radius display (with token suggestions)
        radius_md = format_radius_with_tokens()
        # Format shadows display (with token suggestions)
        shadows_md = format_shadows_with_tokens()
        # Generate visual previews for Stage 2
        state.log("")
        state.log("🎨 Generating visual previews...")
        from core.preview_generator import (
            generate_typography_preview_html,
            generate_color_ramps_preview_html,
            generate_semantic_color_ramps_html
        )
        primary_font = fonts.get("primary", "Open Sans")
        # Convert typography tokens to dict format for preview
        typo_dict = {}
        for name, t in state.desktop_normalized.typography.items():
            typo_dict[name] = {
                "font_size": t.font_size,
                "font_weight": t.font_weight,
                "line_height": t.line_height or "1.5",
                "letter_spacing": "0",
            }
        # Convert color tokens to dict format for preview (with frequency for sorting)
        color_dict = {}
        for name, c in state.desktop_normalized.colors.items():
            color_dict[name] = {
                "value": c.value,
                "frequency": c.frequency,
            }
        typography_preview_html = generate_typography_preview_html(
            typography_tokens=typo_dict,
            font_family=primary_font,
            sample_text="The quick brown fox jumps over the lazy dog",
        )
        # Use semantic color ramps if available, otherwise fallback to regular
        semantic_analysis = getattr(state, 'semantic_analysis', None)
        if semantic_analysis:
            # Extract LLM color recommendations
            llm_color_recs = {}
            if final_recs and isinstance(final_recs, dict):
                llm_color_recs = final_recs.get("color_recommendations", {})
                # Also add accessibility fixes
                aa_fixes = final_recs.get("accessibility_fixes", [])
                if aa_fixes:
                    llm_color_recs["changes_made"] = [
                        f"AA fix suggested for {f.get('color', '?')}"
                        for f in aa_fixes if isinstance(f, dict)
                    ][:5]
            color_ramps_preview_html = generate_semantic_color_ramps_html(
                semantic_analysis=semantic_analysis,
                color_tokens=color_dict,
                llm_recommendations={"color_recommendations": llm_color_recs} if llm_color_recs else None,
            )
            state.log(" ✅ Semantic color ramps preview generated (with LLM recommendations)")
        else:
            color_ramps_preview_html = generate_color_ramps_preview_html(
                color_tokens=color_dict,
            )
            state.log(" ✅ Color ramps preview generated (no semantic data)")
        state.log(" ✅ Typography preview generated")
        # Generate LLM recommendations display
        llm_recs_html = format_llm_color_recommendations_html(final_recs, semantic_analysis)
        llm_recs_table = format_llm_color_recommendations_table(final_recs, semantic_analysis)
        state.log(" ✅ LLM recommendations formatted")
        progress(1.0, desc="✅ Analysis complete!")
        return (status, state.get_logs(), brand_md, font_families_md,
                typography_desktop_data, typography_mobile_data, spacing_data,
                base_colors_md, color_ramps_md, radius_md, shadows_md,
                typography_preview_html, color_ramps_preview_html,
                llm_recs_html, llm_recs_table)
    except Exception as e:
        import traceback
        state.log(f"❌ Error: {str(e)}")
        state.log(traceback.format_exc())
        return (f"❌ Analysis failed: {str(e)}", state.get_logs(), "", "", None, None, None, "", "", "", "", "", "", "", [])
def normalized_to_dict(normalized) -> dict:
    """Serialize NormalizedTokens into plain dicts for the Stage 2 workflow.

    Returns an empty dict when *normalized* is falsy; otherwise a dict with
    "colors", "typography", "spacing", "radius" and "shadows" sections.
    """
    if not normalized:
        return {}
    return {
        "colors": {
            name: {
                "value": c.value,
                "frequency": c.frequency,
                "suggested_name": c.suggested_name,
                "contrast_white": c.contrast_white,
                "contrast_black": c.contrast_black,
            }
            for name, c in normalized.colors.items()
        },
        "typography": {
            name: {
                "font_family": t.font_family,
                "font_size": t.font_size,
                "font_weight": t.font_weight,
                "line_height": t.line_height,
                "frequency": t.frequency,
            }
            for name, t in normalized.typography.items()
        },
        "spacing": {
            name: {
                "value": s.value,
                "value_px": s.value_px,
                "frequency": s.frequency,
            }
            for name, s in normalized.spacing.items()
        },
        "radius": {
            name: {
                "value": r.value,
                "frequency": r.frequency,
            }
            for name, r in normalized.radius.items()
        },
        "shadows": {
            name: {
                "value": s.value,
                "frequency": s.frequency,
            }
            for name, s in normalized.shadows.items()
        },
    }
def build_analysis_status(final_recs: dict, cost_tracking: dict, errors: list) -> str:
    """Build the Stage 2 status markdown from the analysis results."""
    out = ["## 🧠 Multi-Agent Analysis Complete!", ""]
    # Cost summary (only when the workflow reported cost data).
    if cost_tracking:
        out += [
            "### 💰 Cost Summary",
            f"**Total estimated cost:** ${cost_tracking.get('total_cost', 0):.4f}",
            "*(Free tier: $0.10/mo | Pro: $2.00/mo)*",
            "",
        ]
    # Final recommendations section.
    if final_recs and "final_recommendations" in final_recs:
        recs = final_recs["final_recommendations"]
        out.append("### 📋 Recommendations")
        if recs.get("type_scale"):
            out.append(f"**Type Scale:** {recs['type_scale']}")
            if recs.get("type_scale_rationale"):
                out.append(f" *{recs['type_scale_rationale'][:100]}*")
        if recs.get("spacing_base"):
            out.append(f"**Spacing:** {recs['spacing_base']}")
        out.append("")
    # Summary paragraph.
    if final_recs.get("summary"):
        out += ["### 📝 Summary", final_recs["summary"], ""]
    # Overall confidence.
    if final_recs.get("overall_confidence"):
        out.append(f"**Confidence:** {final_recs['overall_confidence']}%")
    # Up to three truncated warnings.
    if errors:
        out += ["", "### ⚠️ Warnings"]
        out.extend(f"- {err[:100]}" for err in errors[:3])
    return "\n".join(out)
def format_multi_agent_comparison(llm1: dict, llm2: dict, final: dict) -> str:
    """Format the agreements/disagreements/score markdown for the two LLM agents."""
    md = ["### 📊 Multi-Agent Analysis Comparison", ""]
    # Points both agents agreed on (capped at 5, findings truncated).
    if final.get("agreements"):
        md.append("#### ✅ Agreements (High Confidence)")
        for item in final["agreements"][:5]:
            md.append(f"- **{item.get('topic', '?')}**: {item.get('finding', '?')[:80]}")
        md.append("")
    # Conflicts plus how they were resolved (capped at 3).
    if final.get("disagreements"):
        md.append("#### 🔄 Resolved Disagreements")
        for item in final["disagreements"][:3]:
            md.append(f"- **{item.get('topic', '?')}**: {item.get('resolution', '?')[:100]}")
        md.append("")
    # Per-category score table.
    md += [
        "#### 📈 Score Comparison",
        "",
        "| Category | LLM 1 (Qwen) | LLM 2 (Llama) |",
        "|----------|--------------|---------------|",
    ]

    def _score(analysis, category):
        # "?" when the category is missing or not a dict.
        entry = analysis.get(category)
        return entry.get("score", "?") if isinstance(entry, dict) else "?"

    for category in ("typography", "colors", "accessibility", "spacing"):
        md.append(f"| {category.title()} | {_score(llm1, category)}/10 | {_score(llm2, category)}/10 |")
    return "\n".join(md)
def format_spacing_comparison_from_rules(rule_calculations: dict) -> list:
    """Build table rows comparing heuristic spacing steps against rule-engine grids.

    Each row is ``[current, 8px-grid value, 4px-grid value]``; missing grid
    entries render as an em dash.  NOTE(review): grid lookups start at index 1,
    apparently skipping a leading 0px entry — confirm against the rule engine.
    """
    if not rule_calculations:
        return []
    options = rule_calculations.get("spacing_options", {})
    eight_grid = options.get("8px", [])
    four_grid = options.get("4px", [])

    def _cell(grid, idx):
        # Format a grid entry, or an em dash when the grid is too short.
        return f"{grid[idx]}px" if idx < len(grid) else "—"

    rows = []
    for step in range(1, 11):
        # First five steps on a 4px cadence, the rest on an 8px cadence.
        baseline = f"{step * 4}px" if step <= 5 else f"{step * 8}px"
        rows.append([baseline, _cell(eight_grid, step), _cell(four_grid, step)])
    return rows
def format_color_ramps_from_rules(rule_calculations: dict) -> str:
    """Render rule-engine color ramps (up to six) as markdown shade tables.

    Only ramps that are lists of at least 10 shades get a 50–900 table;
    shorter/odd-shaped ramps still get their name printed.
    """
    ramps = (rule_calculations or {}).get("color_ramps", {})
    if not ramps:
        return "*No color ramps generated*"
    md = ["### 🌈 Generated Color Ramps", ""]
    for ramp_name, shades in list(ramps.items())[:6]:
        md.append(f"**{ramp_name}**")
        if isinstance(shades, list) and len(shades) >= 10:
            md.append("| 50 | 100 | 200 | 300 | 400 | 500 | 600 | 700 | 800 | 900 |")
            md.append("|---|---|---|---|---|---|---|---|---|---|")
            md.append("| " + " | ".join(f"`{shades[idx]}`" for idx in range(10)) + " |")
        md.append("")
    return "\n".join(md)
def get_detected_fonts() -> dict:
    """Summarize font usage from the desktop normalized tokens.

    Returns:
        dict with:
            ``primary``: most frequently used font family (by frequency sum),
            ``weights``: sorted list of numeric weights (defaults to [400]),
            ``all_fonts``: mapping of family -> total usage frequency.
        A stub with the same keys is returned before extraction has run.
    """
    if not state.desktop_normalized:
        # Fix: previously this early return lacked "all_fonts", giving the
        # result an inconsistent shape. Keep all keys present.
        return {"primary": "Unknown", "weights": [], "all_fonts": {}}
    usage = {}
    weights = set()
    for token in state.desktop_normalized.typography.values():
        usage[token.font_family] = usage.get(token.font_family, 0) + token.frequency
        if token.font_weight:
            try:
                weights.add(int(token.font_weight))
            except (TypeError, ValueError):
                # Non-numeric weights (e.g. "bold") are skipped.
                # (Was a bare except, which also swallowed KeyboardInterrupt.)
                pass
    primary = max(usage.items(), key=lambda kv: kv[1])[0] if usage else "Unknown"
    return {
        "primary": primary,
        "weights": sorted(weights) if weights else [400],
        "all_fonts": usage,
    }
def get_base_font_size() -> int:
    """Detect the base (body) font size in px from desktop typography.

    Picks the most frequently used size within the typical body-text range of
    14–18px; falls back to 16 when nothing matches or nothing is extracted.
    Note: rem/em suffixes are stripped but the numeric value is NOT converted
    to px — presumably extraction already normalizes units; TODO confirm.
    """
    if not state.desktop_normalized:
        return 16
    # Frequency-weighted histogram of candidate body sizes (14-18px).
    sizes = {}
    for t in state.desktop_normalized.typography.values():
        size_str = str(t.font_size).replace('px', '').replace('rem', '').replace('em', '')
        try:
            size = float(size_str)
        except ValueError:
            # Unparsable size (e.g. "inherit") — ignore.
            # (Was a bare except, which also swallowed KeyboardInterrupt.)
            continue
        if 14 <= size <= 18:
            sizes[size] = sizes.get(size, 0) + t.frequency
    if sizes:
        return int(max(sizes.items(), key=lambda x: x[1])[0])
    return 16
def format_brand_comparison(recommendations) -> str:
    """Render the top-brand design-system comparison as a markdown table.

    Args:
        recommendations: Object exposing a ``brand_analysis`` list of dicts
            with ``brand``/``ratio``/``base``/``spacing``/``notes`` keys.
    """
    if not recommendations.brand_analysis:
        return "*Brand analysis not available*"
    rows = [
        "### 📊 Design System Comparison (5 Top Brands)",
        "",
        "| Brand | Type Ratio | Base Size | Spacing | Notes |",
        "|-------|------------|-----------|---------|-------|",
    ]
    for entry in recommendations.brand_analysis[:5]:
        note_text = entry.get("notes", "")
        # Truncate long notes with an ellipsis marker.
        trimmed = note_text[:50] + ("..." if len(note_text) > 50 else "")
        rows.append(
            "| {brand} | {ratio} | {base}px | {spacing} | {notes} |".format(
                brand=entry.get("brand", "Unknown"),
                ratio=entry.get("ratio", "?"),
                base=entry.get("base", "?"),
                spacing=entry.get("spacing", "?"),
                notes=trimmed,
            )
        )
    return "\n".join(rows)
def format_font_families_display(fonts: dict) -> str:
    """Render detected fonts (primary family, weights, usage table) as markdown.

    Args:
        fonts: Result of ``get_detected_fonts()`` — keys ``primary``,
            ``weights``, ``all_fonts``.
    """
    primary = fonts.get("primary", "Unknown")
    weights = fonts.get("weights", [400])
    family_counts = fonts.get("all_fonts", {})
    md = [
        f"### Primary Font: **{primary}**",
        "",
        f"**Weights detected:** {', '.join(map(str, weights))}",
        "",
    ]
    # Only show the breakdown table when more than one family was found.
    if family_counts and len(family_counts) > 1:
        md += [
            "### All Fonts Detected",
            "",
            "| Font Family | Usage Count |",
            "|-------------|-------------|",
        ]
        ranked = sorted(family_counts.items(), key=lambda kv: -kv[1])
        md.extend(f"| {family} | {count:,} |" for family, count in ranked[:5])
        md.append("")
    md.append("*Note: This analysis focuses on English typography only.*")
    return "\n".join(md)
def format_llm_color_recommendations_html(final_recs: dict, semantic_analysis: dict) -> str:
    """Generate HTML showing LLM color recommendations with before/after comparison.

    Produces three row styles: green "keep" rows (no change), amber "change"
    rows with before/after swatches, and red accessibility-fix rows showing
    contrast ratios. When there is nothing to show, a friendly placeholder
    panel is returned instead.

    Args:
        final_recs: Merged recommendation payload; reads
            ``color_recommendations`` (role -> dict) and
            ``accessibility_fixes`` (list of dicts).
        semantic_analysis: Unused in this function; kept for signature parity
            with the table formatter.

    Returns:
        A self-contained HTML fragment (inline <style> plus rows).
    """
    # No analysis has run yet — show a neutral placeholder.
    if not final_recs:
        return '''
        <div style="padding: 20px; background: #f0f0f0 !important; border-radius: 8px; text-align: center;">
            <p style="color: #666 !important;">No LLM recommendations available yet. Run analysis first.</p>
        </div>
        '''
    color_recs = final_recs.get("color_recommendations", {})
    aa_fixes = final_recs.get("accessibility_fixes", [])
    # Analysis ran but produced no suggestions at all — success panel.
    if not color_recs and not aa_fixes:
        return '''
        <div style="padding: 20px; background: #d4edda !important; border-radius: 8px; border: 1px solid #28a745;">
            <p style="color: #155724 !important; margin: 0;">✅ No color changes recommended. Your colors look good!</p>
        </div>
        '''
    # Build recommendations HTML
    recs_html = ""
    # Process color recommendations
    for role, rec in color_recs.items():
        if not isinstance(rec, dict):
            continue
        # Skip meta keys that live alongside the per-role entries.
        if role in ["generate_ramps_for", "changes_made"]:
            continue
        current = rec.get("current", "?")
        suggested = rec.get("suggested", current)
        action = rec.get("action", "keep")
        rationale = rec.get("rationale", "")
        if action == "keep" or suggested == current:
            # No change needed — single swatch plus a "Keep" badge.
            recs_html += f'''
            <div class="llm-rec-row keep">
                <div class="rec-color-box" style="background: {current};"></div>
                <div class="rec-details">
                    <span class="rec-role">{role}</span>
                    <span class="rec-current">{current}</span>
                    <span class="rec-action keep">✓ Keep</span>
                </div>
            </div>
            '''
        else:
            # Change suggested — before/after swatch pair with rationale.
            recs_html += f'''
            <div class="llm-rec-row change">
                <div class="rec-comparison">
                    <div class="rec-before">
                        <div class="rec-color-box" style="background: {current};"></div>
                        <span class="rec-label">Before</span>
                        <span class="rec-hex">{current}</span>
                    </div>
                    <span class="rec-arrow">→</span>
                    <div class="rec-after">
                        <div class="rec-color-box" style="background: {suggested};"></div>
                        <span class="rec-label">After</span>
                        <span class="rec-hex">{suggested}</span>
                    </div>
                </div>
                <div class="rec-details">
                    <span class="rec-role">{role}</span>
                    <span class="rec-rationale">{rationale[:80]}...</span>
                </div>
            </div>
            '''
    # Process accessibility fixes (WCAG contrast repairs).
    for fix in aa_fixes:
        if not isinstance(fix, dict):
            continue
        color = fix.get("color", "?")
        role = fix.get("role", "unknown")
        issue = fix.get("issue", "contrast issue")
        fix_color = fix.get("fix", color)
        current_contrast = fix.get("current_contrast", "?")
        fixed_contrast = fix.get("fixed_contrast", "?")
        # Only render fixes that actually change the color.
        if fix_color and fix_color != color:
            recs_html += f'''
            <div class="llm-rec-row aa-fix">
                <div class="rec-comparison">
                    <div class="rec-before">
                        <div class="rec-color-box" style="background: {color};"></div>
                        <span class="rec-label">⚠️ {current_contrast}:1</span>
                        <span class="rec-hex">{color}</span>
                    </div>
                    <span class="rec-arrow">→</span>
                    <div class="rec-after">
                        <div class="rec-color-box" style="background: {fix_color};"></div>
                        <span class="rec-label">✓ {fixed_contrast}:1</span>
                        <span class="rec-hex">{fix_color}</span>
                    </div>
                </div>
                <div class="rec-details">
                    <span class="rec-role">{role}</span>
                    <span class="rec-issue">🔴 {issue}</span>
                </div>
            </div>
            '''
    # Every entry was filtered out (e.g. all "keep") — success panel again.
    if not recs_html:
        return '''
        <div style="padding: 20px; background: #d4edda !important; border-radius: 8px; border: 1px solid #28a745;">
            <p style="color: #155724 !important; margin: 0;">✅ No color changes recommended. Your colors look good!</p>
        </div>
        '''
    # Assemble the final fragment. Doubled braces escape literal CSS braces
    # inside the f-string; !important guards against host-page style bleed.
    html = f'''
    <style>
        .llm-recs-container {{
            font-family: system-ui, -apple-system, sans-serif;
            background: #f5f5f5 !important;
            border-radius: 12px;
            padding: 16px;
        }}
        .llm-rec-row {{
            display: flex;
            align-items: center;
            padding: 12px;
            margin-bottom: 12px;
            border-radius: 8px;
            background: #ffffff !important;
            border: 1px solid #e0e0e0 !important;
        }}
        .llm-rec-row.change {{
            border-left: 4px solid #f59e0b !important;
        }}
        .llm-rec-row.aa-fix {{
            border-left: 4px solid #dc2626 !important;
            background: #fef2f2 !important;
        }}
        .llm-rec-row.keep {{
            border-left: 4px solid #22c55e !important;
            background: #f0fdf4 !important;
        }}
        .rec-comparison {{
            display: flex;
            align-items: center;
            gap: 12px;
            margin-right: 20px;
        }}
        .rec-before, .rec-after {{
            display: flex;
            flex-direction: column;
            align-items: center;
            gap: 4px;
        }}
        .rec-color-box {{
            width: 48px;
            height: 48px;
            border-radius: 8px;
            border: 2px solid rgba(0,0,0,0.15) !important;
            box-shadow: 0 2px 4px rgba(0,0,0,0.1);
        }}
        .rec-label {{
            font-size: 11px;
            font-weight: 600;
            color: #666 !important;
        }}
        .rec-hex {{
            font-family: 'SF Mono', Monaco, monospace;
            font-size: 11px;
            color: #333 !important;
        }}
        .rec-arrow {{
            font-size: 20px;
            color: #666 !important;
            font-weight: bold;
        }}
        .rec-details {{
            flex: 1;
            display: flex;
            flex-direction: column;
            gap: 4px;
        }}
        .rec-role {{
            font-weight: 700;
            font-size: 14px;
            color: #1a1a1a !important;
        }}
        .rec-action {{
            font-size: 12px;
            padding: 2px 8px;
            border-radius: 4px;
        }}
        .rec-action.keep {{
            background: #dcfce7 !important;
            color: #166534 !important;
        }}
        .rec-rationale {{
            font-size: 12px;
            color: #666 !important;
        }}
        .rec-issue {{
            font-size: 12px;
            color: #991b1b !important;
            font-weight: 500;
        }}
    </style>
    <div class="llm-recs-container">
        {recs_html}
    </div>
    '''
    return html
def format_llm_color_recommendations_table(final_recs: dict, semantic_analysis: dict) -> list:
    """Build dataframe rows for LLM color recommendations.

    Each row is ``[accept, role, current, reason, suggested, contrast]`` where
    ``accept`` defaults to True so the UI pre-checks every suggestion.

    Args:
        final_recs: Merged recommendation payload containing
            ``color_recommendations`` (role -> dict) and
            ``accessibility_fixes`` (list of dicts).
        semantic_analysis: Unused here; kept for signature parity with the
            HTML formatter.

    Returns:
        List of table rows; empty when no recommendations exist.
    """
    rows = []
    if not final_recs:
        return rows
    color_recs = final_recs.get("color_recommendations", {})
    aa_fixes = final_recs.get("accessibility_fixes", [])
    # Rows for general color suggestions (skip meta keys and "keep" verdicts).
    for role, rec in color_recs.items():
        if not isinstance(rec, dict):
            continue
        if role in ("generate_ramps_for", "changes_made"):
            continue
        current = rec.get("current", "?")
        suggested = rec.get("suggested", current)
        action = rec.get("action", "keep")
        rationale = rec.get("rationale", "")[:50]
        if action != "keep" and suggested != current:
            # Best-effort contrast-vs-white before/after; the helper may be
            # unavailable or the colors unparsable, so fall back to "?".
            try:
                from core.color_utils import get_contrast_with_white
                contrast_str = (
                    f"{get_contrast_with_white(current):.1f} → "
                    f"{get_contrast_with_white(suggested):.1f}"
                )
            except Exception:
                # Was a bare except, which also swallowed KeyboardInterrupt.
                contrast_str = "?"
            rows.append([
                True,  # Accept checkbox (default True)
                role,
                current,
                rationale or action,
                suggested,
                contrast_str,
            ])
    # Rows for WCAG accessibility fixes that actually change the color.
    for fix in aa_fixes:
        if not isinstance(fix, dict):
            continue
        color = fix.get("color", "?")
        fix_color = fix.get("fix", color)
        if fix_color and fix_color != color:
            rows.append([
                True,  # Accept checkbox
                f"{fix.get('role', 'unknown')} (AA fix)",
                color,
                fix.get("issue", "contrast")[:40],
                fix_color,
                f"{fix.get('current_contrast', '?')}:1 → {fix.get('fixed_contrast', '?')}:1",
            ])
    return rows
def format_typography_comparison_viewport(normalized_tokens, base_size: int, viewport: str) -> list:
    """Build a type-scale comparison table for one viewport.

    Args:
        normalized_tokens: NormalizedTokens-like object with a ``typography``
            mapping whose values expose ``font_size``; None yields [].
        base_size: Detected base font size in px (falsy falls back to 16).
        viewport: "mobile" applies a 0.875 down-scale factor, else 1.0.

    Returns:
        Rows of ``[token, current, 1.2-scale, 1.25-scale, 1.333-scale, keep]``
        for 13 token levels; "current" lists detected sizes sorted descending,
        padded with an em dash, and "keep" mirrors "current".
    """
    if not normalized_tokens:
        return []
    current_typo = list(normalized_tokens.typography.values())

    def parse_size(token) -> float:
        """Best-effort numeric parse of a CSS size string; defaults to 16."""
        raw = str(token.font_size).replace('px', '').replace('rem', '').replace('em', '')
        try:
            return float(raw)
        except (TypeError, ValueError):
            # Was a bare except, which also swallowed KeyboardInterrupt.
            return 16

    # Detected sizes, largest first, to line up against the scale's big end.
    current_typo.sort(key=lambda t: -parse_size(t))
    sizes = [parse_size(t) for t in current_typo]
    base = base_size if base_size else 16
    # Mobile type is typically ~0.85-0.9 of desktop.
    mobile_factor = 0.875 if viewport == "mobile" else 1.0
    # Token names (13 levels, display down to overline).
    token_names = [
        "display.2xl", "display.xl", "display.lg", "display.md",
        "heading.xl", "heading.lg", "heading.md", "heading.sm",
        "body.lg", "body.md", "body.sm",
        "caption", "overline"
    ]

    def round_to_even(val: float) -> int:
        """Round to even numbers for cleaner type scales."""
        return int(round(val / 2) * 2)

    # Index 8 sits at the base size; larger tokens scale up from it.
    scales = {
        key: [round_to_even(base * mobile_factor * (ratio ** (8 - i))) for i in range(13)]
        for key, ratio in (("1.2", 1.2), ("1.25", 1.25), ("1.333", 1.333))
    }
    data = []
    for i, name in enumerate(token_names):
        current = f"{int(sizes[i])}px" if i < len(sizes) else "—"
        data.append([
            name,
            current,
            f"{scales['1.2'][i]}px",
            f"{scales['1.25'][i]}px",
            f"{scales['1.333'][i]}px",
            current,  # "keep current" column mirrors the detected value
        ])
    return data
def format_base_colors() -> str:
    """Render the detected desktop base colors as a markdown table.

    Rows cover the ten most frequent colors with a role guessed from the
    token's suggested name, usage frequency and contrast-vs-white ratio.
    """
    if not state.desktop_normalized:
        return "*No colors detected*"
    palette = sorted(state.desktop_normalized.colors.values(), key=lambda c: -c.frequency)
    md = [
        "### 🎨 Base Colors (Detected)",
        "",
        "These are the primary colors extracted from your website:",
        "",
        "| Color | Hex | Role | Frequency | Contrast |",
        "|-------|-----|------|-----------|----------|",
    ]
    # Role keywords checked in priority order against the suggested name.
    role_keywords = (
        ("primary", "Primary"),
        ("text", "Text"),
        ("background", "Background"),
        ("border", "Border"),
    )
    for color in palette[:10]:
        role = "Accent"
        for keyword, label in role_keywords:
            if color.suggested_name and keyword in color.suggested_name.lower():
                role = label
                break
        contrast = f"{color.contrast_white:.1f}:1" if color.contrast_white else "—"
        # 🟦 is a fixed swatch placeholder (markdown tables can't render color).
        md.append(f"| 🟦 | `{color.value}` | {role} | {color.frequency:,} | {contrast} |")
    return "\n".join(md)
def format_color_ramps_visual(recommendations) -> str:
    """Render generated 10-step color ramps for the top detected colors.

    Args:
        recommendations: Unused here; ramps are generated directly from the
            desktop normalized colors in global state.
    """
    if not state.desktop_normalized:
        return "*No colors to display*"
    from core.color_utils import generate_color_ramp
    palette = sorted(state.desktop_normalized.colors.values(), key=lambda c: -c.frequency)
    md = [
        "### 🌈 Generated Color Ramps",
        "",
        "Full ramp (50-950) generated for each base color:",
        "",
    ]
    for color in palette[:6]:  # Top 6 colors
        hex_val = color.value
        # Role = middle segment of a dotted semantic name, e.g. "color.primary.500".
        if color.suggested_name and '.' in color.suggested_name:
            role = color.suggested_name.split('.')[1]
        else:
            role = "color"
        try:
            ramp = generate_color_ramp(hex_val)
        except Exception as e:
            md.append(f"**{role}** (`{hex_val}`) — Could not generate ramp: {str(e)}")
            md.append("")
            continue
        md.append(f"**{role.upper()}** (base: `{hex_val}`)")
        md.append("")
        md.append("| 50 | 100 | 200 | 300 | 400 | 500 | 600 | 700 | 800 | 900 |")
        md.append("|---|---|---|---|---|---|---|---|---|---|")
        cells = "|"
        for idx in range(10):
            cells += f" `{ramp[idx]}` |" if idx < len(ramp) else " — |"
        md.append(cells)
        md.append("")
    return "\n".join(md)
def format_radius_with_tokens() -> str:
    """Render detected border-radius values with suggested token names.

    Maps each detected radius (sorted by px, first eight shown) onto a
    semantic token via px ranges; percentage values and radii >= 50px map to
    ``radius.full``.

    Fix: the ``radius.full`` special case was previously clobbered by the
    unconditional range-map loop that followed it (e.g. "50%" parsed to 50
    and fell into the 32-100 bucket, becoming ``radius.2xl``). The range
    lookup now only runs when the special case does not apply.
    """
    if not state.desktop_normalized or not state.desktop_normalized.radius:
        return "*No border radius values detected.*"
    radii = list(state.desktop_normalized.radius.values())
    lines = [
        "### 🔘 Border Radius Tokens",
        "",
        "| Detected | Suggested Token | Usage |",
        "|----------|-----------------|-------|",
    ]

    def parse_radius(r) -> float:
        """Parse a radius value to px; unparsable values sort last (999)."""
        val = str(r.value).replace('px', '').replace('%', '')
        try:
            return float(val)
        except (TypeError, ValueError):
            # Was a bare except, which also swallowed KeyboardInterrupt.
            return 999

    radii.sort(key=parse_radius)
    # px range -> (token, typical usage); upper bound exclusive.
    token_map = {
        (0, 2): ("radius.none", "Sharp corners"),
        (2, 4): ("radius.xs", "Subtle rounding"),
        (4, 6): ("radius.sm", "Small elements"),
        (6, 10): ("radius.md", "Buttons, cards"),
        (10, 16): ("radius.lg", "Modals, panels"),
        (16, 32): ("radius.xl", "Large containers"),
        (32, 100): ("radius.2xl", "Pill shapes"),
    }
    for r in radii[:8]:
        val = str(r.value)
        px = parse_radius(r)
        if "%" in val or px >= 50:
            # Percentages and very large radii read as fully-rounded shapes.
            token, usage = "radius.full", "Circles, avatars"
        else:
            token, usage = "radius.md", "General use"  # fallback when no range matches
            for (low, high), (t, u) in token_map.items():
                if low <= px < high:
                    token, usage = t, u
                    break
        lines.append(f"| {val} | `{token}` | {usage} |")
    return "\n".join(lines)
def format_shadows_with_tokens() -> str:
    """Render detected shadow values with suggested token names and use cases.

    Shows up to six shadows in detection order, each paired with a size token
    (shadow.xs .. shadow.2xl) and an index-based use-case guess.
    """
    if not state.desktop_normalized or not state.desktop_normalized.shadows:
        return "*No shadow values detected.*"
    md = [
        "### 🌫️ Shadow Tokens",
        "",
        "| Detected Value | Suggested Token | Use Case |",
        "|----------------|-----------------|----------|",
    ]
    token_names = ["shadow.xs", "shadow.sm", "shadow.md", "shadow.lg", "shadow.xl", "shadow.2xl"]
    use_cases = ["Subtle elevation", "Cards, dropdowns", "Modals, dialogs",
                 "Popovers", "Floating elements", "Dramatic effect"]
    shadows = list(state.desktop_normalized.shadows.values())
    for idx, shadow in enumerate(shadows[:6]):
        raw = str(shadow.value)
        # Truncate long box-shadow strings for display.
        display = raw[:40] + ("..." if len(raw) > 40 else "")
        token = token_names[idx] if idx < len(token_names) else f"shadow.custom-{idx}"
        use = use_cases[idx] if idx < len(use_cases) else "Custom"
        md.append(f"| `{display}` | `{token}` | {use} |")
    return "\n".join(md)
def format_spacing_comparison(recommendations) -> list:
    """Build rows comparing detected spacing against 8px and 4px grid snaps.

    Args:
        recommendations: Unused here; spacing comes from global state.

    Returns:
        Rows of ``[current, 8px-snap, 4px-snap]`` for the ten smallest
        detected spacings; a check mark flags values already on the grid.
    """
    if not state.desktop_normalized:
        return []
    detected = sorted(state.desktop_normalized.spacing.values(), key=lambda s: s.value_px)
    rows = []
    for token in detected[:10]:
        px = token.value_px

        def snapped(base: int) -> str:
            # Snapped value, with ✓ when the original already sits on the grid.
            target = snap_to_grid(px, base)
            label = f"{target}px"
            return label + " ✓" if px == target else label

        rows.append([f"{px}px", snapped(8), snapped(4)])
    return rows
def snap_to_grid(value: float, base: int) -> int:
    """Return *value* snapped to the nearest multiple of *base*.

    Uses Python's built-in ``round``, so exact halves follow banker's
    rounding (e.g. 10/4 -> 2 -> 8).
    """
    multiples = round(value / base)
    return multiples * base
def apply_selected_upgrades(type_choice: str, spacing_choice: str, apply_ramps: bool, color_recs_table: list = None):
    """Apply selected upgrade options including LLM color recommendations.

    Persists the user's Stage 2 choices into ``state.selected_upgrades`` and
    logs an audit trail of accepted/rejected color changes. Accepted changes
    are stored under ``state.selected_upgrades["color_changes"]`` for the
    export step to apply.

    Args:
        type_choice: Selected type-scale option label.
        spacing_choice: Selected spacing-grid option label.
        apply_ramps: Whether full color ramps should be generated on export.
        color_recs_table: Dataframe rows shaped
            ``[accept, role, current, issue, suggested, ...]`` from the
            recommendations table; may be None.

    Returns:
        Tuple of (status message, accumulated log text); an error status with
        empty logs when analysis has not been run yet.
    """
    if not state.upgrade_recommendations:
        return "❌ Run analysis first", ""
    state.log("✨ Applying selected upgrades...")
    # Store selections
    state.selected_upgrades = {
        "type_scale": type_choice,
        "spacing": spacing_choice,
        "color_ramps": apply_ramps,
    }
    state.log(f"  Type Scale: {type_choice}")
    state.log(f"  Spacing: {spacing_choice}")
    state.log(f"  Color Ramps: {'Yes' if apply_ramps else 'No'}")
    # Process accepted color recommendations
    accepted_color_changes = []
    if color_recs_table:
        state.log("")
        state.log("  🎨 LLM Color Recommendations:")
        for row in color_recs_table:
            # Defensive: skip malformed rows shorter than the expected shape.
            if len(row) >= 5:
                accept = row[0]  # Boolean checkbox
                role = row[1]  # Role name
                current = row[2]  # Current color
                issue = row[3]  # Issue description
                suggested = row[4]  # Suggested color
                # Only record rows the user accepted that actually change the color.
                if accept and suggested and current != suggested:
                    accepted_color_changes.append({
                        "role": role,
                        "from": current,
                        "to": suggested,
                        "reason": issue,
                    })
                    state.log(f"  ├─ ✅ ACCEPTED: {role}")
                    state.log(f"  │     └─ {current} → {suggested}")
                elif not accept:
                    state.log(f"  ├─ ❌ REJECTED: {role} (keeping {current})")
    # Store accepted changes
    state.selected_upgrades["color_changes"] = accepted_color_changes
    if accepted_color_changes:
        state.log("")
        state.log(f"  📊 {len(accepted_color_changes)} color change(s) will be applied to export")
    state.log("")
    state.log("✅ Upgrades applied! Proceed to Stage 3 for export.")
    return "✅ Upgrades applied! Proceed to Stage 3 to export.", state.get_logs()
def _figma_token_name(raw_name: str, prefix: str, *, dash_to_dot: bool = True) -> str:
    """Normalize a token name for Figma Tokens Studio (dot-separated, prefixed).

    Spaces and underscores always become dots; dashes become dots only when
    *dash_to_dot* is True (color names historically keep their dashes).
    """
    cleaned = raw_name.replace(" ", ".").replace("_", ".")
    if dash_to_dot:
        cleaned = cleaned.replace("-", ".")
    cleaned = cleaned.lower()
    if not cleaned.startswith(f"{prefix}."):
        cleaned = f"{prefix}.{cleaned}"
    return cleaned


def _stage1_typography_tokens(result: dict, normalized, viewport: str) -> None:
    """Append FLAT typography tokens (``font.*.<viewport>``) to *result*."""
    if not (normalized and normalized.typography):
        return
    for name, t in normalized.typography.items():
        token_key = f"{_figma_token_name(t.suggested_name or name, 'font')}.{viewport}"
        result["typography"][token_key] = {
            "value": t.font_size,
            "type": "dimension",
            "fontFamily": t.font_family,
            "fontWeight": str(t.font_weight),
            "lineHeight": t.line_height or "1.5",
            "source": "detected",
        }


def _stage1_spacing_tokens(result: dict, normalized, viewport: str) -> None:
    """Append FLAT spacing tokens (``space.*.<viewport>``) to *result*."""
    if not (normalized and normalized.spacing):
        return
    for name, s in normalized.spacing.items():
        token_key = f"{_figma_token_name(s.suggested_name or name, 'space')}.{viewport}"
        result["spacing"][token_key] = {
            "value": s.value,
            "type": "dimension",
            "source": "detected",
        }


def export_stage1_json():
    """Export Stage 1 tokens (as-is extraction) to JSON - FLAT structure for Figma Tokens Studio.

    Builds a JSON string with metadata plus flat ``fonts``/``colors``/
    ``typography``/``spacing``/``radius``/``shadows`` sections. Typography and
    spacing keys carry ``.desktop`` / ``.mobile`` suffixes; colors, radius and
    shadows are viewport-agnostic. The per-section loops were previously
    copy-pasted per viewport; they now share the helpers above (note colors
    deliberately keep dashes in their names, matching the original cleaning).
    """
    if not state.desktop_normalized:
        return json.dumps({"error": "No tokens extracted. Please run extraction first."}, indent=2)
    # FLAT structure for Figma Tokens Studio compatibility
    result = {
        "metadata": {
            "source_url": state.base_url,
            "extracted_at": datetime.now().isoformat(),
            "version": "v1-stage1-as-is",
            "stage": "extraction",
            "description": "Raw extracted tokens before upgrades - Figma Tokens Studio compatible",
        },
        "fonts": {},
        "colors": {},
        "typography": {},  # FLAT: font.display.xl.desktop, font.display.xl.mobile
        "spacing": {},     # FLAT: space.1.desktop, space.1.mobile
        "radius": {},
        "shadows": {},
    }
    # Fonts: primary family plus detected weights.
    fonts_info = get_detected_fonts()
    result["fonts"] = {
        "primary": fonts_info.get("primary", "Unknown"),
        "weights": fonts_info.get("weights", [400]),
    }
    # Colors (viewport-agnostic — same across devices).
    if state.desktop_normalized.colors:
        for name, c in state.desktop_normalized.colors.items():
            clean_name = _figma_token_name(c.suggested_name or name, "color", dash_to_dot=False)
            result["colors"][clean_name] = {
                "value": c.value,
                "type": "color",
                "source": "detected",
            }
    # Typography and spacing: one flat token per viewport.
    _stage1_typography_tokens(result, state.desktop_normalized, "desktop")
    _stage1_typography_tokens(result, state.mobile_normalized, "mobile")
    _stage1_spacing_tokens(result, state.desktop_normalized, "desktop")
    _stage1_spacing_tokens(result, state.mobile_normalized, "mobile")
    # Radius (viewport-agnostic).
    if state.desktop_normalized.radius:
        for name, r in state.desktop_normalized.radius.items():
            result["radius"][_figma_token_name(name, "radius")] = {
                "value": r.value,
                "type": "dimension",
                "source": "detected",
            }
    # Shadows (viewport-agnostic).
    if state.desktop_normalized.shadows:
        for name, s in state.desktop_normalized.shadows.items():
            result["shadows"][_figma_token_name(name, "shadow")] = {
                "value": s.value,
                "type": "boxShadow",
                "source": "detected",
            }
    # default=str keeps non-JSON-native values (e.g. custom objects) exportable.
    return json.dumps(result, indent=2, default=str)
# Canonical token-name orderings used when an upgraded scale/grid is generated.
_TYPE_TOKEN_NAMES = [
    "font.display.2xl", "font.display.xl", "font.display.lg", "font.display.md",
    "font.heading.xl", "font.heading.lg", "font.heading.md", "font.heading.sm",
    "font.body.lg", "font.body.md", "font.body.sm", "font.caption", "font.overline",
]
_SPACING_TOKEN_NAMES = [
    "space.1", "space.2", "space.3", "space.4", "space.5",
    "space.6", "space.8", "space.10", "space.12", "space.16",
]


def _clean_token_name(raw_name: str, prefix: str) -> str:
    """Normalize *raw_name* to a dotted lowercase token path under *prefix*.

    Spaces, underscores, and hyphens all become dots so that every token
    category uses the same naming convention (previously colors skipped the
    hyphen replacement, producing inconsistent keys).
    """
    cleaned = raw_name.replace(" ", ".").replace("_", ".").replace("-", ".").lower()
    return cleaned if cleaned.startswith(prefix) else f"{prefix}{cleaned}"


def _type_scale_ratio(choice: str):
    """Map a type-scale UI choice string to its numeric ratio.

    Checks the longest marker first: "1.2" is a substring of "1.25", so the
    naive check-1.2-first order would misread "Scale 1.25 (Major Third)" as
    ratio 1.2.

    Returns:
        float | None: the ratio, or None for "Keep Current".
    """
    for marker, ratio in (("1.333", 1.333), ("1.25", 1.25), ("1.2", 1.2)):
        if marker in choice:
            return ratio
    return None


def _spacing_base(choice: str):
    """Map a spacing UI choice string to its base grid size in px (or None)."""
    if "8px" in choice:
        return 8
    if "4px" in choice:
        return 4
    return None


def _scaled_sizes(base_size, ratio, factor=1.0):
    """Return 13 px sizes stepping down by *ratio* from base*ratio**8.

    *factor* shrinks the whole scale (0.875 for mobile). Each size is rounded
    to the nearest even number, matching the advertised "even sizes" behavior.
    """
    return [int(round(base_size * factor * (ratio ** (8 - i)) / 2) * 2) for i in range(13)]


def _export_colors(result, apply_ramps):
    """Fill result["colors"], optionally expanding each base color to a 50-950 ramp."""
    if not (state.desktop_normalized and state.desktop_normalized.colors):
        return
    from core.color_utils import generate_color_ramp
    shades = ["50", "100", "200", "300", "400", "500", "600", "700", "800", "900", "950"]
    for name, c in state.desktop_normalized.colors.items():
        clean_name = _clean_token_name(c.suggested_name or name, "color.")
        base_entry = {"value": c.value, "type": "color", "source": "detected"}
        if not apply_ramps:
            result["colors"][clean_name] = base_entry
            continue
        try:
            ramp = generate_color_ramp(c.value)
        except Exception:
            # Ramp generation failed for this color — keep the detected base value.
            result["colors"][clean_name] = base_entry
            continue
        for i, shade in enumerate(shades):
            if i >= len(ramp):
                break
            entry = ramp[i]
            # Ramp entries may be plain hex strings or dicts with a "hex" key.
            hex_value = entry if isinstance(entry, str) else entry.get("hex", c.value)
            result["colors"][f"{clean_name}.{shade}"] = {
                "value": hex_value,
                "type": "color",
                # The 500 step is the original detected color; the rest are generated.
                "source": "detected" if shade == "500" else "upgraded",
            }


def _export_typography(result, ratio, primary_font):
    """Fill result["typography"] with flat <token>.<viewport> keys for both viewports."""
    base_size = get_base_font_size()
    for viewport, factor, normalized in (
        ("desktop", 1.0, state.desktop_normalized),
        ("mobile", 0.875, state.mobile_normalized),  # mobile scale is 87.5% of desktop
    ):
        if ratio:
            sizes = _scaled_sizes(base_size, ratio, factor)
            for i, token_name in enumerate(_TYPE_TOKEN_NAMES):
                result["typography"][f"{token_name}.{viewport}"] = {
                    "value": f"{sizes[i]}px",
                    "type": "dimension",
                    "fontFamily": primary_font,
                    "source": "upgraded",
                }
        elif normalized and normalized.typography:
            # No upgrade selected — keep detected values, flattened per viewport.
            for name, t in normalized.typography.items():
                clean_name = _clean_token_name(t.suggested_name or name, "font.")
                result["typography"][f"{clean_name}.{viewport}"] = {
                    "value": t.font_size,
                    "type": "dimension",
                    "fontFamily": t.font_family,
                    "fontWeight": str(t.font_weight),
                    "lineHeight": t.line_height or "1.5",
                    "source": "detected",
                }


def _export_spacing(result, spacing_base):
    """Fill result["spacing"]; either a generated grid or the detected values."""
    if spacing_base:
        # Grid-aligned scale; desktop and mobile share the same values.
        for i, token_name in enumerate(_SPACING_TOKEN_NAMES):
            value = f"{spacing_base * (i + 1)}px"
            for viewport in ("desktop", "mobile"):
                result["spacing"][f"{token_name}.{viewport}"] = {
                    "value": value,
                    "type": "dimension",
                    "source": "upgraded",
                }
        return
    for viewport, normalized in (
        ("desktop", state.desktop_normalized),
        ("mobile", state.mobile_normalized),
    ):
        if normalized and normalized.spacing:
            for name, s in normalized.spacing.items():
                clean_name = _clean_token_name(s.suggested_name or name, "space.")
                result["spacing"][f"{clean_name}.{viewport}"] = {
                    "value": s.value,
                    "type": "dimension",
                    "source": "detected",
                }


def _export_radius_and_shadows(result):
    """Fill result["radius"] and result["shadows"] (viewport-agnostic, desktop source)."""
    if state.desktop_normalized and state.desktop_normalized.radius:
        for name, r in state.desktop_normalized.radius.items():
            clean_name = _clean_token_name(name, "radius.")
            result["radius"][clean_name] = {
                "value": r.value,
                "type": "dimension",
                "source": "detected",
            }
    if state.desktop_normalized and state.desktop_normalized.shadows:
        for name, s in state.desktop_normalized.shadows.items():
            clean_name = _clean_token_name(name, "shadow.")
            result["shadows"][clean_name] = {
                "value": s.value,
                "type": "boxShadow",
                "source": "detected",
            }


def export_tokens_json():
    """Export final tokens with selected upgrades applied.

    Builds a FLAT token dictionary (keys like ``font.display.xl.desktop`` and
    ``space.4.mobile``) for Figma Tokens Studio compatibility and returns it
    as a pretty-printed JSON string.

    Reads module state: ``state.selected_upgrades`` (user choices from Stage 2)
    and ``state.desktop_normalized`` / ``state.mobile_normalized``.

    Returns:
        str: JSON document, or a JSON error object when no extraction ran yet.
    """
    if not state.desktop_normalized:
        return json.dumps({"error": "No tokens extracted. Please run extraction first."}, indent=2)

    # User selections from Stage 2 (defaults mirror the UI's initial values).
    upgrades = getattr(state, 'selected_upgrades', {})
    type_scale_choice = upgrades.get('type_scale', 'Keep Current')
    spacing_choice = upgrades.get('spacing', 'Keep Current')
    apply_ramps = upgrades.get('color_ramps', True)

    ratio = _type_scale_ratio(type_scale_choice)
    spacing_base = _spacing_base(spacing_choice)

    # FLAT structure for Figma Tokens Studio compatibility.
    result = {
        "metadata": {
            "source_url": state.base_url,
            "extracted_at": datetime.now().isoformat(),
            "version": "v2-upgraded",
            "stage": "final",
            "description": "Upgraded tokens - Figma Tokens Studio compatible",
            "upgrades_applied": {
                "type_scale": type_scale_choice,
                "spacing": spacing_choice,
                "color_ramps": apply_ramps,
            },
        },
        "fonts": {},
        "colors": {},
        "typography": {},  # FLAT: font.display.xl.desktop, font.display.xl.mobile
        "spacing": {},     # FLAT: space.1.desktop, space.1.mobile
        "radius": {},
        "shadows": {},
    }

    fonts_info = get_detected_fonts()
    result["fonts"] = {
        "primary": fonts_info.get("primary", "Unknown"),
        "weights": fonts_info.get("weights", [400]),
    }
    primary_font = fonts_info.get("primary", "sans-serif")

    _export_colors(result, apply_ramps)
    _export_typography(result, ratio, primary_font)
    _export_spacing(result, spacing_base)
    _export_radius_and_shadows(result)

    return json.dumps(result, indent=2, default=str)
| # ============================================================================= | |
| # UI BUILDING | |
| # ============================================================================= | |
def create_ui():
    """Create the Gradio interface.

    Builds the full workflow as a gr.Blocks layout — configuration, page
    discovery/extraction, Stage 1 token review, Stage 2 AI upgrades, and
    Stage 3 export — then wires all event handlers at the bottom.

    Returns:
        gr.Blocks: the assembled app; the caller is responsible for launching it.
    """
    with gr.Blocks(
        title="Design System Extractor v2",
        theme=gr.themes.Soft(),
        css="""
        .color-swatch { display: inline-block; width: 24px; height: 24px; border-radius: 4px; margin-right: 8px; vertical-align: middle; }
        """
    ) as app:
        gr.Markdown("""
        # 🎨 Design System Extractor v2
        **Reverse-engineer design systems from live websites.**
        A semi-automated, human-in-the-loop system that extracts, normalizes, and upgrades design tokens.
        ---
        """)
        # =================================================================
        # CONFIGURATION
        # =================================================================
        # Accordion starts open only when no token was supplied via the environment.
        with gr.Accordion("⚙️ Configuration", open=not bool(HF_TOKEN_FROM_ENV)):
            gr.Markdown("**HuggingFace Token** — Required for Stage 2 (AI upgrades)")
            with gr.Row():
                hf_token_input = gr.Textbox(
                    label="HF Token", placeholder="hf_xxxx", type="password",
                    scale=4, value=HF_TOKEN_FROM_ENV,
                )
                save_token_btn = gr.Button("💾 Save", scale=1)
            token_status = gr.Markdown("✅ Token loaded" if HF_TOKEN_FROM_ENV else "⏳ Enter token")

            def save_token(token):
                """Store the token in the process env so later HF API calls pick it up."""
                # NOTE(review): length > 10 is only a sanity check, not real validation.
                if token and len(token) > 10:
                    os.environ["HF_TOKEN"] = token.strip()
                    return "✅ Token saved!"
                return "❌ Invalid token"

            save_token_btn.click(save_token, [hf_token_input], [token_status])
        # =================================================================
        # URL INPUT & PAGE DISCOVERY
        # =================================================================
        with gr.Accordion("🔍 Step 1: Discover Pages", open=True):
            gr.Markdown("Enter your website URL to discover pages for extraction.")
            with gr.Row():
                url_input = gr.Textbox(label="Website URL", placeholder="https://example.com", scale=4)
                discover_btn = gr.Button("🔍 Discover Pages", variant="primary", scale=1)
            discover_status = gr.Markdown("")
            with gr.Row():
                log_output = gr.Textbox(label="📋 Log", lines=8, interactive=False)
            # Hidden until discovery succeeds (revealed in the discover_btn handler).
            pages_table = gr.Dataframe(
                headers=["Select", "URL", "Title", "Type", "Status"],
                datatype=["bool", "str", "str", "str", "str"],
                label="Discovered Pages",
                interactive=True,
                visible=False,
            )
            extract_btn = gr.Button("🚀 Extract Tokens (Desktop + Mobile)", variant="primary", visible=False)
        # =================================================================
        # STAGE 1: EXTRACTION REVIEW
        # =================================================================
        with gr.Accordion("📊 Stage 1: Review Extracted Tokens", open=False) as stage1_accordion:
            extraction_status = gr.Markdown("")
            gr.Markdown("""
            **Review the extracted tokens.** Toggle between Desktop and Mobile viewports.
            Accept or reject tokens, then proceed to Stage 2 for AI-powered upgrades.
            """)
            viewport_toggle = gr.Radio(
                choices=["Desktop (1440px)", "Mobile (375px)"],
                value="Desktop (1440px)",
                label="Viewport",
            )
            with gr.Tabs():
                with gr.Tab("🎨 Colors"):
                    colors_table = gr.Dataframe(
                        headers=["Accept", "Color", "Suggested Name", "Frequency", "Confidence", "Contrast", "AA", "Context"],
                        datatype=["bool", "str", "str", "number", "str", "str", "str", "str"],
                        label="Colors",
                        interactive=True,
                    )
                with gr.Tab("📝 Typography"):
                    typography_table = gr.Dataframe(
                        headers=["Accept", "Font", "Size", "Weight", "Line Height", "Suggested Name", "Frequency", "Confidence"],
                        datatype=["bool", "str", "str", "str", "str", "str", "number", "str"],
                        label="Typography",
                        interactive=True,
                    )
                with gr.Tab("📏 Spacing"):
                    spacing_table = gr.Dataframe(
                        headers=["Accept", "Value", "Pixels", "Suggested Name", "Frequency", "Base 8", "Confidence"],
                        datatype=["bool", "str", "str", "str", "number", "str", "str"],
                        label="Spacing",
                        interactive=True,
                    )
                with gr.Tab("🔘 Radius"):
                    radius_table = gr.Dataframe(
                        headers=["Accept", "Value", "Frequency", "Context"],
                        datatype=["bool", "str", "number", "str"],
                        label="Border Radius",
                        interactive=True,
                    )
            # =============================================================
            # VISUAL PREVIEWS (Stage 1) - AS-IS only, no enhancements
            # =============================================================
            gr.Markdown("---")
            gr.Markdown("## 👁️ Visual Previews (AS-IS)")
            gr.Markdown("*Raw extracted values from the website — no enhancements applied*")
            with gr.Tabs():
                with gr.Tab("🔤 Typography"):
                    gr.Markdown("*Actual typography rendered with the detected font*")
                    stage1_typography_preview = gr.HTML(
                        value="<div style='padding: 20px; background: #f5f5f5; border-radius: 8px; color: #666;'>Typography preview will appear after extraction...</div>",
                        label="Typography Preview"
                    )
                with gr.Tab("🎨 Colors"):
                    gr.Markdown("*All detected colors (AS-IS — no generated ramps)*")
                    stage1_colors_preview = gr.HTML(
                        value="<div style='padding: 20px; background: #f5f5f5; border-radius: 8px; color: #666;'>Colors preview will appear after extraction...</div>",
                        label="Colors Preview"
                    )
                with gr.Tab("🧠 Semantic Colors"):
                    gr.Markdown("*Colors categorized by usage: Brand, Text, Background, Border, Feedback*")
                    stage1_semantic_preview = gr.HTML(
                        value="<div style='padding: 20px; background: #f5f5f5; border-radius: 8px; color: #666;'>Semantic color analysis will appear after extraction...</div>",
                        label="Semantic Colors Preview"
                    )
                with gr.Tab("📏 Spacing"):
                    gr.Markdown("*All detected spacing values*")
                    stage1_spacing_preview = gr.HTML(
                        value="<div style='padding: 20px; background: #f5f5f5; border-radius: 8px; color: #666;'>Spacing preview will appear after extraction...</div>",
                        label="Spacing Preview"
                    )
                with gr.Tab("🔘 Radius"):
                    gr.Markdown("*All detected border radius values*")
                    stage1_radius_preview = gr.HTML(
                        value="<div style='padding: 20px; background: #f5f5f5; border-radius: 8px; color: #666;'>Radius preview will appear after extraction...</div>",
                        label="Radius Preview"
                    )
                with gr.Tab("🌑 Shadows"):
                    gr.Markdown("*All detected box shadow values*")
                    stage1_shadows_preview = gr.HTML(
                        value="<div style='padding: 20px; background: #f5f5f5; border-radius: 8px; color: #666;'>Shadows preview will appear after extraction...</div>",
                        label="Shadows Preview"
                    )
            with gr.Row():
                proceed_stage2_btn = gr.Button("➡️ Proceed to Stage 2: AI Upgrades", variant="primary")
                download_stage1_btn = gr.Button("📥 Download Stage 1 JSON", variant="secondary")
        # =================================================================
        # STAGE 2: AI UPGRADES
        # =================================================================
        with gr.Accordion("🧠 Stage 2: AI-Powered Upgrades", open=False) as stage2_accordion:
            stage2_status = gr.Markdown("Click 'Analyze' to start AI-powered design system analysis.")
            # =============================================================
            # LLM CONFIGURATION & COMPETITORS
            # =============================================================
            with gr.Accordion("⚙️ Analysis Configuration", open=False):
                gr.Markdown("""
                ### 🤖 LLM Models Used
                | Role | Model | Expertise |
                |------|-------|-----------|
                | **Typography Analyst** | meta-llama/Llama-3.1-70B | Type scale patterns, readability |
                | **Color Analyst** | meta-llama/Llama-3.1-70B | Color theory, accessibility |
                | **Spacing Analyst** | Rule-based | Grid alignment, consistency |
                *Analysis compares your design against industry leaders.*
                """)
                gr.Markdown("### 🎯 Competitor Design Systems")
                gr.Markdown("Enter design systems to compare against (comma-separated):")
                competitors_input = gr.Textbox(
                    value="Material Design 3, Apple HIG, Shopify Polaris, IBM Carbon, Atlassian",
                    label="Competitors",
                    placeholder="Material Design 3, Apple HIG, Shopify Polaris...",
                )
                gr.Markdown("*Suggestions: Ant Design, Chakra UI, Tailwind, Bootstrap, Salesforce Lightning*")
            analyze_btn = gr.Button("🤖 Analyze Design System", variant="primary", size="lg")
            with gr.Accordion("📋 AI Analysis Log", open=True):
                stage2_log = gr.Textbox(label="Log", lines=18, interactive=False)
            # =============================================================
            # BRAND COMPARISON (LLM Research)
            # =============================================================
            gr.Markdown("---")
            brand_comparison = gr.Markdown("*Brand comparison will appear after analysis*")
            # =============================================================
            # FONT FAMILIES DETECTED
            # =============================================================
            gr.Markdown("---")
            gr.Markdown("## 🔤 Font Families Detected")
            font_families_display = gr.Markdown("*Font information will appear after analysis*")
            # =============================================================
            # TYPOGRAPHY SECTION - Desktop & Mobile
            # =============================================================
            gr.Markdown("---")
            gr.Markdown("## 📐 Typography")
            # Visual Preview
            with gr.Accordion("👁️ Typography Visual Preview", open=True):
                stage2_typography_preview = gr.HTML(
                    value="<div style='padding: 20px; background: #f5f5f5; border-radius: 8px; color: #666;'>Typography preview will appear after analysis...</div>",
                    label="Typography Preview"
                )
            with gr.Row():
                with gr.Column(scale=2):
                    gr.Markdown("### 🖥️ Desktop (1440px)")
                    typography_desktop = gr.Dataframe(
                        headers=["Token", "Current", "Scale 1.2", "Scale 1.25 ⭐", "Scale 1.333", "Keep"],
                        datatype=["str", "str", "str", "str", "str", "str"],
                        label="Desktop Typography",
                        interactive=False,
                    )
                with gr.Column(scale=2):
                    gr.Markdown("### 📱 Mobile (375px)")
                    typography_mobile = gr.Dataframe(
                        headers=["Token", "Current", "Scale 1.2", "Scale 1.25 ⭐", "Scale 1.333", "Keep"],
                        datatype=["str", "str", "str", "str", "str", "str"],
                        label="Mobile Typography",
                        interactive=False,
                    )
            with gr.Row():
                with gr.Column():
                    gr.Markdown("### Select Type Scale Option")
                    type_scale_radio = gr.Radio(
                        choices=["Keep Current", "Scale 1.2 (Minor Third)", "Scale 1.25 (Major Third) ⭐", "Scale 1.333 (Perfect Fourth)"],
                        value="Scale 1.25 (Major Third) ⭐",
                        label="Type Scale",
                        interactive=True,
                    )
                    gr.Markdown("*Font family will be preserved. Sizes rounded to even numbers.*")
            # =============================================================
            # COLORS SECTION - Base Colors + Ramps + LLM Recommendations
            # =============================================================
            gr.Markdown("---")
            gr.Markdown("## 🎨 Colors")
            # LLM Recommendations Section (NEW)
            with gr.Accordion("🤖 LLM Color Recommendations", open=True):
                gr.Markdown("""
                *The LLMs analyzed your colors and made these suggestions. Accept or reject each one.*
                """)
                llm_color_recommendations = gr.HTML(
                    value="<div style='padding: 20px; background: #f5f5f5; border-radius: 8px; color: #666;'>LLM recommendations will appear after analysis...</div>",
                    label="LLM Recommendations"
                )
                # Accept/Reject table for color recommendations
                color_recommendations_table = gr.Dataframe(
                    headers=["Accept", "Role", "Current", "Issue", "Suggested", "Contrast"],
                    datatype=["bool", "str", "str", "str", "str", "str"],
                    label="Color Recommendations",
                    interactive=True,
                    col_count=(6, "fixed"),
                )
            # Visual Preview
            with gr.Accordion("👁️ Color Ramps Visual Preview (Semantic Groups)", open=True):
                stage2_color_ramps_preview = gr.HTML(
                    value="<div style='padding: 20px; background: #f5f5f5; border-radius: 8px; color: #666;'>Color ramps preview will appear after analysis...</div>",
                    label="Color Ramps Preview"
                )
            base_colors_display = gr.Markdown("*Base colors will appear after analysis*")
            gr.Markdown("---")
            color_ramps_display = gr.Markdown("*Color ramps will appear after analysis*")
            color_ramps_checkbox = gr.Checkbox(
                label="✓ Generate color ramps (keeps base colors, adds 50-950 shades)",
                value=True,
            )
            # =============================================================
            # SPACING SECTION
            # =============================================================
            gr.Markdown("---")
            gr.Markdown("## 📏 Spacing (Rule-Based)")
            with gr.Row():
                with gr.Column(scale=2):
                    spacing_comparison = gr.Dataframe(
                        headers=["Current", "8px Grid", "4px Grid"],
                        datatype=["str", "str", "str"],
                        label="Spacing Comparison",
                        interactive=False,
                    )
                with gr.Column(scale=1):
                    spacing_radio = gr.Radio(
                        choices=["Keep Current", "8px Base Grid ⭐", "4px Base Grid"],
                        value="8px Base Grid ⭐",
                        label="Spacing System",
                        interactive=True,
                    )
            # =============================================================
            # RADIUS SECTION
            # =============================================================
            gr.Markdown("---")
            gr.Markdown("## 🔘 Border Radius (Rule-Based)")
            radius_display = gr.Markdown("*Radius tokens will appear after analysis*")
            # =============================================================
            # SHADOWS SECTION
            # =============================================================
            gr.Markdown("---")
            gr.Markdown("## 🌫️ Shadows (Rule-Based)")
            shadows_display = gr.Markdown("*Shadow tokens will appear after analysis*")
            # =============================================================
            # APPLY SECTION
            # =============================================================
            gr.Markdown("---")
            with gr.Row():
                apply_upgrades_btn = gr.Button("✨ Apply Selected Upgrades", variant="primary", scale=2)
                reset_btn = gr.Button("↩️ Reset to Original", variant="secondary", scale=1)
            apply_status = gr.Markdown("")
        # =================================================================
        # STAGE 3: EXPORT
        # =================================================================
        with gr.Accordion("📦 Stage 3: Export", open=False):
            gr.Markdown("""
            Export your design tokens to JSON (compatible with Figma Tokens Studio).
            - **Stage 1 JSON**: Raw extracted tokens (as-is)
            - **Final JSON**: Upgraded tokens with selected improvements
            """)
            with gr.Row():
                export_stage1_btn = gr.Button("📥 Export Stage 1 (As-Is)", variant="secondary")
                export_final_btn = gr.Button("📥 Export Final (Upgraded)", variant="primary")
            export_output = gr.Code(label="Tokens JSON", language="json", lines=25)
            export_stage1_btn.click(export_stage1_json, outputs=[export_output])
            export_final_btn.click(export_tokens_json, outputs=[export_output])
        # =================================================================
        # EVENT HANDLERS
        # =================================================================
        # Store data for viewport toggle (per-session state for Desktop/Mobile tables).
        desktop_data = gr.State({})
        mobile_data = gr.State({})
        # Discover pages, then reveal the pages table and extract button.
        discover_btn.click(
            fn=discover_pages,
            inputs=[url_input],
            outputs=[discover_status, log_output, pages_table],
        ).then(
            fn=lambda: (gr.update(visible=True), gr.update(visible=True)),
            outputs=[pages_table, extract_btn],
        )
        # Extract tokens, populate the Stage 1 tables from the desktop data,
        # then pop the Stage 1 accordion open.
        extract_btn.click(
            fn=extract_tokens,
            inputs=[pages_table],
            outputs=[extraction_status, log_output, desktop_data, mobile_data,
                     stage1_typography_preview, stage1_colors_preview,
                     stage1_semantic_preview,
                     stage1_spacing_preview, stage1_radius_preview, stage1_shadows_preview],
        ).then(
            fn=lambda d: (d.get("colors", []), d.get("typography", []), d.get("spacing", [])),
            inputs=[desktop_data],
            outputs=[colors_table, typography_table, spacing_table],
        ).then(
            fn=lambda: gr.update(open=True),
            outputs=[stage1_accordion],
        )
        # Viewport toggle
        viewport_toggle.change(
            fn=switch_viewport,
            inputs=[viewport_toggle],
            outputs=[colors_table, typography_table, spacing_table],
        )
        # Stage 2: Analyze
        analyze_btn.click(
            fn=run_stage2_analysis,
            inputs=[competitors_input],
            outputs=[stage2_status, stage2_log, brand_comparison, font_families_display,
                     typography_desktop, typography_mobile, spacing_comparison,
                     base_colors_display, color_ramps_display, radius_display, shadows_display,
                     stage2_typography_preview, stage2_color_ramps_preview,
                     llm_color_recommendations, color_recommendations_table],
        )
        # Stage 2: Apply upgrades
        apply_upgrades_btn.click(
            fn=apply_selected_upgrades,
            inputs=[type_scale_radio, spacing_radio, color_ramps_checkbox, color_recommendations_table],
            outputs=[apply_status, stage2_log],
        )
        # Stage 1: Download JSON
        download_stage1_btn.click(
            fn=export_stage1_json,
            outputs=[export_output],
        )
        # Proceed to Stage 2 button
        proceed_stage2_btn.click(
            fn=lambda: gr.update(open=True),
            outputs=[stage2_accordion],
        )
        # =================================================================
        # FOOTER
        # =================================================================
        gr.Markdown("""
        ---
        **Design System Extractor v2** | Built with Playwright + Gradio + LangGraph + HuggingFace
        *A semi-automated co-pilot for design system recovery and modernization.*
        """)
    return app
| # ============================================================================= | |
| # MAIN | |
| # ============================================================================= | |
if __name__ == "__main__":
    # Build the UI and serve on all interfaces at port 7860 (HF Spaces default).
    demo = create_ui()
    demo.launch(server_name="0.0.0.0", server_port=7860)