diff --git "a/app.py" "b/app.py"
new file mode 100644
--- /dev/null
+++ "b/app.py"
@@ -0,0 +1,3626 @@
+"""
+Design System Extractor v2 – Main Application
+==============================================
+
+Flow:
+1. User enters URL
+2. Agent 1 discovers pages → User confirms
+3. Agent 1 extracts tokens (Desktop + Mobile)
+4. Agent 2 normalizes tokens
+5. Stage 1 UI: User reviews tokens (accept/reject, Desktop↔Mobile toggle)
+6. Agent 3 proposes upgrades
+7. Stage 2 UI: User selects options with live preview
+8. Agent 4 generates JSON
+9. Stage 3 UI: User exports
+"""
+
+import os
+import asyncio
+import json
+import gradio as gr
+from datetime import datetime
+from typing import Optional
+
+# Get HF token from environment
+HF_TOKEN_FROM_ENV = os.getenv("HF_TOKEN", "")
+
+# =============================================================================
+# GLOBAL STATE
+# =============================================================================
+
+class AppState:
+ """Global application state."""
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.discovered_pages = []
+ self.base_url = ""
+ self.desktop_raw = None # ExtractedTokens
+ self.mobile_raw = None # ExtractedTokens
+ self.desktop_normalized = None # NormalizedTokens
+ self.mobile_normalized = None # NormalizedTokens
+ self.upgrade_recommendations = None # UpgradeRecommendations
+ self.selected_upgrades = {} # User selections
+ self.logs = []
+
+ def log(self, message: str):
+ timestamp = datetime.now().strftime("%H:%M:%S")
+ self.logs.append(f"[{timestamp}] {message}")
+ if len(self.logs) > 100:
+ self.logs.pop(0)
+
+ def get_logs(self) -> str:
+ return "\n".join(self.logs)
+
+state = AppState()
+
+
+# =============================================================================
+# LAZY IMPORTS
+# =============================================================================
+
+def get_crawler():
+ import agents.crawler
+ return agents.crawler
+
+def get_extractor():
+ import agents.extractor
+ return agents.extractor
+
+def get_normalizer():
+ import agents.normalizer
+ return agents.normalizer
+
+def get_advisor():
+ import agents.advisor
+ return agents.advisor
+
+def get_schema():
+ import core.token_schema
+ return core.token_schema
+
+
+# =============================================================================
+# PHASE 1: DISCOVER PAGES
+# =============================================================================
+
+async def discover_pages(url: str, progress=gr.Progress()):
+ """Discover pages from URL."""
+ state.reset()
+
+ if not url or not url.startswith(("http://", "https://")):
+ return "β Please enter a valid URL", "", None
+
+ state.log(f"π Starting discovery for: {url}")
+ progress(0.1, desc="π Discovering pages...")
+
+ try:
+ crawler = get_crawler()
+ discoverer = crawler.PageDiscoverer()
+
+ pages = await discoverer.discover(url)
+
+ state.discovered_pages = pages
+ state.base_url = url
+
+        state.log(f"✅ Found {len(pages)} pages")
+
+ # Format for display
+ pages_data = []
+ for page in pages:
+ pages_data.append([
+ True, # Selected by default
+ page.url,
+ page.title if page.title else "(No title)",
+ page.page_type.value,
+ "β" if not page.error else f"β {page.error}"
+ ])
+
+        progress(1.0, desc="✅ Discovery complete!")
+
+        status = f"✅ Found {len(pages)} pages. Review and click 'Extract Tokens' to continue."
+
+ return status, state.get_logs(), pages_data
+
+ except Exception as e:
+ import traceback
+ state.log(f"β Error: {str(e)}")
+ return f"β Error: {str(e)}", state.get_logs(), None
+
+
+# =============================================================================
+# PHASE 2: EXTRACT TOKENS
+# =============================================================================
+
+async def extract_tokens(pages_data, progress=gr.Progress()):
+ """Extract tokens from selected pages (both viewports)."""
+
+ state.log(f"π₯ Received pages_data type: {type(pages_data)}")
+
+ if pages_data is None:
+ return "β Please discover pages first", state.get_logs(), None, None
+
+ # Get selected URLs - handle pandas DataFrame
+ selected_urls = []
+
+ try:
+ # Check if it's a pandas DataFrame
+ if hasattr(pages_data, 'iterrows'):
+ state.log(f"π₯ DataFrame with {len(pages_data)} rows, columns: {list(pages_data.columns)}")
+
+ for idx, row in pages_data.iterrows():
+ # Get values by column name or position
+ try:
+ # Try column names first
+ is_selected = row.get('Select', row.iloc[0] if len(row) > 0 else False)
+ url = row.get('URL', row.iloc[1] if len(row) > 1 else '')
+ except:
+ # Fallback to positional
+ is_selected = row.iloc[0] if len(row) > 0 else False
+ url = row.iloc[1] if len(row) > 1 else ''
+
+ if is_selected and url:
+ selected_urls.append(url)
+
+ # If it's a dict (Gradio sometimes sends this)
+ elif isinstance(pages_data, dict):
+ state.log(f"π₯ Dict with keys: {list(pages_data.keys())}")
+ data = pages_data.get('data', [])
+ for row in data:
+ if isinstance(row, (list, tuple)) and len(row) >= 2 and row[0]:
+ selected_urls.append(row[1])
+
+ # If it's a list
+ elif isinstance(pages_data, (list, tuple)):
+ state.log(f"π₯ List with {len(pages_data)} items")
+ for row in pages_data:
+ if isinstance(row, (list, tuple)) and len(row) >= 2 and row[0]:
+ selected_urls.append(row[1])
+
+ except Exception as e:
+ state.log(f"β Error parsing pages_data: {str(e)}")
+ import traceback
+ state.log(traceback.format_exc())
+
+ state.log(f"π Found {len(selected_urls)} selected URLs")
+
+ # If still no URLs, try using stored discovered pages
+ if not selected_urls and state.discovered_pages:
+ state.log("β οΈ No URLs from table, using all discovered pages")
+ selected_urls = [p.url for p in state.discovered_pages if not p.error][:10]
+
+ if not selected_urls:
+ return "β No pages selected. Please select pages or rediscover.", state.get_logs(), None, None
+
+ # Limit to 10 pages for performance
+ selected_urls = selected_urls[:10]
+
+ state.log(f"π Extracting from {len(selected_urls)} pages:")
+ for url in selected_urls[:3]:
+ state.log(f" β’ {url}")
+ if len(selected_urls) > 3:
+ state.log(f" ... and {len(selected_urls) - 3} more")
+
+ progress(0.05, desc="π Starting extraction...")
+
+ try:
+ schema = get_schema()
+ extractor_mod = get_extractor()
+ normalizer_mod = get_normalizer()
+
+ # === DESKTOP EXTRACTION ===
+ state.log("")
+ state.log("=" * 60)
+ state.log("π₯οΈ DESKTOP EXTRACTION (1440px)")
+ state.log("=" * 60)
+ state.log("")
+ state.log("π‘ Enhanced extraction from 7 sources:")
+ state.log(" 1. DOM computed styles (getComputedStyle)")
+ state.log(" 2. CSS variables (:root { --color: })")
+ state.log(" 3. SVG colors (fill, stroke)")
+ state.log(" 4. Inline styles (style='color:')")
+ state.log(" 5. Stylesheet rules (CSS files)")
+ state.log(" 6. External CSS files (fetch & parse)")
+ state.log(" 7. Page content scan (brute-force)")
+ state.log("")
+
+ progress(0.1, desc="π₯οΈ Extracting desktop tokens...")
+
+ desktop_extractor = extractor_mod.TokenExtractor(viewport=schema.Viewport.DESKTOP)
+
+ def desktop_progress(p):
+ progress(0.1 + (p * 0.35), desc=f"π₯οΈ Desktop... {int(p*100)}%")
+
+ state.desktop_raw = await desktop_extractor.extract(selected_urls, progress_callback=desktop_progress)
+
+ # Log extraction details
+ state.log("π EXTRACTION RESULTS:")
+ state.log(f" Colors: {len(state.desktop_raw.colors)} unique")
+ state.log(f" Typography: {len(state.desktop_raw.typography)} styles")
+ state.log(f" Spacing: {len(state.desktop_raw.spacing)} values")
+ state.log(f" Radius: {len(state.desktop_raw.radius)} values")
+ state.log(f" Shadows: {len(state.desktop_raw.shadows)} values")
+
+ # Store foreground-background pairs for real AA checking in Stage 2
+ if hasattr(desktop_extractor, 'fg_bg_pairs') and desktop_extractor.fg_bg_pairs:
+ state.fg_bg_pairs = desktop_extractor.fg_bg_pairs
+ state.log(f" FG/BG Pairs: {len(state.fg_bg_pairs)} unique pairs for AA checking")
+ else:
+ state.fg_bg_pairs = []
+
+ # Log CSS variables if found
+ if hasattr(desktop_extractor, 'css_variables') and desktop_extractor.css_variables:
+ state.log("")
+ state.log(f"π¨ CSS Variables found: {len(desktop_extractor.css_variables)}")
+ for var_name, var_value in list(desktop_extractor.css_variables.items())[:5]:
+ state.log(f" {var_name}: {var_value}")
+ if len(desktop_extractor.css_variables) > 5:
+ state.log(f" ... and {len(desktop_extractor.css_variables) - 5} more")
+
+ # Log warnings if any
+ if desktop_extractor.warnings:
+ state.log("")
+ state.log("β οΈ Warnings:")
+ for w in desktop_extractor.warnings[:3]:
+ state.log(f" {w}")
+
+ # Normalize desktop
+ state.log("")
+ state.log("π Normalizing (deduping, naming)...")
+ state.desktop_normalized = normalizer_mod.normalize_tokens(state.desktop_raw)
+        state.log(f"   ✅ Normalized: {len(state.desktop_normalized.colors)} colors, {len(state.desktop_normalized.typography)} typography, {len(state.desktop_normalized.spacing)} spacing")
+
+ # === MOBILE EXTRACTION ===
+ state.log("")
+ state.log("=" * 60)
+ state.log("π± MOBILE EXTRACTION (375px)")
+ state.log("=" * 60)
+ state.log("")
+
+ progress(0.5, desc="π± Extracting mobile tokens...")
+
+ mobile_extractor = extractor_mod.TokenExtractor(viewport=schema.Viewport.MOBILE)
+
+ def mobile_progress(p):
+ progress(0.5 + (p * 0.35), desc=f"π± Mobile... {int(p*100)}%")
+
+ state.mobile_raw = await mobile_extractor.extract(selected_urls, progress_callback=mobile_progress)
+
+ # Log extraction details
+ state.log("π EXTRACTION RESULTS:")
+ state.log(f" Colors: {len(state.mobile_raw.colors)} unique")
+ state.log(f" Typography: {len(state.mobile_raw.typography)} styles")
+ state.log(f" Spacing: {len(state.mobile_raw.spacing)} values")
+ state.log(f" Radius: {len(state.mobile_raw.radius)} values")
+ state.log(f" Shadows: {len(state.mobile_raw.shadows)} values")
+
+ # Normalize mobile
+ state.log("")
+ state.log("π Normalizing...")
+ state.mobile_normalized = normalizer_mod.normalize_tokens(state.mobile_raw)
+        state.log(f"   ✅ Normalized: {len(state.mobile_normalized.colors)} colors, {len(state.mobile_normalized.typography)} typography, {len(state.mobile_normalized.spacing)} spacing")
+
+ # === FIRECRAWL CSS EXTRACTION (Agent 1B) ===
+ progress(0.88, desc="π₯ Firecrawl CSS analysis...")
+
+ try:
+ from agents.firecrawl_extractor import extract_css_colors
+
+ # Get base URL for Firecrawl
+ base_url = selected_urls[0] if selected_urls else state.base_url
+
+ # Extract CSS colors using Firecrawl
+ firecrawl_result = await extract_css_colors(
+ url=base_url,
+ api_key=None, # Will use fallback method
+ log_callback=state.log
+ )
+
+ # Merge Firecrawl colors into desktop normalized
+ firecrawl_colors = firecrawl_result.get("colors", {})
+
+ if firecrawl_colors:
+ state.log("")
+ state.log("π Merging Firecrawl colors with Playwright extraction...")
+
+ # Count new colors
+ new_colors_count = 0
+
+ for hex_val, color_data in firecrawl_colors.items():
+ # Check if this color already exists
+ existing = False
+ for name, existing_color in state.desktop_normalized.colors.items():
+ if existing_color.value.lower() == hex_val.lower():
+ existing = True
+ # Update frequency
+ existing_color.frequency += color_data.get("frequency", 1)
+ if "firecrawl" not in existing_color.contexts:
+ existing_color.contexts.append("firecrawl")
+ break
+
+ if not existing:
+ # Add new color from Firecrawl
+ from core.token_schema import ColorToken, TokenSource, Confidence
+
+ new_token = ColorToken(
+ value=hex_val,
+ frequency=color_data.get("frequency", 1),
+ contexts=["firecrawl"] + color_data.get("contexts", []),
+ elements=["css-file"],
+ css_properties=color_data.get("sources", []),
+ contrast_white=color_data.get("contrast_white", 0),
+ contrast_black=color_data.get("contrast_black", 0),
+ source=TokenSource.DETECTED,
+ confidence=Confidence.MEDIUM,
+ )
+
+ # Generate name
+ new_token.suggested_name = f"color.firecrawl.{len(state.desktop_normalized.colors)}"
+
+ state.desktop_normalized.colors[hex_val] = new_token
+ new_colors_count += 1
+
+                state.log(f"   ✅ Added {new_colors_count} new colors from Firecrawl")
+ state.log(f" π Total colors now: {len(state.desktop_normalized.colors)}")
+
+ except Exception as e:
+ state.log(f" β οΈ Firecrawl extraction skipped: {str(e)}")
+
+ # === SEMANTIC COLOR ANALYSIS (Agent 1C) ===
+ progress(0.92, desc="π§ Semantic color analysis...")
+
+ semantic_result = {}
+ semantic_preview_html = ""
+
+ try:
+ from agents.semantic_analyzer import SemanticColorAnalyzer, generate_semantic_preview_html
+
+ # Create analyzer (using rule-based for now, can add LLM later)
+ semantic_analyzer = SemanticColorAnalyzer(llm_provider=None)
+
+ # Run analysis
+ semantic_result = semantic_analyzer.analyze_sync(
+ colors=state.desktop_normalized.colors,
+ log_callback=state.log
+ )
+
+ # Store in state for Stage 2
+ state.semantic_analysis = semantic_result
+
+ # Generate preview HTML
+ semantic_preview_html = generate_semantic_preview_html(semantic_result)
+
+ except Exception as e:
+ state.log(f" β οΈ Semantic analysis skipped: {str(e)}")
+ import traceback
+ state.log(traceback.format_exc())
+
+ progress(0.95, desc="π Preparing results...")
+
+ # Format results for Stage 1 UI
+ desktop_data = format_tokens_for_display(state.desktop_normalized)
+ mobile_data = format_tokens_for_display(state.mobile_normalized)
+
+ # Generate visual previews - AS-IS for Stage 1 (no ramps, no enhancements)
+ state.log("")
+ state.log("π¨ Generating AS-IS visual previews...")
+
+ from core.preview_generator import (
+ generate_typography_preview_html,
+ generate_colors_asis_preview_html,
+ generate_spacing_asis_preview_html,
+ generate_radius_asis_preview_html,
+ generate_shadows_asis_preview_html,
+ )
+
+ # Get detected font
+ fonts = get_detected_fonts()
+ primary_font = fonts.get("primary", "Open Sans")
+
+ # Convert typography tokens to dict format for preview
+ typo_dict = {}
+ for name, t in state.desktop_normalized.typography.items():
+ typo_dict[name] = {
+ "font_size": t.font_size,
+ "font_weight": t.font_weight,
+ "line_height": t.line_height or "1.5",
+ "letter_spacing": "0",
+ }
+
+ # Convert color tokens to dict format for preview (with full metadata)
+ color_dict = {}
+ for name, c in state.desktop_normalized.colors.items():
+ color_dict[name] = {
+ "value": c.value,
+ "frequency": c.frequency,
+ "contexts": c.contexts[:3] if c.contexts else [],
+ "elements": c.elements[:3] if c.elements else [],
+ "css_properties": c.css_properties[:3] if c.css_properties else [],
+ "contrast_white": c.contrast_white,
+ "contrast_black": getattr(c, 'contrast_black', 0),
+ }
+
+ # Convert spacing tokens to dict format
+ spacing_dict = {}
+ for name, s in state.desktop_normalized.spacing.items():
+ spacing_dict[name] = {
+ "value": s.value,
+ "value_px": s.value_px,
+ }
+
+ # Convert radius tokens to dict format
+ radius_dict = {}
+ for name, r in state.desktop_normalized.radius.items():
+ radius_dict[name] = {"value": r.value}
+
+ # Convert shadow tokens to dict format
+ shadow_dict = {}
+ for name, s in state.desktop_normalized.shadows.items():
+ shadow_dict[name] = {"value": s.value}
+
+ # Generate AS-IS previews (Stage 1 - raw extracted values)
+ typography_preview_html = generate_typography_preview_html(
+ typography_tokens=typo_dict,
+ font_family=primary_font,
+ sample_text="The quick brown fox jumps over the lazy dog",
+ )
+
+ # AS-IS color preview (no ramps)
+ colors_asis_preview_html = generate_colors_asis_preview_html(
+ color_tokens=color_dict,
+ )
+
+ # AS-IS spacing preview
+ spacing_asis_preview_html = generate_spacing_asis_preview_html(
+ spacing_tokens=spacing_dict,
+ )
+
+ # AS-IS radius preview
+ radius_asis_preview_html = generate_radius_asis_preview_html(
+ radius_tokens=radius_dict,
+ )
+
+ # AS-IS shadows preview
+ shadows_asis_preview_html = generate_shadows_asis_preview_html(
+ shadow_tokens=shadow_dict,
+ )
+
+        state.log("   ✅ Typography preview generated")
+        state.log("   ✅ Colors AS-IS preview generated (no ramps)")
+        state.log("   ✅ Semantic color analysis preview generated")
+        state.log("   ✅ Spacing AS-IS preview generated")
+        state.log("   ✅ Radius AS-IS preview generated")
+        state.log("   ✅ Shadows AS-IS preview generated")
+
+ # Get semantic summary for status
+ brand_count = len(semantic_result.get("brand", {}))
+ text_count = len(semantic_result.get("text", {}))
+ bg_count = len(semantic_result.get("background", {}))
+
+ state.log("")
+ state.log("=" * 50)
+        state.log("✅ EXTRACTION COMPLETE!")
+ state.log(f" Enhanced extraction captured:")
+ state.log(f" β’ {len(state.desktop_normalized.colors)} colors (DOM + CSS vars + SVG + inline)")
+ state.log(f" β’ {len(state.desktop_normalized.typography)} typography styles")
+ state.log(f" β’ {len(state.desktop_normalized.spacing)} spacing values")
+ state.log(f" β’ {len(state.desktop_normalized.radius)} radius values")
+ state.log(f" β’ {len(state.desktop_normalized.shadows)} shadow values")
+ state.log(f" Semantic Analysis:")
+ state.log(f" β’ {brand_count} brand colors identified")
+ state.log(f" β’ {text_count} text colors identified")
+ state.log(f" β’ {bg_count} background colors identified")
+ state.log("=" * 50)
+
+        progress(1.0, desc="✅ Complete!")
+
+        status = f"""## ✅ Extraction Complete!
+
+| Viewport | Colors | Typography | Spacing | Radius | Shadows |
+|----------|--------|------------|---------|--------|---------|
+| Desktop | {len(state.desktop_normalized.colors)} | {len(state.desktop_normalized.typography)} | {len(state.desktop_normalized.spacing)} | {len(state.desktop_normalized.radius)} | {len(state.desktop_normalized.shadows)} |
+| Mobile | {len(state.mobile_normalized.colors)} | {len(state.mobile_normalized.typography)} | {len(state.mobile_normalized.spacing)} | {len(state.mobile_normalized.radius)} | {len(state.mobile_normalized.shadows)} |
+
+**Primary Font:** {primary_font}
+
+**Semantic Analysis:** {brand_count} brand, {text_count} text, {bg_count} background colors
+
+**Enhanced Extraction:** DOM + CSS Variables + SVG + Inline + Stylesheets + Firecrawl
+
+**Next:** Review the tokens below. Accept or reject, then proceed to Stage 2.
+"""
+
+ # Return all AS-IS previews including semantic
+ return (
+ status,
+ state.get_logs(),
+ desktop_data,
+ mobile_data,
+ typography_preview_html,
+ colors_asis_preview_html,
+ semantic_preview_html,
+ spacing_asis_preview_html,
+ radius_asis_preview_html,
+ shadows_asis_preview_html,
+ )
+
+ except Exception as e:
+ import traceback
+ state.log(f"β Error: {str(e)}")
+ state.log(traceback.format_exc())
+ return f"β Error: {str(e)}", state.get_logs(), None, None, "", "", "", "", "", ""
+
+
+def format_tokens_for_display(normalized) -> dict:
+ """Format normalized tokens for Gradio display."""
+ if normalized is None:
+ return {"colors": [], "typography": [], "spacing": []}
+
+ # Colors are now a dict
+ colors = []
+ color_items = list(normalized.colors.values()) if isinstance(normalized.colors, dict) else normalized.colors
+ for c in sorted(color_items, key=lambda x: -x.frequency)[:50]:
+ colors.append([
+ True, # Accept checkbox
+ c.value,
+ c.suggested_name or "",
+ c.frequency,
+ c.confidence.value if c.confidence else "medium",
+ f"{c.contrast_white:.1f}:1" if c.contrast_white else "N/A",
+ "β" if c.wcag_aa_small_text else "β",
+ ", ".join(c.contexts[:2]) if c.contexts else "",
+ ])
+
+ # Typography
+ typography = []
+ typo_items = list(normalized.typography.values()) if isinstance(normalized.typography, dict) else normalized.typography
+ for t in sorted(typo_items, key=lambda x: -x.frequency)[:30]:
+ typography.append([
+ True, # Accept checkbox
+ t.font_family,
+ t.font_size,
+ str(t.font_weight),
+ t.line_height or "",
+ t.suggested_name or "",
+ t.frequency,
+ t.confidence.value if t.confidence else "medium",
+ ])
+
+ # Spacing
+ spacing = []
+ spacing_items = list(normalized.spacing.values()) if isinstance(normalized.spacing, dict) else normalized.spacing
+ for s in sorted(spacing_items, key=lambda x: x.value_px)[:20]:
+ spacing.append([
+ True, # Accept checkbox
+ s.value,
+ f"{s.value_px}px",
+ s.suggested_name or "",
+ s.frequency,
+ "β" if s.fits_base_8 else "",
+ s.confidence.value if s.confidence else "medium",
+ ])
+
+ return {
+ "colors": colors,
+ "typography": typography,
+ "spacing": spacing,
+ }
+
+
+def switch_viewport(viewport: str):
+ """Switch between desktop and mobile view."""
+ if viewport == "Desktop (1440px)":
+ data = format_tokens_for_display(state.desktop_normalized)
+ else:
+ data = format_tokens_for_display(state.mobile_normalized)
+
+ return data["colors"], data["typography"], data["spacing"]
+
+
+# =============================================================================
+# STAGE 2: AI ANALYSIS (Multi-Agent)
+# =============================================================================
+
+async def run_stage2_analysis(competitors_str: str = "", progress=gr.Progress()):
+ """Run multi-agent analysis on extracted tokens."""
+
+ if not state.desktop_normalized or not state.mobile_normalized:
+ return ("β Please complete Stage 1 first", "", "", "", None, None, None, "", "", "", "")
+
+ # Parse competitors from input
+ default_competitors = [
+ "Material Design 3",
+ "Apple Human Interface Guidelines",
+ "Shopify Polaris",
+ "IBM Carbon",
+ "Atlassian Design System"
+ ]
+
+ if competitors_str and competitors_str.strip():
+ competitors = [c.strip() for c in competitors_str.split(",") if c.strip()]
+ else:
+ competitors = default_competitors
+
+ progress(0.05, desc="π€ Initializing multi-agent analysis...")
+
+ try:
+ # Import the multi-agent workflow
+ from agents.stage2_graph import run_stage2_multi_agent
+
+ # Convert normalized tokens to dict for the workflow
+ desktop_dict = normalized_to_dict(state.desktop_normalized)
+ mobile_dict = normalized_to_dict(state.mobile_normalized)
+
+ # Run multi-agent analysis with semantic context
+ progress(0.1, desc="π Running parallel LLM analysis...")
+
+ result = await run_stage2_multi_agent(
+ desktop_tokens=desktop_dict,
+ mobile_tokens=mobile_dict,
+ competitors=competitors,
+ log_callback=state.log,
+ semantic_analysis=getattr(state, 'semantic_analysis', None), # Pass semantic context!
+ )
+
+ progress(0.8, desc="π Processing results...")
+
+ # Extract results
+ final_recs = result.get("final_recommendations", {})
+ llm1_analysis = result.get("llm1_analysis", {})
+ llm2_analysis = result.get("llm2_analysis", {})
+ rule_calculations = result.get("rule_calculations", {})
+ cost_tracking = result.get("cost_tracking", {})
+
+ # Store for later use
+ state.upgrade_recommendations = final_recs
+ state.multi_agent_result = result
+
+ # Get font info
+ fonts = get_detected_fonts()
+ base_size = get_base_font_size()
+
+ progress(0.9, desc="π Formatting results...")
+
+ # Build status markdown
+ status = build_analysis_status(final_recs, cost_tracking, result.get("errors", []))
+
+ # Format brand/competitor comparison from LLM analyses
+ brand_md = format_multi_agent_comparison(llm1_analysis, llm2_analysis, final_recs)
+
+ # Format font families display
+ font_families_md = format_font_families_display(fonts)
+
+ # Format typography with BOTH desktop and mobile
+ typography_desktop_data = format_typography_comparison_viewport(
+ state.desktop_normalized, base_size, "desktop"
+ )
+ typography_mobile_data = format_typography_comparison_viewport(
+ state.mobile_normalized, base_size, "mobile"
+ )
+
+ # Format spacing comparison table
+ spacing_data = format_spacing_comparison_from_rules(rule_calculations)
+
+ # Format color display: BASE colors + ramps separately
+ base_colors_md = format_base_colors()
+ color_ramps_md = format_color_ramps_from_rules(rule_calculations)
+
+ # Format radius display (with token suggestions)
+ radius_md = format_radius_with_tokens()
+
+ # Format shadows display (with token suggestions)
+ shadows_md = format_shadows_with_tokens()
+
+ # Generate visual previews for Stage 2
+ state.log("")
+ state.log("π¨ Generating visual previews...")
+
+ from core.preview_generator import (
+ generate_typography_preview_html,
+ generate_color_ramps_preview_html,
+ generate_semantic_color_ramps_html
+ )
+
+ primary_font = fonts.get("primary", "Open Sans")
+
+ # Convert typography tokens to dict format for preview
+ typo_dict = {}
+ for name, t in state.desktop_normalized.typography.items():
+ typo_dict[name] = {
+ "font_size": t.font_size,
+ "font_weight": t.font_weight,
+ "line_height": t.line_height or "1.5",
+ "letter_spacing": "0",
+ }
+
+ # Convert color tokens to dict format for preview (with frequency for sorting)
+ color_dict = {}
+ for name, c in state.desktop_normalized.colors.items():
+ color_dict[name] = {
+ "value": c.value,
+ "frequency": c.frequency,
+ }
+
+ typography_preview_html = generate_typography_preview_html(
+ typography_tokens=typo_dict,
+ font_family=primary_font,
+ sample_text="The quick brown fox jumps over the lazy dog",
+ )
+
+ # Use semantic color ramps if available, otherwise fallback to regular
+ semantic_analysis = getattr(state, 'semantic_analysis', None)
+ if semantic_analysis:
+ # Extract LLM color recommendations
+ llm_color_recs = {}
+ if final_recs and isinstance(final_recs, dict):
+ llm_color_recs = final_recs.get("color_recommendations", {})
+ # Also add accessibility fixes
+ aa_fixes = final_recs.get("accessibility_fixes", [])
+ if aa_fixes:
+ llm_color_recs["changes_made"] = [
+ f"AA fix suggested for {f.get('color', '?')}"
+ for f in aa_fixes if isinstance(f, dict)
+ ][:5]
+
+ color_ramps_preview_html = generate_semantic_color_ramps_html(
+ semantic_analysis=semantic_analysis,
+ color_tokens=color_dict,
+ llm_recommendations={"color_recommendations": llm_color_recs} if llm_color_recs else None,
+ )
+            state.log("   ✅ Semantic color ramps preview generated (with LLM recommendations)")
+ else:
+ color_ramps_preview_html = generate_color_ramps_preview_html(
+ color_tokens=color_dict,
+ )
+            state.log("   ✅ Color ramps preview generated (no semantic data)")
+
+        state.log("   ✅ Typography preview generated")
+
+ # Generate LLM recommendations display
+ llm_recs_html = format_llm_color_recommendations_html(final_recs, semantic_analysis)
+ llm_recs_table = format_llm_color_recommendations_table(final_recs, semantic_analysis)
+
+        state.log("   ✅ LLM recommendations formatted")
+
+        progress(1.0, desc="✅ Analysis complete!")
+
+ return (status, state.get_logs(), brand_md, font_families_md,
+ typography_desktop_data, typography_mobile_data, spacing_data,
+ base_colors_md, color_ramps_md, radius_md, shadows_md,
+ typography_preview_html, color_ramps_preview_html,
+ llm_recs_html, llm_recs_table)
+
+ except Exception as e:
+ import traceback
+ state.log(f"β Error: {str(e)}")
+ state.log(traceback.format_exc())
+ return (f"β Analysis failed: {str(e)}", state.get_logs(), "", "", None, None, None, "", "", "", "", "", "", "", [])
+
+
+def normalized_to_dict(normalized) -> dict:
+ """Convert NormalizedTokens to dict for workflow."""
+ if not normalized:
+ return {}
+
+ result = {
+ "colors": {},
+ "typography": {},
+ "spacing": {},
+ "radius": {},
+ "shadows": {},
+ }
+
+ # Colors
+ for name, c in normalized.colors.items():
+ result["colors"][name] = {
+ "value": c.value,
+ "frequency": c.frequency,
+ "suggested_name": c.suggested_name,
+ "contrast_white": c.contrast_white,
+ "contrast_black": c.contrast_black,
+ }
+
+ # Typography
+ for name, t in normalized.typography.items():
+ result["typography"][name] = {
+ "font_family": t.font_family,
+ "font_size": t.font_size,
+ "font_weight": t.font_weight,
+ "line_height": t.line_height,
+ "frequency": t.frequency,
+ }
+
+ # Spacing
+ for name, s in normalized.spacing.items():
+ result["spacing"][name] = {
+ "value": s.value,
+ "value_px": s.value_px,
+ "frequency": s.frequency,
+ }
+
+ # Radius
+ for name, r in normalized.radius.items():
+ result["radius"][name] = {
+ "value": r.value,
+ "frequency": r.frequency,
+ }
+
+ # Shadows
+ for name, s in normalized.shadows.items():
+ result["shadows"][name] = {
+ "value": s.value,
+ "frequency": s.frequency,
+ }
+
+ return result
+
+
+# =============================================================================
+# STAGE 2: NEW ARCHITECTURE (Rule Engine + Benchmark Research + LLM Agents)
+# =============================================================================
+
+async def run_stage2_analysis_v2(
+ selected_benchmarks: list[str] = None,
+ progress=gr.Progress()
+):
+ """
+ Run Stage 2 analysis with new architecture:
+ - Layer 1: Rule Engine (FREE)
+ - Layer 2: Benchmark Research (Firecrawl + Cache)
+ - Layer 3: LLM Agents (Brand ID, Benchmark Advisor, Best Practices)
+ - Layer 4: HEAD Synthesizer
+
+ Includes comprehensive error handling for graceful degradation.
+ """
+
+ # Validate Stage 1 completion
+ if not state.desktop_normalized or not state.mobile_normalized:
+ return create_stage2_error_response("β Please complete Stage 1 first")
+
+ # Default benchmarks if none selected
+ if not selected_benchmarks or len(selected_benchmarks) == 0:
+ selected_benchmarks = [
+ "material_design_3",
+ "shopify_polaris",
+ "atlassian_design",
+ ]
+
+ state.log("")
+ state.log("β" * 60)
+ state.log("π STAGE 2: MULTI-AGENT ANALYSIS")
+ state.log("β" * 60)
+ state.log(f" Started: {datetime.now().strftime('%H:%M:%S')}")
+ state.log(f" Benchmarks: {', '.join(selected_benchmarks)}")
+ state.log("")
+
+ # Initialize results with defaults (for graceful degradation)
+ rule_results = None
+ benchmark_comparisons = []
+ brand_result = None
+ benchmark_advice = None
+ best_practices = None
+ final_synthesis = None
+
+ progress(0.05, desc="βοΈ Running Rule Engine...")
+
+ try:
+ # =================================================================
+ # LAYER 1: RULE ENGINE (FREE) - Critical, must succeed
+ # =================================================================
+ try:
+ from core.rule_engine import run_rule_engine
+
+ # Convert tokens to dict
+ desktop_dict = normalized_to_dict(state.desktop_normalized)
+ mobile_dict = normalized_to_dict(state.mobile_normalized)
+
+ # Validate we have data
+ if not desktop_dict.get("colors") and not desktop_dict.get("typography"):
+ raise ValueError("No tokens extracted from Stage 1")
+
+ # Run rule engine
+ rule_results = run_rule_engine(
+ typography_tokens=desktop_dict.get("typography", {}),
+ color_tokens=desktop_dict.get("colors", {}),
+ spacing_tokens=desktop_dict.get("spacing", {}),
+ radius_tokens=desktop_dict.get("radius", {}),
+ shadow_tokens=desktop_dict.get("shadows", {}),
+ log_callback=state.log,
+ fg_bg_pairs=getattr(state, 'fg_bg_pairs', None),
+ )
+
+ state.rule_engine_results = rule_results
+ state.log("")
+            state.log("   ✅ Rule Engine: SUCCESS")
+
+ except Exception as e:
+ state.log(f" β Rule Engine FAILED: {str(e)[:100]}")
+ state.log(" ββ Cannot proceed without rule engine results")
+ import traceback
+ state.log(traceback.format_exc()[:500])
+ return create_stage2_error_response(f"β Rule Engine failed: {str(e)}")
+
+ progress(0.20, desc="π¬ Researching benchmarks...")
+
+ # =================================================================
+ # LAYER 2: BENCHMARK RESEARCH - Can use fallback
+ # =================================================================
+ try:
+ from agents.benchmark_researcher import BenchmarkResearcher, FALLBACK_BENCHMARKS, BenchmarkData
+
+ # Try to get Firecrawl client (optional)
+ firecrawl_client = None
+ try:
+ from agents.firecrawl_extractor import get_firecrawl_client
+ firecrawl_client = get_firecrawl_client()
+ state.log(" ββ Firecrawl client: Available")
+ except Exception as fc_err:
+ state.log(f" ββ Firecrawl client: Not available ({str(fc_err)[:30]})")
+ state.log(" β ββ Will use cached/fallback data")
+
+ # Get HF client for LLM extraction (optional)
+ hf_client = None
+ try:
+ from core.hf_inference import get_inference_client
+ hf_client = get_inference_client()
+ state.log(" ββ HF client: Available")
+ except Exception as hf_err:
+ state.log(f" ββ HF client: Not available ({str(hf_err)[:30]})")
+
+ researcher = BenchmarkResearcher(
+ firecrawl_client=firecrawl_client,
+ hf_client=hf_client,
+ )
+
+ # Research selected benchmarks (with fallback)
+ try:
+ benchmarks = await researcher.research_selected_benchmarks(
+ selected_keys=selected_benchmarks,
+ log_callback=state.log,
+ )
+ except Exception as research_err:
+ state.log(f" β οΈ Research failed, using fallback: {str(research_err)[:50]}")
+ # Use fallback data
+ benchmarks = []
+ for key in selected_benchmarks:
+ if key in FALLBACK_BENCHMARKS:
+ data = FALLBACK_BENCHMARKS[key]
+ benchmarks.append(BenchmarkData(
+ key=key,
+ name=key.replace("_", " ").title(),
+ short_name=key.split("_")[0].title(),
+ vendor="",
+ icon="π¦",
+ typography=data.get("typography", {}),
+ spacing=data.get("spacing", {}),
+ colors=data.get("colors", {}),
+ fetched_at=datetime.now().isoformat(),
+ confidence="fallback",
+ best_for=[],
+ ))
+
+ # Compare to benchmarks
+ if benchmarks and rule_results:
+ benchmark_comparisons = researcher.compare_to_benchmarks(
+ your_ratio=rule_results.typography.detected_ratio,
+ your_base_size=int(rule_results.typography.base_size) if rule_results.typography.sizes_px else 16,
+ your_spacing_grid=rule_results.spacing.detected_base,
+ benchmarks=benchmarks,
+ log_callback=state.log,
+ )
+ state.benchmark_comparisons = benchmark_comparisons
+ state.log("")
+ state.log(f" β
Benchmark Research: SUCCESS ({len(benchmarks)} systems)")
+ else:
+ state.log(" β οΈ No benchmarks available for comparison")
+
+ except Exception as e:
+ state.log(f" β οΈ Benchmark Research FAILED: {str(e)[:100]}")
+ state.log(" ββ Continuing without benchmark comparison...")
+ benchmark_comparisons = []
+
+ progress(0.40, desc="π€ Running LLM Agents...")
+
+ # =================================================================
+ # LAYER 3: LLM AGENTS - Can fail gracefully
+ # =================================================================
+ try:
+ from agents.llm_agents import (
+ BrandIdentifierAgent,
+ BenchmarkAdvisorAgent,
+ BestPracticesValidatorAgent,
+ BrandIdentification,
+ BenchmarkAdvice,
+ BestPracticesResult,
+ )
+
+ state.log("")
+ state.log("β" * 60)
+ state.log("π€ LAYER 3: LLM ANALYSIS")
+ state.log("β" * 60)
+
+ # Check if HF client is available
+ if not hf_client:
+ try:
+ from core.hf_inference import get_inference_client
+ hf_client = get_inference_client()
+ except Exception:
+ state.log(" β οΈ HF client not available - skipping LLM agents")
+ hf_client = None
+
+ if hf_client:
+ # Initialize agents
+ brand_agent = BrandIdentifierAgent(hf_client)
+ benchmark_agent = BenchmarkAdvisorAgent(hf_client)
+ best_practices_agent = BestPracticesValidatorAgent(hf_client)
+
+ # Get semantic analysis from Stage 1
+ semantic_analysis = getattr(state, 'semantic_analysis', {})
+ desktop_dict = normalized_to_dict(state.desktop_normalized)
+
+ # Run agents (with individual error handling)
+ # Brand Identifier
+ try:
+ brand_result = await brand_agent.analyze(
+ color_tokens=desktop_dict.get("colors", {}),
+ semantic_analysis=semantic_analysis,
+ log_callback=state.log,
+ )
+ except Exception as e:
+ state.log(f" β οΈ Brand Identifier failed: {str(e)[:50]}")
+ brand_result = BrandIdentification()
+
+ # Benchmark Advisor
+ if benchmark_comparisons:
+ try:
+ benchmark_advice = await benchmark_agent.analyze(
+ user_ratio=rule_results.typography.detected_ratio,
+ user_base=int(rule_results.typography.base_size) if rule_results.typography.sizes_px else 16,
+ user_spacing=rule_results.spacing.detected_base,
+ benchmark_comparisons=benchmark_comparisons,
+ log_callback=state.log,
+ )
+ except Exception as e:
+ state.log(f" β οΈ Benchmark Advisor failed: {str(e)[:50]}")
+ benchmark_advice = BenchmarkAdvice()
+ else:
+ benchmark_advice = BenchmarkAdvice()
+
+ # Best Practices Validator
+ try:
+ best_practices = await best_practices_agent.analyze(
+ rule_engine_results=rule_results,
+ log_callback=state.log,
+ )
+ except Exception as e:
+ state.log(f" β οΈ Best Practices Validator failed: {str(e)[:50]}")
+ best_practices = BestPracticesResult(overall_score=rule_results.consistency_score)
+ else:
+ # No HF client - use defaults
+ state.log(" ββ Using default values (no LLM)")
+ brand_result = BrandIdentification()
+ benchmark_advice = BenchmarkAdvice()
+ best_practices = BestPracticesResult(overall_score=rule_results.consistency_score)
+
+ except Exception as e:
+ state.log(f" β οΈ LLM Agents FAILED: {str(e)[:100]}")
+ brand_result = BrandIdentification() if not brand_result else brand_result
+ benchmark_advice = BenchmarkAdvice() if not benchmark_advice else benchmark_advice
+ best_practices = BestPracticesResult(overall_score=rule_results.consistency_score if rule_results else 50)
+
+ progress(0.70, desc="π§ Synthesizing results...")
+
+ # =================================================================
+ # LAYER 4: HEAD SYNTHESIZER - Can use fallback
+ # =================================================================
+ try:
+ from agents.llm_agents import HeadSynthesizerAgent, HeadSynthesis
+
+ if hf_client and brand_result and benchmark_advice and best_practices:
+ head_agent = HeadSynthesizerAgent(hf_client)
+
+ try:
+ final_synthesis = await head_agent.synthesize(
+ rule_engine_results=rule_results,
+ benchmark_comparisons=benchmark_comparisons,
+ brand_identification=brand_result,
+ benchmark_advice=benchmark_advice,
+ best_practices=best_practices,
+ log_callback=state.log,
+ )
+ except Exception as e:
+ state.log(f" β οΈ HEAD Synthesizer failed: {str(e)[:50]}")
+ final_synthesis = None
+
+ # Create fallback synthesis if needed
+ if not final_synthesis:
+ state.log(" ββ Creating fallback synthesis...")
+ final_synthesis = create_fallback_synthesis(
+ rule_results, benchmark_comparisons, brand_result, best_practices
+ )
+
+ state.final_synthesis = final_synthesis
+
+ except Exception as e:
+ state.log(f" β οΈ Synthesis FAILED: {str(e)[:100]}")
+ final_synthesis = create_fallback_synthesis(
+ rule_results, benchmark_comparisons, brand_result, best_practices
+ )
+ state.final_synthesis = final_synthesis
+
+ progress(0.85, desc="π Formatting results...")
+
+ # =================================================================
+ # FORMAT OUTPUTS FOR UI
+ # =================================================================
+
+ try:
+ # Build status markdown
+ status_md = format_stage2_status_v2(
+ rule_results=rule_results,
+ final_synthesis=final_synthesis,
+ best_practices=best_practices,
+ )
+
+ # Build benchmark comparison HTML
+ benchmark_md = format_benchmark_comparison_v2(
+ benchmark_comparisons=benchmark_comparisons,
+ benchmark_advice=benchmark_advice,
+ )
+
+ # Build scores dashboard HTML
+ scores_html = format_scores_dashboard_v2(
+ rule_results=rule_results,
+ final_synthesis=final_synthesis,
+ best_practices=best_practices,
+ )
+
+ # Build priority actions HTML
+ actions_html = format_priority_actions_v2(
+ rule_results=rule_results,
+ final_synthesis=final_synthesis,
+ best_practices=best_practices,
+ )
+
+ # Build color recommendations table
+ color_recs_table = format_color_recommendations_table_v2(
+ rule_results=rule_results,
+ brand_result=brand_result,
+ final_synthesis=final_synthesis,
+ )
+
+ # Get fonts and typography data
+ fonts = get_detected_fonts()
+ base_size = get_base_font_size()
+
+ typography_desktop_data = format_typography_comparison_viewport(
+ state.desktop_normalized, base_size, "desktop"
+ )
+ typography_mobile_data = format_typography_comparison_viewport(
+ state.mobile_normalized, base_size, "mobile"
+ )
+
+ # Generate visual previews
+ typography_preview_html = ""
+ try:
+ from core.preview_generator import generate_typography_preview_html
+
+ primary_font = fonts.get("primary", "Open Sans")
+ desktop_typo_dict = {
+ name: {
+ "font_size": t.font_size,
+ "font_weight": t.font_weight,
+ "line_height": t.line_height,
+ }
+ for name, t in state.desktop_normalized.typography.items()
+ }
+ typography_preview_html = generate_typography_preview_html(desktop_typo_dict, primary_font)
+ except Exception as preview_err:
+ state.log(f" β οΈ Preview generation failed: {str(preview_err)[:50]}")
+ typography_preview_html = "
Preview unavailable
"
+
+ except Exception as format_err:
+ state.log(f" β οΈ Formatting failed: {str(format_err)[:100]}")
+ # Return minimal results
+ return (
+ f"β οΈ Analysis completed with formatting errors: {str(format_err)[:50]}",
+ state.get_logs(),
+ "*Benchmark comparison unavailable*",
+ "Scores unavailable
",
+ "Actions unavailable
",
+ [],
+ None,
+ None,
+ "",
+ )
+
+ progress(0.95, desc="β
Complete!")
+
+ # Final log summary
+ state.log("")
+ state.log("β" * 60)
+ state.log("π FINAL RESULTS")
+ state.log("β" * 60)
+ state.log("")
+ overall_score = final_synthesis.scores.get('overall', rule_results.consistency_score) if final_synthesis else rule_results.consistency_score
+ state.log(f" π― OVERALL SCORE: {overall_score}/100")
+ if final_synthesis and final_synthesis.scores:
+ state.log(f" ββ Accessibility: {final_synthesis.scores.get('accessibility', '?')}/100")
+ state.log(f" ββ Consistency: {final_synthesis.scores.get('consistency', '?')}/100")
+ state.log(f" ββ Organization: {final_synthesis.scores.get('organization', '?')}/100")
+ state.log("")
+ if benchmark_comparisons:
+ state.log(f" π Closest Benchmark: {benchmark_comparisons[0].benchmark.name if benchmark_comparisons else 'N/A'}")
+ state.log("")
+ state.log(" π― TOP 3 ACTIONS:")
+ if final_synthesis and final_synthesis.top_3_actions:
+ for i, action in enumerate(final_synthesis.top_3_actions[:3]):
+ impact = action.get('impact', 'medium')
+ icon = "π΄" if impact == "high" else "π‘" if impact == "medium" else "π’"
+ state.log(f" β {i+1}. {icon} {action.get('action', 'N/A')}")
+ else:
+ state.log(f" β 1. π΄ Fix {rule_results.aa_failures} AA compliance failures")
+ state.log("")
+ state.log("β" * 60)
+ state.log(f" π° TOTAL COST: ~$0.003")
+ state.log(f" β±οΈ COMPLETED: {datetime.now().strftime('%H:%M:%S')}")
+ state.log("β" * 60)
+
+ return (
+ status_md,
+ state.get_logs(),
+ benchmark_md,
+ scores_html,
+ actions_html,
+ color_recs_table,
+ typography_desktop_data,
+ typography_mobile_data,
+ typography_preview_html,
+ )
+
+ except Exception as e:
+ import traceback
+ state.log(f"β Critical Error: {str(e)}")
+ state.log(traceback.format_exc())
+ return create_stage2_error_response(f"β Analysis failed: {str(e)}")
+
+
def create_fallback_synthesis(rule_results, benchmark_comparisons, brand_result, best_practices):
    """Build a deterministic HeadSynthesis from rule-engine output.

    Used when the LLM HEAD synthesizer fails or is unavailable: scores
    and actions are derived purely from `rule_results`, with safe
    defaults when even those are missing.
    """
    from agents.llm_agents import HeadSynthesis

    # Scores come straight from the rule engine (no LLM involved).
    overall_score = rule_results.consistency_score if rule_results else 50
    a11y_score = max(0, 100 - (rule_results.aa_failures * 10)) if rule_results else 50

    # Assemble up to three priority actions from rule-engine findings.
    action_items = []
    if rule_results:
        if rule_results.aa_failures > 0:
            action_items.append({
                "action": f"Fix {rule_results.aa_failures} colors failing AA compliance",
                "impact": "high",
                "effort": "30 min",
            })
        if not rule_results.typography.is_consistent:
            action_items.append({
                "action": f"Align type scale to {rule_results.typography.recommendation} ({rule_results.typography.recommendation_name})",
                "impact": "medium",
                "effort": "1 hour",
            })
        if rule_results.color_stats.unique_count > 30:
            action_items.append({
                "action": f"Consolidate {rule_results.color_stats.unique_count} colors to ~15 semantic colors",
                "impact": "medium",
                "effort": "2 hours",
            })

    # Closest benchmark, if any comparisons were produced.
    top_comparison = benchmark_comparisons[0] if benchmark_comparisons else None

    return HeadSynthesis(
        executive_summary=f"Your design system scores {overall_score}/100. Analysis completed with fallback synthesis.",
        scores={
            "overall": overall_score,
            "accessibility": a11y_score,
            "consistency": overall_score,
            "organization": 50,
        },
        benchmark_fit={
            "closest": top_comparison.benchmark.name if top_comparison else "Unknown",
            "similarity": f"{top_comparison.overall_match_pct:.0f}%" if top_comparison else "N/A",
        },
        brand_analysis={
            "primary": brand_result.brand_primary.get("color", "Unknown") if brand_result else "Unknown",
            "cohesion": brand_result.cohesion_score if brand_result else 5,
        },
        top_3_actions=action_items[:3],
        color_recommendations=[],
        type_scale_recommendation={
            "current_ratio": rule_results.typography.detected_ratio if rule_results else 1.0,
            "recommended_ratio": rule_results.typography.recommendation if rule_results else 1.25,
        },
        spacing_recommendation={
            "current": f"{rule_results.spacing.detected_base}px" if rule_results else "Unknown",
            "recommended": f"{rule_results.spacing.recommendation}px" if rule_results else "8px",
        },
    )
+
+
def create_stage2_error_response(error_msg: str):
    """Return the 9-element Stage 2 output tuple populated for an error.

    Element order mirrors the success path: status message, logs, then
    the empty UI payloads (markdown/HTML fragments, table, dataframes,
    preview).
    """
    return (
        error_msg,          # status markdown
        state.get_logs(),   # current log transcript
        "",                 # benchmark_md
        "",                 # scores_html
        "",                 # actions_html
        [],                 # color_recs_table
        None,               # typography_desktop
        None,               # typography_mobile
        "",                 # typography_preview
    )
+
+
def format_stage2_status_v2(rule_results, final_synthesis, best_practices) -> str:
    """Format Stage 2 status with new architecture results.

    Builds the markdown status summary (header, overall score, executive
    summary, quick stats, cost estimate) shown after the Stage 2 analysis.
    `best_practices` is not read here; it is kept for signature parity
    with the other Stage 2 formatters.

    Args:
        rule_results: Rule-engine results (aa_failures, typography,
            spacing, color_stats).
        final_synthesis: HEAD synthesis with a `scores` dict and an
            optional `executive_summary`.
        best_practices: Best-practices validator result (unused).

    Returns:
        A markdown string.
    """

    lines = []
    # NOTE(review): the header emoji below appears mojibake/line-split in
    # this source; left byte-identical on purpose.
    lines.append("## β
Analysis Complete!")
    lines.append("")

    # Overall Score — falls back to the rule engine's consistency score
    # when the synthesis produced no 'overall' entry.
    overall = final_synthesis.scores.get('overall', rule_results.consistency_score)
    lines.append(f"### π― Overall Score: {overall}/100")
    lines.append("")

    # Executive Summary (italicized, only when the synthesizer wrote one)
    if final_synthesis.executive_summary:
        lines.append(f"*{final_synthesis.executive_summary}*")
        lines.append("")

    # Quick Stats pulled directly from the rule engine
    lines.append("### π Quick Stats")
    lines.append(f"- **AA Failures:** {rule_results.aa_failures}")
    lines.append(f"- **Type Scale:** {rule_results.typography.detected_ratio:.3f} ({rule_results.typography.scale_name})")
    lines.append(f"- **Spacing Grid:** {rule_results.spacing.detected_base}px ({rule_results.spacing.alignment_percentage:.0f}% aligned)")
    lines.append(f"- **Unique Colors:** {rule_results.color_stats.unique_count}")
    lines.append("")

    # Cost — fixed estimate; the rule engine itself is free.
    lines.append("### π° Cost")
    lines.append("**Total:** ~$0.003 (Rule Engine: $0 + LLM: ~$0.003)")

    return "\n".join(lines)
+
+
def format_benchmark_comparison_v2(benchmark_comparisons, benchmark_advice) -> str:
    """Render the benchmark comparison as markdown.

    Sections: the advisor's recommended system (with trimmed rationale),
    a top-5 similarity ranking table, and up to three alignment changes.
    Returns a placeholder string when no comparisons exist.
    """

    if not benchmark_comparisons:
        return "*No benchmark comparison available*"

    out = ["## π Benchmark Comparison", ""]

    # Advisor's pick, when one was produced.
    if benchmark_advice and benchmark_advice.recommended_benchmark_name:
        out.append(f"### π Recommended: {benchmark_advice.recommended_benchmark_name}")
        if benchmark_advice.reasoning:
            out.append(f"*{benchmark_advice.reasoning[:200]}*")
        out.append("")

    # Similarity ranking table (top 5; podium positions get medal icons).
    out.extend([
        "### π Similarity Ranking",
        "",
        "| Rank | Design System | Match | Type Ratio | Base | Grid |",
        "|------|---------------|-------|------------|------|------|",
    ])

    medals = ["π₯", "π₯", "π₯"]
    for rank, comparison in enumerate(benchmark_comparisons[:5]):
        label = medals[rank] if rank < 3 else str(rank + 1)
        system = comparison.benchmark
        out.append(
            f"| {label} | {system.icon} {system.short_name} | {comparison.overall_match_pct:.0f}% | "
            f"{system.typography.get('scale_ratio', '?')} | {system.typography.get('base_size', '?')}px | "
            f"{system.spacing.get('base', '?')}px |"
        )

    out.append("")

    # Concrete changes needed to align with the recommended system.
    if benchmark_advice and benchmark_advice.alignment_changes:
        out.append("### π§ Changes to Align")
        for change in benchmark_advice.alignment_changes[:3]:
            out.append(f"- **{change.get('change', '?')}**: {change.get('from', '?')} β {change.get('to', '?')} (effort: {change.get('effort', '?')})")

    return "\n".join(out)
+
+
def format_scores_dashboard_v2(rule_results, final_synthesis, best_practices) -> str:
    """Format scores dashboard HTML.

    Computes the four scores (preferring the HEAD synthesis, falling back
    to rule-engine-derived values) and interpolates three of them into an
    HTML score-card row.

    NOTE(review): the HTML template below appears to have lost its tags
    during extraction (only the interpolated values and labels remain);
    it is preserved byte-identical here. `overall` and `score_color` are
    computed but not referenced in the surviving template text —
    presumably the stripped markup used them; confirm against the
    original file.

    Args:
        rule_results: Rule-engine results (consistency_score, aa_failures).
        final_synthesis: HEAD synthesis with a `scores` dict.
        best_practices: Best-practices validator result (unused here).

    Returns:
        An HTML fragment string.
    """

    overall = final_synthesis.scores.get('overall', rule_results.consistency_score)
    # Fallback accessibility score: dock 5 points per AA failure.
    accessibility = final_synthesis.scores.get('accessibility', 100 - (rule_results.aa_failures * 5))
    consistency = final_synthesis.scores.get('consistency', rule_results.consistency_score)
    organization = final_synthesis.scores.get('organization', 50)

    def score_color(score):
        # Traffic-light thresholds: >=80 green, >=60 amber, else red.
        if score >= 80:
            return "#10b981"  # Green
        elif score >= 60:
            return "#f59e0b"  # Yellow
        else:
            return "#ef4444"  # Red

    html = f"""




{accessibility}

Accessibility



{consistency}

Consistency



{organization}

Organization


    """

    return html
+
+
def format_priority_actions_v2(rule_results, final_synthesis, best_practices) -> str:
    """Format priority actions HTML.

    Picks the top-3 action list in priority order: HEAD synthesis
    actions, then the best-practices validator's priority fixes, then
    defaults derived from the rule engine.

    NOTE(review): the HTML templates below appear to have lost their tags
    during extraction (only text content and interpolations remain); they
    are preserved byte-identical. `border_color`, `impact_bg` and
    `impact_text` are computed but unreferenced in the surviving template
    text — presumably the stripped markup styled with them; confirm
    against the original file.
    """

    actions = final_synthesis.top_3_actions if final_synthesis.top_3_actions else []

    # If no synthesis actions, build from rule engine
    if not actions and best_practices and best_practices.priority_fixes:
        actions = best_practices.priority_fixes

    if not actions:
        # Default actions from rule engine
        actions = []
        if rule_results.aa_failures > 0:
            actions.append({
                "action": f"Fix {rule_results.aa_failures} colors failing AA compliance",
                "impact": "high",
                "effort": "30 min",
            })
        if not rule_results.typography.is_consistent:
            actions.append({
                "action": f"Align type scale to {rule_results.typography.recommendation} ({rule_results.typography.recommendation_name})",
                "impact": "medium",
                "effort": "1 hour",
            })
        if rule_results.color_stats.unique_count > 30:
            actions.append({
                "action": f"Consolidate {rule_results.color_stats.unique_count} colors to ~15 semantic colors",
                "impact": "medium",
                "effort": "2 hours",
            })

    html_items = []
    for i, action in enumerate(actions[:3]):
        # Impact level drives the accent colors: high=red, medium=amber, low=green.
        impact = action.get('impact', 'medium')
        border_color = "#ef4444" if impact == "high" else "#f59e0b" if impact == "medium" else "#10b981"
        impact_bg = "#fee2e2" if impact == "high" else "#fef3c7" if impact == "medium" else "#dcfce7"
        impact_text = "#991b1b" if impact == "high" else "#92400e" if impact == "medium" else "#166534"
        icon = "π΄" if impact == "high" else "π‘" if impact == "medium" else "π’"

        html_items.append(f"""




{icon} {action.get('action', 'N/A')}


{action.get('details', '')}




{impact.upper()}


{action.get('effort', '?')}




        """)

    return f"""


π― Priority Actions
    {''.join(html_items)}

    """
+
+
def format_color_recommendations_table_v2(rule_results, brand_result, final_synthesis) -> list:
    """Build color-recommendation table rows.

    Row shape: [accept, role, current, reason, suggested, contrast].
    AA contrast failures with a suggested fix come first, followed by
    synthesis-level recommendations (no-ops and duplicates skipped).
    """

    table_rows = []

    # AA contrast failures that ship with a concrete suggested fix.
    for check in rule_results.accessibility:
        if check.passes_aa_normal or not check.suggested_fix:
            continue
        # Label the row as the brand primary when it matches that color.
        is_brand_primary = bool(brand_result) and brand_result.brand_primary.get("color") == check.hex_color
        table_rows.append([
            True,  # accepted by default
            "brand.primary" if is_brand_primary else check.name,
            check.hex_color,
            f"Fails AA ({check.contrast_on_white:.1f}:1)",
            check.suggested_fix,
            f"{check.suggested_fix_contrast:.1f}:1",
        ])

    # Synthesis-level recommendations.
    if final_synthesis and final_synthesis.color_recommendations:
        for rec in final_synthesis.color_recommendations:
            current_hex = rec.get("current")
            if current_hex == rec.get("suggested"):
                continue  # no actual change proposed
            if any(existing[2] == current_hex for existing in table_rows):
                continue  # this color already has a row
            table_rows.append([
                rec.get("accept", True),
                rec.get("role", "unknown"),
                rec.get("current", ""),
                rec.get("reason", ""),
                rec.get("suggested", ""),
                "",
            ])

    return table_rows
+
+
def build_analysis_status(final_recs: dict, cost_tracking: dict, errors: list) -> str:
    """Build status markdown from analysis results.

    Args:
        final_recs: Final recommendation payload; may be None or empty
            when upstream analysis failed.
        cost_tracking: Cost summary dict with a "total_cost" entry.
        errors: Warning/error strings (only the first 3 are shown,
            each truncated to 100 chars).

    Returns:
        A markdown document with cost, recommendations, summary,
        confidence, and warnings sections (sections are omitted when
        their data is missing).
    """

    lines = ["## π§  Multi-Agent Analysis Complete!"]
    lines.append("")

    # Cost summary
    if cost_tracking:
        total_cost = cost_tracking.get("total_cost", 0)
        lines.append("### π° Cost Summary")
        lines.append(f"**Total estimated cost:** ${total_cost:.4f}")
        lines.append("*(Free tier: $0.10/mo | Pro: $2.00/mo)*")
        lines.append("")

    # BUG FIX: final_recs may be None (the guard below used to check
    # `final_recs and ...`, but the summary/confidence lookups further
    # down called .get() on it unconditionally and crashed).
    final_recs = final_recs or {}

    # Final recommendations
    if "final_recommendations" in final_recs:
        recs = final_recs["final_recommendations"]
        lines.append("### π Recommendations")

        if recs.get("type_scale"):
            lines.append(f"**Type Scale:** {recs['type_scale']}")
            if recs.get("type_scale_rationale"):
                lines.append(f"  *{recs['type_scale_rationale'][:100]}*")

        if recs.get("spacing_base"):
            lines.append(f"**Spacing:** {recs['spacing_base']}")

        lines.append("")

    # Summary
    if final_recs.get("summary"):
        lines.append("### π Summary")
        lines.append(final_recs["summary"])
        lines.append("")

    # Confidence
    if final_recs.get("overall_confidence"):
        lines.append(f"**Confidence:** {final_recs['overall_confidence']}%")

    # Errors
    if errors:
        lines.append("")
        lines.append("### β οΈ Warnings")
        for err in errors[:3]:
            lines.append(f"- {err[:100]}")

    return "\n".join(lines)
+
+
def format_multi_agent_comparison(llm1: dict, llm2: dict, final: dict) -> str:
    """Format comparison from multi-agent analysis.

    Renders the two analyst models side by side: agreed findings,
    resolved disagreements, and a per-category score table.

    Args:
        llm1: Analysis dict from the first model (Qwen); category keys
            may map to dicts containing a "score".
        llm2: Analysis dict from the second model (Llama), same shape.
        final: Arbiter output with optional "agreements" and
            "disagreements" lists of dicts.

    Returns:
        A markdown fragment.
    """

    lines = ["### π Multi-Agent Analysis Comparison"]
    lines.append("")

    # Agreements (capped at 5, findings trimmed to 80 chars).
    # NOTE(review): the header emoji below appears mojibake/line-split in
    # this source; left byte-identical on purpose.
    if final.get("agreements"):
        lines.append("#### β
Agreements (High Confidence)")
        for a in final["agreements"][:5]:
            topic = a.get("topic", "?")
            finding = a.get("finding", "?")[:80]
            lines.append(f"- **{topic}**: {finding}")
        lines.append("")

    # Disagreements and resolutions (capped at 3, trimmed to 100 chars).
    if final.get("disagreements"):
        lines.append("#### π Resolved Disagreements")
        for d in final["disagreements"][:3]:
            topic = d.get("topic", "?")
            resolution = d.get("resolution", "?")[:100]
            lines.append(f"- **{topic}**: {resolution}")
        lines.append("")

    # Score comparison table; "?" when a category is missing or is not
    # the expected dict shape.
    lines.append("#### π Score Comparison")
    lines.append("")
    lines.append("| Category | LLM 1 (Qwen) | LLM 2 (Llama) |")
    lines.append("|----------|--------------|---------------|")

    categories = ["typography", "colors", "accessibility", "spacing"]
    for cat in categories:
        llm1_score = llm1.get(cat, {}).get("score", "?") if isinstance(llm1.get(cat), dict) else "?"
        llm2_score = llm2.get(cat, {}).get("score", "?") if isinstance(llm2.get(cat), dict) else "?"
        lines.append(f"| {cat.title()} | {llm1_score}/10 | {llm2_score}/10 |")

    return "\n".join(lines)
+
+
def format_spacing_comparison_from_rules(rule_calculations: dict) -> list:
    """Build spacing comparison rows [current, 8px-grid, 4px-grid].

    Ten rows: the first five follow a 4px rhythm, the rest an 8px
    rhythm. Grid columns come from the rule engine's spacing_options;
    indices past the end of a grid render as a dash.
    """
    if not rule_calculations:
        return []

    options = rule_calculations.get("spacing_options", {})
    eight_grid = options.get("8px", [])
    four_grid = options.get("4px", [])

    def grid_value(grid, idx):
        # Grids may be shorter than 10 steps; show a dash past the end.
        return f"{grid[idx]}px" if idx < len(grid) else "β"

    rows = []
    for step in range(10):
        multiplier = 4 if step < 5 else 8
        current = f"{(step + 1) * multiplier}px"
        rows.append([current, grid_value(eight_grid, step + 1), grid_value(four_grid, step + 1)])

    return rows
+
+
def format_color_ramps_from_rules(rule_calculations: dict) -> str:
    """Render rule-engine color ramps as markdown tables.

    Shows the first six ramps; only complete ramps (lists of >= 10
    shades) get a 50-900 shade table, others are listed by name alone.
    """
    ramps = (rule_calculations or {}).get("color_ramps", {})
    if not ramps:
        return "*No color ramps generated*"

    md = ["### π Generated Color Ramps", ""]

    for ramp_name, shades in list(ramps.items())[:6]:
        md.append(f"**{ramp_name}**")
        if isinstance(shades, list) and len(shades) >= 10:
            md.append("| 50 | 100 | 200 | 300 | 400 | 500 | 600 | 700 | 800 | 900 |")
            md.append("|---|---|---|---|---|---|---|---|---|---|")
            md.append("| " + " | ".join(f"`{shades[i]}`" for i in range(10)) + " |")
        md.append("")

    return "\n".join(md)
+
+
def get_detected_fonts() -> dict:
    """Summarize font usage from the normalized desktop tokens.

    Returns:
        dict with:
          - "primary": most frequently used family (by token frequency),
          - "weights": sorted list of numeric weights seen ([400] when
            none parsed),
          - "all_fonts": per-family frequency totals.
        Falls back to {"primary": "Unknown", "weights": []} when no
        desktop tokens have been extracted yet.
    """
    if not state.desktop_normalized:
        return {"primary": "Unknown", "weights": []}

    font_usage = {}
    weights = set()

    for token in state.desktop_normalized.typography.values():
        font_usage[token.font_family] = font_usage.get(token.font_family, 0) + token.frequency

        if token.font_weight:
            # Weights may arrive as non-numeric strings (e.g. "bold");
            # skip those. Previously a bare `except` that also hid
            # unrelated errors.
            try:
                weights.add(int(token.font_weight))
            except (TypeError, ValueError):
                pass

    primary = max(font_usage.items(), key=lambda item: item[1])[0] if font_usage else "Unknown"

    return {
        "primary": primary,
        "weights": sorted(weights) if weights else [400],
        "all_fonts": font_usage,
    }
+
+
def get_base_font_size() -> int:
    """Detect the base (body) font size in px from desktop typography.

    Picks the most frequency-weighted size in the typical body range
    (14-18px); returns 16 when nothing has been extracted or nothing
    falls in that range.
    """
    if not state.desktop_normalized:
        return 16

    # Frequency-weighted votes for each candidate body size.
    votes = {}
    for token in state.desktop_normalized.typography.values():
        raw = str(token.font_size).replace('px', '').replace('rem', '').replace('em', '')
        try:
            size = float(raw)
        except ValueError:
            # Non-numeric size strings are skipped (was a bare except).
            continue
        if 14 <= size <= 18:
            votes[size] = votes.get(size, 0) + token.frequency

    if votes:
        return int(max(votes.items(), key=lambda item: item[1])[0])
    return 16
+
+
def format_brand_comparison(recommendations) -> str:
    """Render the top-5 brand design-system comparison as a markdown table.

    Notes longer than 50 chars are truncated with an ellipsis so the
    table cells stay compact.
    """
    if not recommendations.brand_analysis:
        return "*Brand analysis not available*"

    table = [
        "### π Design System Comparison (5 Top Brands)",
        "",
        "| Brand | Type Ratio | Base Size | Spacing | Notes |",
        "|-------|------------|-----------|---------|-------|",
    ]

    for entry in recommendations.brand_analysis[:5]:
        notes_full = entry.get("notes", "")
        notes = notes_full[:50] + ("..." if len(notes_full) > 50 else "")
        table.append(
            f"| {entry.get('brand', 'Unknown')} | {entry.get('ratio', '?')} "
            f"| {entry.get('base', '?')}px | {entry.get('spacing', '?')} | {notes} |"
        )

    return "\n".join(table)
+
+
def format_font_families_display(fonts: dict) -> str:
    """Render detected font families as markdown.

    Shows the primary family and its weights; when more than one family
    was detected, adds a usage table for the top five by count.
    """
    primary_family = fonts.get("primary", "Unknown")
    detected_weights = fonts.get("weights", [400])
    usage_by_family = fonts.get("all_fonts", {})

    md = [
        f"### Primary Font: **{primary_family}**",
        "",
        f"**Weights detected:** {', '.join(map(str, detected_weights))}",
        "",
    ]

    # Only show the breakdown when more than one family was seen.
    if usage_by_family and len(usage_by_family) > 1:
        md += [
            "### All Fonts Detected",
            "",
            "| Font Family | Usage Count |",
            "|-------------|-------------|",
        ]
        for family, count in sorted(usage_by_family.items(), key=lambda kv: -kv[1])[:5]:
            md.append(f"| {family} | {count:,} |")

    md.append("")
    md.append("*Note: This analysis focuses on English typography only.*")

    return "\n".join(md)
+
+
def format_llm_color_recommendations_html(final_recs: dict, semantic_analysis: dict) -> str:
    """Generate HTML showing LLM color recommendations with before/after comparison.

    NOTE(review): the HTML templates in this function appear to have lost
    their tags during extraction (only text content and interpolations
    remain); they are preserved byte-identical below. `semantic_analysis`
    is accepted but not read here; kept for signature parity.

    Args:
        final_recs: Final recommendation payload with optional
            "color_recommendations" (role -> rec dict) and
            "accessibility_fixes" (list of fix dicts).
        semantic_analysis: Unused.

    Returns:
        An HTML fragment, or a placeholder message when there is nothing
        to show.
    """

    if not final_recs:
        return '''


No LLM recommendations available yet. Run analysis first.

        '''

    color_recs = final_recs.get("color_recommendations", {})
    aa_fixes = final_recs.get("accessibility_fixes", [])

    if not color_recs and not aa_fixes:
        return '''


β
No color changes recommended. Your colors look good!

        '''

    # Build recommendations HTML
    recs_html = ""

    # Process color recommendations (skip metadata keys and malformed entries)
    for role, rec in color_recs.items():
        if not isinstance(rec, dict):
            continue
        if role in ["generate_ramps_for", "changes_made"]:
            continue

        current = rec.get("current", "?")
        suggested = rec.get("suggested", current)
        action = rec.get("action", "keep")
        rationale = rec.get("rationale", "")

        if action == "keep" or suggested == current:
            # No change needed
            recs_html += f'''



{role}
{current}
β Keep


            '''
        else:
            # Change suggested
            recs_html += f'''



β



After

{suggested}



{role}
{rationale[:80]}...


            '''

    # Process accessibility fixes
    for fix in aa_fixes:
        if not isinstance(fix, dict):
            continue

        color = fix.get("color", "?")
        role = fix.get("role", "unknown")
        issue = fix.get("issue", "contrast issue")
        fix_color = fix.get("fix", color)
        current_contrast = fix.get("current_contrast", "?")
        fixed_contrast = fix.get("fixed_contrast", "?")

        # Only render fixes that actually change the color
        if fix_color and fix_color != color:
            recs_html += f'''




β οΈ {current_contrast}:1

{color}


β



β {fixed_contrast}:1

{fix_color}



{role}
π΄ {issue}


            '''

    # Every entry was a keep/no-op: show the all-good message instead
    if not recs_html:
        return '''


β
No color changes recommended. Your colors look good!

        '''

    html = f'''



    {recs_html}

    '''

    return html
+
+
def format_llm_color_recommendations_table(final_recs: dict, semantic_analysis: dict) -> list:
    """Generate table rows for LLM color recommendations.

    Row shape: [accept, role, current, reason, suggested, contrast_change].
    `semantic_analysis` is accepted for signature parity but unused here.

    Args:
        final_recs: Payload with optional "color_recommendations"
            (role -> rec dict) and "accessibility_fixes" (list of dicts).
        semantic_analysis: Unused.

    Returns:
        A list of rows suitable for the Gradio dataframe component;
        empty when there is nothing to recommend.
    """

    rows = []

    if not final_recs:
        return rows

    color_recs = final_recs.get("color_recommendations", {})
    aa_fixes = final_recs.get("accessibility_fixes", [])

    # LLM color recommendations (skip metadata keys and malformed entries).
    for role, rec in color_recs.items():
        if not isinstance(rec, dict):
            continue
        if role in ["generate_ramps_for", "changes_made"]:
            continue

        current = rec.get("current", "?")
        suggested = rec.get("suggested", current)
        action = rec.get("action", "keep")
        rationale = rec.get("rationale", "")[:50]

        if action != "keep" and suggested != current:
            # Annotate the contrast improvement when the color utils are
            # importable and the values parse; "?" otherwise. (Was a bare
            # `except:` which also swallowed KeyboardInterrupt/SystemExit.)
            try:
                from core.color_utils import get_contrast_with_white
                old_contrast = get_contrast_with_white(current)
                new_contrast = get_contrast_with_white(suggested)
                contrast_str = f"{old_contrast:.1f} β {new_contrast:.1f}"
            except Exception:
                contrast_str = "?"

            rows.append([
                True,  # Accept checkbox (default True)
                role,
                current,
                rationale or action,
                suggested,
                contrast_str,
            ])

    # Accessibility fixes proposed by the validator.
    for fix in aa_fixes:
        if not isinstance(fix, dict):
            continue

        color = fix.get("color", "?")
        role = fix.get("role", "unknown")
        issue = fix.get("issue", "contrast")[:40]
        fix_color = fix.get("fix", color)
        current_contrast = fix.get("current_contrast", "?")
        fixed_contrast = fix.get("fixed_contrast", "?")

        # Only fixes that actually change the color get a row.
        if fix_color and fix_color != color:
            rows.append([
                True,  # Accept checkbox
                f"{role} (AA fix)",
                color,
                issue,
                fix_color,
                f"{current_contrast}:1 β {fixed_contrast}:1",
            ])

    return rows
+
+
def format_typography_comparison_viewport(normalized_tokens, base_size: int, viewport: str) -> list:
    """Format the typography comparison table for one viewport.

    Args:
        normalized_tokens: NormalizedTokens with a `.typography` mapping
            of token objects carrying `.font_size` (or None, producing
            no rows).
        base_size: Detected base font size in px; 0/None falls back to 16.
        viewport: "desktop" or "mobile"; mobile sizes scale by 0.875.

    Returns:
        One row per semantic token level (13 rows):
        [name, current, 1.2-scale, 1.25-scale, 1.333-scale, keep],
        where "keep" mirrors the current value.
    """
    if not normalized_tokens:
        return []

    def parse_size(token) -> float:
        """Parse a size like '16px'/'1rem' into a float; 16 when unparseable."""
        raw = str(token.font_size).replace('px', '').replace('rem', '').replace('em', '')
        try:
            return float(raw)
        except ValueError:
            # Only parse failures are expected here (was a bare except).
            return 16

    # Current sizes, largest first, to line up with the token ladder below.
    current_typo = sorted(normalized_tokens.typography.values(), key=parse_size, reverse=True)
    sizes = [parse_size(t) for t in current_typo]

    # Use detected base or default
    base = base_size if base_size else 16

    # Mobile scales down to ~87.5% of desktop (typical responsive ratio).
    mobile_factor = 0.875 if viewport == "mobile" else 1.0

    # Token names (13 levels, display down to overline)
    token_names = [
        "display.2xl", "display.xl", "display.lg", "display.md",
        "heading.xl", "heading.lg", "heading.md", "heading.sm",
        "body.lg", "body.md", "body.sm",
        "caption", "overline"
    ]

    def round_to_even(val):
        """Round to even numbers for cleaner type scales."""
        return int(round(val / 2) * 2)

    # Candidate modular scales, anchored so index 8 (body.lg) ~ base size.
    scales = {
        ratio_name: [round_to_even(base * mobile_factor * (ratio ** (8 - i))) for i in range(13)]
        for ratio_name, ratio in (("1.2", 1.2), ("1.25", 1.25), ("1.333", 1.333))
    }

    # Build comparison table; dash out levels beyond the detected sizes.
    data = []
    for i, name in enumerate(token_names):
        current = f"{int(sizes[i])}px" if i < len(sizes) else "β"
        data.append([
            name,
            current,
            f"{scales['1.2'][i]}px",
            f"{scales['1.25'][i]}px",
            f"{scales['1.333'][i]}px",
            current,  # "keep" column defaults to the current value
        ])

    return data
+
+
def format_base_colors() -> str:
    """Render the top 10 detected base colors as a markdown table."""
    if not state.desktop_normalized:
        return "*No colors detected*"

    # Most frequently used colors first.
    palette = sorted(state.desktop_normalized.colors.values(), key=lambda c: -c.frequency)

    md = [
        "### π¨ Base Colors (Detected)",
        "",
        "These are the primary colors extracted from your website:",
        "",
        "| Color | Hex | Role | Frequency | Contrast |",
        "|-------|-----|------|-----------|----------|",
    ]

    def classify(suggested_name) -> str:
        # Map a suggested token name onto a coarse role; "Accent" otherwise.
        label = (suggested_name or "").lower()
        for keyword, role in (("primary", "Primary"), ("text", "Text"),
                              ("background", "Background"), ("border", "Border")):
            if keyword in label:
                return role
        return "Accent"

    for entry in palette[:10]:
        contrast = f"{entry.contrast_white:.1f}:1" if entry.contrast_white else "β"
        # The emoji is only a placeholder swatch; markdown can't render hex.
        md.append(f"| π¦ | `{entry.value}` | {classify(entry.suggested_name)} | {entry.frequency:,} | {contrast} |")

    return "\n".join(md)
+
+
def format_color_ramps_visual(recommendations) -> str:
    """Render generated color ramps (50-950) for the top detected colors.

    Args:
        recommendations: Unused; kept for call-site compatibility.

    Returns:
        Markdown string with one shade table per base color, or a
        placeholder note when no colors were extracted.
    """
    if not state.desktop_normalized:
        return "*No colors to display*"

    palette = list(state.desktop_normalized.colors.values())
    palette.sort(key=lambda c: -c.frequency)

    out = [
        "### π Generated Color Ramps",
        "",
        "Full ramp (50-950) generated for each base color:",
        "",
    ]

    from core.color_utils import generate_color_ramp

    for entry in palette[:6]:  # Top 6 colors
        base_hex = entry.value
        if entry.suggested_name and '.' in entry.suggested_name:
            role = entry.suggested_name.split('.')[1]
        else:
            role = "color"

        try:
            ramp = generate_color_ramp(base_hex)

            out.append(f"**{role.upper()}** (base: `{base_hex}`)")
            out.append("")
            out.append("| 50 | 100 | 200 | 300 | 400 | 500 | 600 | 700 | 800 | 900 |")
            out.append("|---|---|---|---|---|---|---|---|---|---|")

            # One Markdown row; cells past the ramp's length show a dash.
            cells = [
                f" `{ramp[i]}` |" if i < len(ramp) else " β |"
                for i in range(10)
            ]
            out.append("|" + "".join(cells))
            out.append("")

        except Exception as e:
            out.append(f"**{role}** (`{base_hex}`) β Could not generate ramp: {str(e)}")
            out.append("")

    return "\n".join(out)
+
+
def format_radius_with_tokens() -> str:
    """Format radius with token name suggestions.

    Maps each detected border radius to a suggested token
    (``radius.none`` … ``radius.2xl``) by pixel value; percentage values
    and values >= 50px map to ``radius.full``.

    Returns:
        Markdown table string, or a placeholder note when nothing detected.
    """
    if not state.desktop_normalized or not state.desktop_normalized.radius:
        return "*No border radius values detected.*"

    radii = list(state.desktop_normalized.radius.values())

    lines = [
        "### π Border Radius Tokens",
        "",
        "| Detected | Suggested Token | Usage |",
        "|----------|-----------------|-------|",
    ]

    # Sort by pixel value
    def parse_radius(r):
        """Numeric pixel value of a radius token; 999 sorts unparseable last."""
        val = str(r.value).replace('px', '').replace('%', '')
        try:
            return float(val)
        except ValueError:  # was a bare except; only parse failures expected here
            return 999

    radii.sort(key=parse_radius)

    # Half-open [low, high) pixel ranges mapped to (token name, usage hint).
    token_map = {
        (0, 2): ("radius.none", "Sharp corners"),
        (2, 4): ("radius.xs", "Subtle rounding"),
        (4, 6): ("radius.sm", "Small elements"),
        (6, 10): ("radius.md", "Buttons, cards"),
        (10, 16): ("radius.lg", "Modals, panels"),
        (16, 32): ("radius.xl", "Large containers"),
        (32, 100): ("radius.2xl", "Pill shapes"),
    }

    for r in radii[:8]:
        val = str(r.value)
        px = parse_radius(r)

        if "%" in str(r.value) or px >= 50:
            # Percentages and very large radii behave as fully-round shapes.
            token = "radius.full"
            usage = "Circles, avatars"
        else:
            token = "radius.md"
            usage = "General use"
            for (low, high), (t, u) in token_map.items():
                if low <= px < high:
                    token = t
                    usage = u
                    break

        lines.append(f"| {val} | `{token}` | {usage} |")

    return "\n".join(lines)
+
+
def format_shadows_with_tokens() -> str:
    """Format detected box shadows as a Markdown table with token names.

    Returns:
        Markdown table string, or a placeholder note when nothing detected.
    """
    if not state.desktop_normalized or not state.desktop_normalized.shadows:
        return "*No shadow values detected.*"

    detected = list(state.desktop_normalized.shadows.values())

    rows = [
        "### π«οΈ Shadow Tokens",
        "",
        "| Detected Value | Suggested Token | Use Case |",
        "|----------------|-----------------|----------|",
    ]

    size_names = ["shadow.xs", "shadow.sm", "shadow.md", "shadow.lg", "shadow.xl", "shadow.2xl"]
    # Use case guessed from position in the list (smallest shadow first).
    use_cases = ["Subtle elevation", "Cards, dropdowns", "Modals, dialogs", "Popovers", "Floating elements", "Dramatic effect"]

    for idx, shadow in enumerate(detected[:6]):
        raw = str(shadow.value)
        shown = raw[:40] + ("..." if len(raw) > 40 else "")

        if idx < len(size_names):
            token = size_names[idx]
        else:
            token = f"shadow.custom-{idx}"

        use = use_cases[idx] if idx < len(use_cases) else "Custom"

        rows.append(f"| `{shown}` | `{token}` | {use} |")

    return "\n".join(rows)
+
+
def format_spacing_comparison(recommendations) -> list:
    """Build rows comparing detected spacing values against 8px/4px grids.

    Args:
        recommendations: Unused; kept for call-site compatibility.

    Returns:
        List of ``[current, 8px-snapped, 4px-snapped]`` rows for the ten
        smallest detected spacing values; exact grid fits are marked "β".
    """
    if not state.desktop_normalized:
        return []

    ordered = sorted(state.desktop_normalized.spacing.values(),
                     key=lambda s: s.value_px)

    rows = []
    for token in ordered[:10]:
        px = token.value_px
        snapped8 = snap_to_grid(px, 8)
        snapped4 = snap_to_grid(px, 4)

        cell8 = f"{snapped8}px" + (" β" if px == snapped8 else "")
        cell4 = f"{snapped4}px" + (" β" if px == snapped4 else "")

        rows.append([f"{px}px", cell8, cell4])

    return rows
+
+
def snap_to_grid(value: float, base: int) -> int:
    """Return *value* rounded to the nearest multiple of *base*.

    Uses Python's banker's rounding via ``round``, so exact midpoints
    snap to the even multiple (e.g. 4 snaps to 0 on an 8px grid).
    """
    return base * round(value / base)
+
+
def apply_selected_upgrades(type_choice: str, spacing_choice: str, apply_ramps: bool, color_recs_table: list = None):
    """Apply selected upgrade options including LLM color recommendations.

    Records the user's Stage 2 choices on ``state.selected_upgrades`` and
    logs which LLM color recommendations were accepted or rejected.

    Args:
        type_choice: Selected type-scale option label.
        spacing_choice: Selected spacing-grid option label.
        apply_ramps: Whether to generate full color ramps on export.
        color_recs_table: Optional table rows; each row is
            ``[accept, role, current, issue, suggested, ...]``.

    Returns:
        Tuple of (status message, accumulated log text).
    """
    if not state.upgrade_recommendations:
        return "β Run analysis first", ""

    state.log("β¨ Applying selected upgrades...")

    # Store selections
    state.selected_upgrades = {
        "type_scale": type_choice,
        "spacing": spacing_choice,
        "color_ramps": apply_ramps,
    }

    state.log(f"   Type Scale: {type_choice}")
    state.log(f"   Spacing: {spacing_choice}")
    state.log(f"   Color Ramps: {'Yes' if apply_ramps else 'No'}")

    # Process accepted color recommendations
    accepted_color_changes = []
    if color_recs_table:
        state.log("")
        state.log("   π¨ LLM Color Recommendations:")
        for row in color_recs_table:
            if len(row) < 5:
                continue  # malformed row; skip defensively

            accept = row[0]     # Boolean checkbox
            role = row[1]       # Role name
            current = row[2]    # Current color
            issue = row[3]      # Issue description
            suggested = row[4]  # Suggested color

            if accept and suggested and current != suggested:
                accepted_color_changes.append({
                    "role": role,
                    "from": current,
                    "to": suggested,
                    "reason": issue,
                })
                # NOTE: these literals were broken across physical lines in the
                # original (stray newline inside the f-string); rejoined here.
                state.log(f"   ββ β ACCEPTED: {role}")
                state.log(f"   β  ββ {current} β {suggested}")
            elif not accept:
                state.log(f"   ββ β REJECTED: {role} (keeping {current})")

    # Store accepted changes
    state.selected_upgrades["color_changes"] = accepted_color_changes

    if accepted_color_changes:
        state.log("")
        state.log(f"   π {len(accepted_color_changes)} color change(s) will be applied to export")

    state.log("")
    state.log("β Upgrades applied! Proceed to Stage 3 for export.")

    return "β Upgrades applied! Proceed to Stage 3 to export.", state.get_logs()
+
+
def export_stage1_json():
    """Export Stage 1 tokens (as-is extraction) to JSON - FLAT structure for Figma Tokens Studio.

    Builds a flat token dictionary (e.g. ``font.display.xl.desktop``) from
    the normalized desktop/mobile tokens held in the global ``state``.
    Colors, radius, and shadows are viewport-agnostic; typography and
    spacing carry a ``.desktop`` / ``.mobile`` suffix.

    Returns:
        Pretty-printed JSON string, or an error payload when nothing was
        extracted yet.
    """
    if not state.desktop_normalized:
        return json.dumps({"error": "No tokens extracted. Please run extraction first."}, indent=2)

    def _clean(name: str, prefix: str, *, dashes: bool = True) -> str:
        """Normalize a token name for Figma: dot separators, forced prefix.

        ``dashes=False`` preserves hyphens (matches the original color
        handling, which did not replace "-").
        """
        cleaned = name.replace(" ", ".").replace("_", ".")
        if dashes:
            cleaned = cleaned.replace("-", ".")
        cleaned = cleaned.lower()
        if not cleaned.startswith(prefix):
            cleaned = f"{prefix}{cleaned}"
        return cleaned

    # FLAT structure for Figma Tokens Studio compatibility
    result = {
        "metadata": {
            "source_url": state.base_url,
            "extracted_at": datetime.now().isoformat(),
            "version": "v1-stage1-as-is",
            "stage": "extraction",
            "description": "Raw extracted tokens before upgrades - Figma Tokens Studio compatible",
        },
        "fonts": {},
        "colors": {},
        "typography": {},  # FLAT: font.display.xl.desktop, font.display.xl.mobile
        "spacing": {},     # FLAT: space.1.desktop, space.1.mobile
        "radius": {},
        "shadows": {},
    }

    # FONTS
    fonts_info = get_detected_fonts()
    result["fonts"] = {
        "primary": fonts_info.get("primary", "Unknown"),
        "weights": fonts_info.get("weights", [400]),
    }

    # COLORS (viewport-agnostic - same across devices)
    if state.desktop_normalized.colors:
        for name, c in state.desktop_normalized.colors.items():
            key = _clean(c.suggested_name or name, "color.", dashes=False)
            result["colors"][key] = {
                "value": c.value,
                "type": "color",
                "source": "detected",
            }

    # TYPOGRAPHY - FLAT structure with viewport suffix
    def _add_typography(normalized, viewport: str):
        """Add one viewport's typography tokens with a .desktop/.mobile suffix."""
        if not (normalized and normalized.typography):
            return
        for name, t in normalized.typography.items():
            key = f"{_clean(t.suggested_name or name, 'font.')}.{viewport}"
            result["typography"][key] = {
                "value": t.font_size,
                "type": "dimension",
                "fontFamily": t.font_family,
                "fontWeight": str(t.font_weight),
                "lineHeight": t.line_height or "1.5",
                "source": "detected",
            }

    _add_typography(state.desktop_normalized, "desktop")
    _add_typography(state.mobile_normalized, "mobile")

    # SPACING - FLAT structure with viewport suffix
    def _add_spacing(normalized, viewport: str):
        """Add one viewport's spacing tokens with a .desktop/.mobile suffix."""
        if not (normalized and normalized.spacing):
            return
        for name, s in normalized.spacing.items():
            key = f"{_clean(s.suggested_name or name, 'space.')}.{viewport}"
            result["spacing"][key] = {
                "value": s.value,
                "type": "dimension",
                "source": "detected",
            }

    _add_spacing(state.desktop_normalized, "desktop")
    _add_spacing(state.mobile_normalized, "mobile")

    # RADIUS (viewport-agnostic)
    if state.desktop_normalized.radius:
        for name, r in state.desktop_normalized.radius.items():
            result["radius"][_clean(name, "radius.")] = {
                "value": r.value,
                "type": "dimension",
                "source": "detected",
            }

    # SHADOWS (viewport-agnostic)
    if state.desktop_normalized.shadows:
        for name, s in state.desktop_normalized.shadows.items():
            result["shadows"][_clean(name, "shadow.")] = {
                "value": s.value,
                "type": "boxShadow",
                "source": "detected",
            }

    return json.dumps(result, indent=2, default=str)
+
+
def export_tokens_json():
    """Export final tokens with selected upgrades applied - FLAT structure for Figma Tokens Studio.

    Applies the Stage 2 selections stored in ``state.selected_upgrades``:
    an optional modular type scale (1.2 / 1.25 / 1.333), an optional
    4px/8px spacing grid, and optional generated color ramps (50-950).
    Sections with no upgrade selected fall back to the detected tokens.

    Returns:
        Pretty-printed JSON string, or an error payload when nothing was
        extracted yet.
    """
    if not state.desktop_normalized:
        return json.dumps({"error": "No tokens extracted. Please run extraction first."}, indent=2)

    # Get selected upgrades
    upgrades = getattr(state, 'selected_upgrades', {})
    type_scale_choice = upgrades.get('type_scale', 'Keep Current')
    spacing_choice = upgrades.get('spacing', 'Keep Current')
    apply_ramps = upgrades.get('color_ramps', True)

    # Determine ratio from choice. BUGFIX: check most-specific label first —
    # "1.2" is a substring of "1.25", so the original order mapped a
    # "1.25" choice to ratio 1.2.
    ratio = None
    if "1.333" in type_scale_choice:
        ratio = 1.333
    elif "1.25" in type_scale_choice:
        ratio = 1.25
    elif "1.2" in type_scale_choice:
        ratio = 1.2

    # Determine spacing base (None means "keep current")
    spacing_base = None
    if "8px" in spacing_choice:
        spacing_base = 8
    elif "4px" in spacing_choice:
        spacing_base = 4

    def _clean(name: str, prefix: str, *, dashes: bool = True) -> str:
        """Normalize a token name for Figma: dot separators, forced prefix."""
        cleaned = name.replace(" ", ".").replace("_", ".")
        if dashes:
            cleaned = cleaned.replace("-", ".")
        cleaned = cleaned.lower()
        if not cleaned.startswith(prefix):
            cleaned = f"{prefix}{cleaned}"
        return cleaned

    # FLAT structure for Figma Tokens Studio compatibility
    result = {
        "metadata": {
            "source_url": state.base_url,
            "extracted_at": datetime.now().isoformat(),
            "version": "v2-upgraded",
            "stage": "final",
            "description": "Upgraded tokens - Figma Tokens Studio compatible",
            "upgrades_applied": {
                "type_scale": type_scale_choice,
                "spacing": spacing_choice,
                "color_ramps": apply_ramps,
            },
        },
        "fonts": {},
        "colors": {},
        "typography": {},  # FLAT: font.display.xl.desktop, font.display.xl.mobile
        "spacing": {},     # FLAT: space.1.desktop, space.1.mobile
        "radius": {},
        "shadows": {},
    }

    # FONTS
    fonts_info = get_detected_fonts()
    result["fonts"] = {
        "primary": fonts_info.get("primary", "Unknown"),
        "weights": fonts_info.get("weights", [400]),
    }
    primary_font = fonts_info.get("primary", "sans-serif")

    # COLORS with optional ramps
    if state.desktop_normalized.colors:
        from core.color_utils import generate_color_ramp

        shades = ["50", "100", "200", "300", "400", "500", "600", "700", "800", "900", "950"]
        for name, c in state.desktop_normalized.colors.items():
            key = _clean(c.suggested_name or name, "color.", dashes=False)

            if apply_ramps:
                # Generate full ramp (50-950); fall back to the base color
                # if ramp generation fails.
                try:
                    ramp = generate_color_ramp(c.value)
                    for i, shade in enumerate(shades):
                        if i < len(ramp):
                            step = ramp[i]
                            result["colors"][f"{key}.{shade}"] = {
                                "value": step if isinstance(step, str) else step.get("hex", c.value),
                                "type": "color",
                                # Only the 500 step is the literally-detected color.
                                "source": "upgraded" if shade != "500" else "detected",
                            }
                except Exception:  # was a bare except
                    result["colors"][key] = {
                        "value": c.value,
                        "type": "color",
                        "source": "detected",
                    }
            else:
                result["colors"][key] = {
                    "value": c.value,
                    "type": "color",
                    "source": "detected",
                }

    # TYPOGRAPHY - FLAT structure with viewport suffix
    base_size = get_base_font_size()
    token_names = [
        "font.display.2xl", "font.display.xl", "font.display.lg", "font.display.md",
        "font.heading.xl", "font.heading.lg", "font.heading.md", "font.heading.sm",
        "font.body.lg", "font.body.md", "font.body.sm", "font.caption", "font.overline"
    ]

    def _add_detected_typography(normalized, viewport: str):
        """Add one viewport's detected typography with a viewport suffix."""
        if not (normalized and normalized.typography):
            return
        for name, t in normalized.typography.items():
            key = f"{_clean(t.suggested_name or name, 'font.')}.{viewport}"
            result["typography"][key] = {
                "value": t.font_size,
                "type": "dimension",
                "fontFamily": t.font_family,
                "fontWeight": str(t.font_weight),
                "lineHeight": t.line_height or "1.5",
                "source": "detected",
            }

    if ratio:
        # Apply the modular scale; mobile uses a 0.875 shrink factor.
        for viewport, factor in (("desktop", 1.0), ("mobile", 0.875)):
            sizes = [int(round(base_size * factor * (ratio ** (8 - i)) / 2) * 2) for i in range(13)]
            for i, token_name in enumerate(token_names):
                result["typography"][f"{token_name}.{viewport}"] = {
                    "value": f"{sizes[i]}px",
                    "type": "dimension",
                    "fontFamily": primary_font,
                    "source": "upgraded",
                }
    else:
        # Keep original with flat structure
        _add_detected_typography(state.desktop_normalized, "desktop")
        _add_detected_typography(state.mobile_normalized, "mobile")

    # SPACING - FLAT structure with viewport suffix
    spacing_token_names = [
        "space.1", "space.2", "space.3", "space.4", "space.5",
        "space.6", "space.8", "space.10", "space.12", "space.16"
    ]

    def _add_detected_spacing(normalized, viewport: str):
        """Add one viewport's detected spacing with a viewport suffix."""
        if not (normalized and normalized.spacing):
            return
        for name, s in normalized.spacing.items():
            key = f"{_clean(s.suggested_name or name, 'space.')}.{viewport}"
            result["spacing"][key] = {
                "value": s.value,
                "type": "dimension",
                "source": "detected",
            }

    if spacing_base:
        # Generate grid-aligned spacing; same values for both viewports.
        for i, token_name in enumerate(spacing_token_names):
            value = spacing_base * (i + 1)
            for viewport in ("desktop", "mobile"):
                result["spacing"][f"{token_name}.{viewport}"] = {
                    "value": f"{value}px",
                    "type": "dimension",
                    "source": "upgraded",
                }
    else:
        # Keep original with flat structure
        _add_detected_spacing(state.desktop_normalized, "desktop")
        _add_detected_spacing(state.mobile_normalized, "mobile")

    # RADIUS (viewport-agnostic)
    if state.desktop_normalized.radius:
        for name, r in state.desktop_normalized.radius.items():
            result["radius"][_clean(name, "radius.")] = {
                "value": r.value,
                "type": "dimension",
                "source": "detected",
            }

    # SHADOWS (viewport-agnostic)
    if state.desktop_normalized.shadows:
        for name, s in state.desktop_normalized.shadows.items():
            result["shadows"][_clean(name, "shadow.")] = {
                "value": s.value,
                "type": "boxShadow",
                "source": "detected",
            }

    return json.dumps(result, indent=2, default=str)
+
+
+# =============================================================================
+# UI BUILDING
+# =============================================================================
+
+def create_ui():
+ """Create the Gradio interface with corporate branding."""
+
+ # Corporate theme customization
+ corporate_theme = gr.themes.Base(
+ primary_hue=gr.themes.colors.blue,
+ secondary_hue=gr.themes.colors.slate,
+ neutral_hue=gr.themes.colors.slate,
+ font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"],
+ font_mono=[gr.themes.GoogleFont("JetBrains Mono"), "ui-monospace", "monospace"],
+ ).set(
+ # Colors
+ body_background_fill="#f8fafc",
+ body_background_fill_dark="#0f172a",
+ block_background_fill="white",
+ block_background_fill_dark="#1e293b",
+ block_border_color="#e2e8f0",
+ block_border_color_dark="#334155",
+ block_label_background_fill="#f1f5f9",
+ block_label_background_fill_dark="#1e293b",
+ block_title_text_color="#0f172a",
+ block_title_text_color_dark="#f1f5f9",
+
+ # Primary button
+ button_primary_background_fill="#2563eb",
+ button_primary_background_fill_hover="#1d4ed8",
+ button_primary_text_color="white",
+
+ # Secondary button
+ button_secondary_background_fill="#f1f5f9",
+ button_secondary_background_fill_hover="#e2e8f0",
+ button_secondary_text_color="#1e293b",
+
+ # Input fields
+ input_background_fill="#ffffff",
+ input_background_fill_dark="#1e293b",
+ input_border_color="#cbd5e1",
+ input_border_color_dark="#475569",
+
+ # Shadows and radius
+ block_shadow="0 1px 3px rgba(0,0,0,0.1)",
+ block_shadow_dark="0 1px 3px rgba(0,0,0,0.3)",
+ block_border_width="1px",
+ block_radius="8px",
+
+ # Text
+ body_text_color="#1e293b",
+ body_text_color_dark="#e2e8f0",
+ body_text_size="14px",
+ )
+
+ # Custom CSS for additional styling
+ custom_css = """
+ /* Global styles */
+ .gradio-container {
+ max-width: 1400px !important;
+ margin: 0 auto !important;
+ }
+
+ /* Header branding */
+ .app-header {
+ background: linear-gradient(135deg, #1e40af 0%, #3b82f6 100%);
+ padding: 24px 32px;
+ border-radius: 12px;
+ margin-bottom: 24px;
+ color: white;
+ }
+ .app-header h1 {
+ margin: 0 0 8px 0;
+ font-size: 28px;
+ font-weight: 700;
+ }
+ .app-header p {
+ margin: 0;
+ opacity: 0.9;
+ font-size: 14px;
+ }
+
+ /* Stage indicators */
+ .stage-header {
+ background: linear-gradient(90deg, #f1f5f9 0%, #ffffff 100%);
+ padding: 16px 20px;
+ border-radius: 8px;
+ border-left: 4px solid #2563eb;
+ margin-bottom: 16px;
+ }
+ .stage-header h2 {
+ margin: 0;
+ font-size: 18px;
+ color: #1e293b;
+ }
+
+ /* Log styling */
+ .log-container textarea {
+ font-family: 'JetBrains Mono', monospace !important;
+ font-size: 12px !important;
+ line-height: 1.6 !important;
+ background: #0f172a !important;
+ color: #e2e8f0 !important;
+ border-radius: 8px !important;
+ }
+
+ /* Color swatch */
+ .color-swatch {
+ display: inline-block;
+ width: 24px;
+ height: 24px;
+ border-radius: 4px;
+ margin-right: 8px;
+ vertical-align: middle;
+ border: 1px solid rgba(0,0,0,0.1);
+ }
+
+ /* Score badges */
+ .score-badge {
+ display: inline-block;
+ padding: 4px 12px;
+ border-radius: 20px;
+ font-weight: 600;
+ font-size: 13px;
+ }
+ .score-badge.high { background: #dcfce7; color: #166534; }
+ .score-badge.medium { background: #fef3c7; color: #92400e; }
+ .score-badge.low { background: #fee2e2; color: #991b1b; }
+
+ /* Benchmark cards */
+ .benchmark-card {
+ background: #f8fafc;
+ border: 1px solid #e2e8f0;
+ border-radius: 8px;
+ padding: 16px;
+ margin-bottom: 12px;
+ }
+ .benchmark-card.selected {
+ border-color: #2563eb;
+ background: #eff6ff;
+ }
+
+ /* Action items */
+ .action-item {
+ background: white;
+ border: 1px solid #e2e8f0;
+ border-radius: 8px;
+ padding: 16px;
+ margin-bottom: 8px;
+ }
+ .action-item.high-priority {
+ border-left: 4px solid #ef4444;
+ }
+ .action-item.medium-priority {
+ border-left: 4px solid #f59e0b;
+ }
+
+ /* Progress indicator */
+ .progress-bar {
+ height: 4px;
+ background: #e2e8f0;
+ border-radius: 2px;
+ overflow: hidden;
+ }
+ .progress-bar-fill {
+ height: 100%;
+ background: linear-gradient(90deg, #2563eb, #3b82f6);
+ transition: width 0.3s ease;
+ }
+
+ /* Accordion styling */
+ .accordion-header {
+ font-weight: 600 !important;
+ }
+
+ /* Table styling */
+ table {
+ border-collapse: collapse;
+ width: 100%;
+ }
+ th {
+ background: #f1f5f9;
+ padding: 12px;
+ text-align: left;
+ font-weight: 600;
+ border-bottom: 2px solid #e2e8f0;
+ }
+ td {
+ padding: 12px;
+ border-bottom: 1px solid #e2e8f0;
+ }
+
+ /* Dark mode adjustments */
+ .dark .stage-header {
+ background: linear-gradient(90deg, #1e293b 0%, #0f172a 100%);
+ border-left-color: #3b82f6;
+ }
+ .dark .stage-header h2 {
+ color: #f1f5f9;
+ }
+ .dark .benchmark-card {
+ background: #1e293b;
+ border-color: #334155;
+ }
+ .dark .action-item {
+ background: #1e293b;
+ border-color: #334155;
+ }
+ """
+
+ with gr.Blocks(
+ title="Design System Extractor v2",
+ theme=corporate_theme,
+ css=custom_css
+ ) as app:
+
+ # Header with branding
+ gr.HTML("""
+
+ """)
+
+ # =================================================================
+ # CONFIGURATION
+ # =================================================================
+
+ with gr.Accordion("βοΈ Configuration", open=not bool(HF_TOKEN_FROM_ENV)):
+ gr.Markdown("**HuggingFace Token** β Required for Stage 2 (AI upgrades)")
+ with gr.Row():
+ hf_token_input = gr.Textbox(
+ label="HF Token", placeholder="hf_xxxx", type="password",
+ scale=4, value=HF_TOKEN_FROM_ENV,
+ )
+ save_token_btn = gr.Button("πΎ Save", scale=1)
+ token_status = gr.Markdown("β
Token loaded" if HF_TOKEN_FROM_ENV else "β³ Enter token")
+
+ def save_token(token):
+ if token and len(token) > 10:
+ os.environ["HF_TOKEN"] = token.strip()
+ return "β
Token saved!"
+ return "β Invalid token"
+
+ save_token_btn.click(save_token, [hf_token_input], [token_status])
+
+ # =================================================================
+ # URL INPUT & PAGE DISCOVERY
+ # =================================================================
+
+ with gr.Accordion("π Step 1: Discover Pages", open=True):
+ gr.Markdown("Enter your website URL to discover pages for extraction.")
+
+ with gr.Row():
+ url_input = gr.Textbox(label="Website URL", placeholder="https://example.com", scale=4)
+ discover_btn = gr.Button("π Discover Pages", variant="primary", scale=1)
+
+ discover_status = gr.Markdown("")
+
+ with gr.Row():
+ log_output = gr.Textbox(label="π Log", lines=8, interactive=False)
+
+ pages_table = gr.Dataframe(
+ headers=["Select", "URL", "Title", "Type", "Status"],
+ datatype=["bool", "str", "str", "str", "str"],
+ label="Discovered Pages",
+ interactive=True,
+ visible=False,
+ )
+
+ extract_btn = gr.Button("π Extract Tokens (Desktop + Mobile)", variant="primary", visible=False)
+
+ # =================================================================
+ # STAGE 1: EXTRACTION REVIEW
+ # =================================================================
+
+ with gr.Accordion("π Stage 1: Review Extracted Tokens", open=False) as stage1_accordion:
+
+ extraction_status = gr.Markdown("")
+
+ gr.Markdown("""
+ **Review the extracted tokens.** Toggle between Desktop and Mobile viewports.
+ Accept or reject tokens, then proceed to Stage 2 for AI-powered upgrades.
+ """)
+
+ viewport_toggle = gr.Radio(
+ choices=["Desktop (1440px)", "Mobile (375px)"],
+ value="Desktop (1440px)",
+ label="Viewport",
+ )
+
+ with gr.Tabs():
+ with gr.Tab("π¨ Colors"):
+ colors_table = gr.Dataframe(
+ headers=["Accept", "Color", "Suggested Name", "Frequency", "Confidence", "Contrast", "AA", "Context"],
+ datatype=["bool", "str", "str", "number", "str", "str", "str", "str"],
+ label="Colors",
+ interactive=True,
+ )
+
+ with gr.Tab("π Typography"):
+ typography_table = gr.Dataframe(
+ headers=["Accept", "Font", "Size", "Weight", "Line Height", "Suggested Name", "Frequency", "Confidence"],
+ datatype=["bool", "str", "str", "str", "str", "str", "number", "str"],
+ label="Typography",
+ interactive=True,
+ )
+
+ with gr.Tab("π Spacing"):
+ spacing_table = gr.Dataframe(
+ headers=["Accept", "Value", "Pixels", "Suggested Name", "Frequency", "Base 8", "Confidence"],
+ datatype=["bool", "str", "str", "str", "number", "str", "str"],
+ label="Spacing",
+ interactive=True,
+ )
+
+ with gr.Tab("π Radius"):
+ radius_table = gr.Dataframe(
+ headers=["Accept", "Value", "Frequency", "Context"],
+ datatype=["bool", "str", "number", "str"],
+ label="Border Radius",
+ interactive=True,
+ )
+
+ # =============================================================
+ # VISUAL PREVIEWS (Stage 1) - AS-IS only, no enhancements
+ # =============================================================
+ gr.Markdown("---")
+ gr.Markdown("## ποΈ Visual Previews (AS-IS)")
+ gr.Markdown("*Raw extracted values from the website β no enhancements applied*")
+
+ with gr.Tabs():
+ with gr.Tab("π€ Typography"):
+ gr.Markdown("*Actual typography rendered with the detected font*")
+ stage1_typography_preview = gr.HTML(
+ value="Typography preview will appear after extraction...
",
+ label="Typography Preview"
+ )
+
+ with gr.Tab("π¨ Colors"):
+ gr.Markdown("*All detected colors (AS-IS β no generated ramps)*")
+ stage1_colors_preview = gr.HTML(
+ value="Colors preview will appear after extraction...
",
+ label="Colors Preview"
+ )
+
+ with gr.Tab("π§ Semantic Colors"):
+ gr.Markdown("*Colors categorized by usage: Brand, Text, Background, Border, Feedback*")
+ stage1_semantic_preview = gr.HTML(
+ value="Semantic color analysis will appear after extraction...
",
+ label="Semantic Colors Preview"
+ )
+
+ with gr.Tab("π Spacing"):
+ gr.Markdown("*All detected spacing values*")
+ stage1_spacing_preview = gr.HTML(
+ value="Spacing preview will appear after extraction...
",
+ label="Spacing Preview"
+ )
+
+ with gr.Tab("π Radius"):
+ gr.Markdown("*All detected border radius values*")
+ stage1_radius_preview = gr.HTML(
+ value="Radius preview will appear after extraction...
",
+ label="Radius Preview"
+ )
+
+ with gr.Tab("π Shadows"):
+ gr.Markdown("*All detected box shadow values*")
+ stage1_shadows_preview = gr.HTML(
+ value="Shadows preview will appear after extraction...
",
+ label="Shadows Preview"
+ )
+
+ with gr.Row():
+ proceed_stage2_btn = gr.Button("β‘οΈ Proceed to Stage 2: AI Upgrades", variant="primary")
+ download_stage1_btn = gr.Button("π₯ Download Stage 1 JSON", variant="secondary")
+
+ # =================================================================
+ # STAGE 2: AI UPGRADES
+ # =================================================================
+
+ with gr.Accordion("π§ Stage 2: AI-Powered Analysis", open=False) as stage2_accordion:
+
+ # Stage header
+ gr.HTML("""
+
+ """)
+
+ stage2_status = gr.Markdown("Click 'Analyze' to start AI-powered design system analysis.")
+
+ # =============================================================
+ # NEW ARCHITECTURE CONFIGURATION
+ # =============================================================
+ with gr.Accordion("βοΈ Analysis Configuration", open=True):
+
+ # Architecture explanation
+ gr.Markdown("""
+ ### ποΈ New Analysis Architecture
+
+ | Layer | Type | What It Does | Cost |
+ |-------|------|--------------|------|
+ | **Layer 1** | Rule Engine | Type scale, AA check, spacing grid, color stats | FREE |
+ | **Layer 2** | Benchmark Research | Fetch live specs via Firecrawl (24h cache) | ~$0.001 |
+ | **Layer 3** | LLM Agents | Brand ID, Benchmark Advisor, Best Practices | ~$0.002 |
+ | **Layer 4** | HEAD Synthesizer | Combine all β Final recommendations | ~$0.001 |
+
+ **Total Cost:** ~$0.003-0.004 per analysis
+ """)
+
+ gr.Markdown("---")
+
+ # Benchmark selection
+ gr.Markdown("### π Select Design Systems to Compare Against")
+ gr.Markdown("*Choose which design systems to benchmark your tokens against:*")
+
+ benchmark_checkboxes = gr.CheckboxGroup(
+ choices=[
+ ("π’ Material Design 3 (Google)", "material_design_3"),
+ ("π Apple HIG", "apple_hig"),
+ ("π Shopify Polaris", "shopify_polaris"),
+ ("π΅ Atlassian Design System", "atlassian_design"),
+ ("π· IBM Carbon", "ibm_carbon"),
+ ("π Tailwind CSS", "tailwind_css"),
+ ("π Ant Design", "ant_design"),
+ ("β‘ Chakra UI", "chakra_ui"),
+ ],
+ value=["material_design_3", "shopify_polaris", "atlassian_design"],
+ label="Benchmarks",
+ )
+
+ gr.Markdown("""
+
+ π‘ Tip: Select 2-4 benchmarks for best results. More benchmarks = longer analysis time.
+
+ π¦ Results are cached for 24 hours to speed up subsequent analyses.
+
+ """)
+
+ # Analyze button
+ with gr.Row():
+ analyze_btn_v2 = gr.Button(
+ "π Run Analysis (New Architecture)",
+ variant="primary",
+ size="lg",
+ scale=2
+ )
+ analyze_btn_legacy = gr.Button(
+ "π€ Legacy Analysis",
+ variant="secondary",
+ size="lg",
+ scale=1
+ )
+
+ # =============================================================
+ # ANALYSIS LOG
+ # =============================================================
+ with gr.Accordion("π Analysis Log", open=True):
+ stage2_log = gr.Textbox(
+ label="Log",
+ lines=20,
+ interactive=False,
+ elem_classes=["log-container"]
+ )
+
+ # =============================================================
+ # SCORES DASHBOARD
+ # =============================================================
+ gr.Markdown("---")
+ gr.Markdown("## π Analysis Results")
+
+ scores_dashboard = gr.HTML(
+ value="Scores will appear after analysis...
",
+ label="Scores"
+ )
+
+ # =============================================================
+ # PRIORITY ACTIONS
+ # =============================================================
+ priority_actions_html = gr.HTML(
+ value="Priority actions will appear after analysis...
",
+ label="Priority Actions"
+ )
+
+ # =============================================================
+ # BENCHMARK COMPARISON
+ # =============================================================
+ gr.Markdown("---")
+ benchmark_comparison_md = gr.Markdown("*Benchmark comparison will appear after analysis*")
+
+ # =============================================================
+ # COLOR RECOMMENDATIONS
+ # =============================================================
+ gr.Markdown("---")
+ gr.Markdown("## π¨ Color Recommendations")
+ gr.Markdown("*Accept or reject AI-suggested color changes:*")
+
+ color_recommendations_table = gr.Dataframe(
+ headers=["Accept", "Role", "Current", "Issue", "Suggested", "New Contrast"],
+ datatype=["bool", "str", "str", "str", "str", "str"],
+ label="Color Recommendations",
+ interactive=True,
+ row_count=(0, "dynamic"),
+ )
+
+ # =============================================================
+ # TYPOGRAPHY SECTION
+ # =============================================================
+ gr.Markdown("---")
+ gr.Markdown("## π Typography")
+
+ with gr.Accordion("ποΈ Typography Visual Preview", open=True):
+ stage2_typography_preview = gr.HTML(
+ value="Typography preview will appear after analysis...
",
+ label="Typography Preview"
+ )
+
+ with gr.Row():
+ with gr.Column(scale=2):
+ gr.Markdown("### π₯οΈ Desktop (1440px)")
+ typography_desktop = gr.Dataframe(
+ headers=["Token", "Current", "Scale 1.2", "Scale 1.25 β", "Scale 1.333", "Keep"],
+ datatype=["str", "str", "str", "str", "str", "str"],
+ label="Desktop Typography",
+ interactive=False,
+ )
+
+ with gr.Column(scale=2):
+ gr.Markdown("### π± Mobile (375px)")
+ typography_mobile = gr.Dataframe(
+ headers=["Token", "Current", "Scale 1.2", "Scale 1.25 β", "Scale 1.333", "Keep"],
+ datatype=["str", "str", "str", "str", "str", "str"],
+ label="Mobile Typography",
+ interactive=False,
+ )
+
+ with gr.Row():
+ with gr.Column():
+ gr.Markdown("### Select Type Scale Option")
+ type_scale_radio = gr.Radio(
+ choices=["Keep Current", "Scale 1.2 (Minor Third)", "Scale 1.25 (Major Third) β", "Scale 1.333 (Perfect Fourth)"],
+ value="Scale 1.25 (Major Third) β",
+ label="Type Scale",
+ interactive=True,
+ )
+ gr.Markdown("*Font family will be preserved. Sizes rounded to even numbers.*")
+
+ # =============================================================
+ # COLORS SECTION - Base Colors + Ramps + LLM Recommendations
+ # =============================================================
+ gr.Markdown("---")
+ gr.Markdown("## π¨ Colors")
+
+ # LLM Recommendations Section (NEW)
+ with gr.Accordion("π€ LLM Color Recommendations", open=True):
+ gr.Markdown("""
+ *The LLMs analyzed your colors and made these suggestions. Accept or reject each one.*
+ """)
+
+ llm_color_recommendations = gr.HTML(
+ value="LLM recommendations will appear after analysis...
",
+ label="LLM Recommendations"
+ )
+
+ # Accept/Reject table for color recommendations
+ color_recommendations_table = gr.Dataframe(
+ headers=["Accept", "Role", "Current", "Issue", "Suggested", "Contrast"],
+ datatype=["bool", "str", "str", "str", "str", "str"],
+ label="Color Recommendations",
+ interactive=True,
+ col_count=(6, "fixed"),
+ )
+
+ # Visual Preview
+ with gr.Accordion("ποΈ Color Ramps Visual Preview (Semantic Groups)", open=True):
+ stage2_color_ramps_preview = gr.HTML(
+ value="Color ramps preview will appear after analysis...
",
+ label="Color Ramps Preview"
+ )
+
+ base_colors_display = gr.Markdown("*Base colors will appear after analysis*")
+
+ gr.Markdown("---")
+
+ color_ramps_display = gr.Markdown("*Color ramps will appear after analysis*")
+
+ color_ramps_checkbox = gr.Checkbox(
+ label="β Generate color ramps (keeps base colors, adds 50-950 shades)",
+ value=True,
+ )
+
+ # =============================================================
+ # SPACING SECTION
+ # =============================================================
+ gr.Markdown("---")
+ gr.Markdown("## π Spacing (Rule-Based)")
+
+ with gr.Row():
+ with gr.Column(scale=2):
+ spacing_comparison = gr.Dataframe(
+ headers=["Current", "8px Grid", "4px Grid"],
+ datatype=["str", "str", "str"],
+ label="Spacing Comparison",
+ interactive=False,
+ )
+
+ with gr.Column(scale=1):
+ spacing_radio = gr.Radio(
+ choices=["Keep Current", "8px Base Grid β", "4px Base Grid"],
+ value="8px Base Grid β",
+ label="Spacing System",
+ interactive=True,
+ )
+
+ # =============================================================
+ # RADIUS SECTION
+ # =============================================================
+ gr.Markdown("---")
+ gr.Markdown("## π Border Radius (Rule-Based)")
+
+ radius_display = gr.Markdown("*Radius tokens will appear after analysis*")
+
+ # =============================================================
+ # SHADOWS SECTION
+ # =============================================================
+ gr.Markdown("---")
+ gr.Markdown("## π«οΈ Shadows (Rule-Based)")
+
+ shadows_display = gr.Markdown("*Shadow tokens will appear after analysis*")
+
+ # =============================================================
+ # APPLY SECTION
+ # =============================================================
+ gr.Markdown("---")
+
+ with gr.Row():
+ apply_upgrades_btn = gr.Button("β¨ Apply Selected Upgrades", variant="primary", scale=2)
+ reset_btn = gr.Button("β©οΈ Reset to Original", variant="secondary", scale=1)
+
+ apply_status = gr.Markdown("")
+
+ # =================================================================
+ # STAGE 3: EXPORT
+ # =================================================================
+
+ with gr.Accordion("π¦ Stage 3: Export", open=False):
+ gr.Markdown("""
+ Export your design tokens to JSON (compatible with Figma Tokens Studio).
+
+ - **Stage 1 JSON**: Raw extracted tokens (as-is)
+ - **Final JSON**: Upgraded tokens with selected improvements
+ """)
+
+ with gr.Row():
+ export_stage1_btn = gr.Button("π₯ Export Stage 1 (As-Is)", variant="secondary")
+ export_final_btn = gr.Button("π₯ Export Final (Upgraded)", variant="primary")
+
+ export_output = gr.Code(label="Tokens JSON", language="json", lines=25)
+
+ export_stage1_btn.click(export_stage1_json, outputs=[export_output])
+ export_final_btn.click(export_tokens_json, outputs=[export_output])
+
+ # =================================================================
+ # EVENT HANDLERS
+ # =================================================================
+
+ # Store data for viewport toggle
+ desktop_data = gr.State({})
+ mobile_data = gr.State({})
+
+ # Discover pages
+ discover_btn.click(
+ fn=discover_pages,
+ inputs=[url_input],
+ outputs=[discover_status, log_output, pages_table],
+ ).then(
+ fn=lambda: (gr.update(visible=True), gr.update(visible=True)),
+ outputs=[pages_table, extract_btn],
+ )
+
+ # Extract tokens
+ extract_btn.click(
+ fn=extract_tokens,
+ inputs=[pages_table],
+ outputs=[extraction_status, log_output, desktop_data, mobile_data,
+ stage1_typography_preview, stage1_colors_preview,
+ stage1_semantic_preview,
+ stage1_spacing_preview, stage1_radius_preview, stage1_shadows_preview],
+ ).then(
+ fn=lambda d: (d.get("colors", []), d.get("typography", []), d.get("spacing", [])),
+ inputs=[desktop_data],
+ outputs=[colors_table, typography_table, spacing_table],
+ ).then(
+ fn=lambda: gr.update(open=True),
+ outputs=[stage1_accordion],
+ )
+
+ # Viewport toggle
+ viewport_toggle.change(
+ fn=switch_viewport,
+ inputs=[viewport_toggle],
+ outputs=[colors_table, typography_table, spacing_table],
+ )
+
+ # Stage 2: NEW Architecture Analyze
+ analyze_btn_v2.click(
+ fn=run_stage2_analysis_v2,
+ inputs=[benchmark_checkboxes],
+ outputs=[
+ stage2_status,
+ stage2_log,
+ benchmark_comparison_md,
+ scores_dashboard,
+ priority_actions_html,
+ color_recommendations_table,
+ typography_desktop,
+ typography_mobile,
+ stage2_typography_preview,
+ ],
+ )
+
+ # Stage 2: Legacy Analyze (keep for backward compatibility)
+ analyze_btn_legacy.click(
+ fn=run_stage2_analysis,
+ inputs=[],
+ outputs=[stage2_status, stage2_log, benchmark_comparison_md, scores_dashboard,
+ typography_desktop, typography_mobile, spacing_comparison,
+ base_colors_display, color_ramps_display, radius_display, shadows_display,
+ stage2_typography_preview, stage2_color_ramps_preview,
+ llm_color_recommendations, color_recommendations_table],
+ )
+
+ # Stage 2: Apply upgrades
+ apply_upgrades_btn.click(
+ fn=apply_selected_upgrades,
+ inputs=[type_scale_radio, spacing_radio, color_ramps_checkbox, color_recommendations_table],
+ outputs=[apply_status, stage2_log],
+ )
+
+ # Stage 1: Download JSON
+ download_stage1_btn.click(
+ fn=export_stage1_json,
+ outputs=[export_output],
+ )
+
+ # Proceed to Stage 2 button
+ proceed_stage2_btn.click(
+ fn=lambda: gr.update(open=True),
+ outputs=[stage2_accordion],
+ )
+
+ # =================================================================
+ # FOOTER
+ # =================================================================
+
+ gr.Markdown("""
+ ---
+ **Design System Extractor v2** | Built with Playwright + Firecrawl + LangGraph + HuggingFace
+
+ *A semi-automated co-pilot for design system recovery and modernization.*
+
+ **New Architecture:** Rule Engine (FREE) + Benchmark Research (Firecrawl) + LLM Agents
+ """)
+
+ return app
+
+
+# =============================================================================
+# MAIN
+# =============================================================================
+
if __name__ == "__main__":
    # Build the Gradio UI and serve it on all interfaces (Spaces-compatible).
    demo = create_ui()
    demo.launch(server_name="0.0.0.0", server_port=7860)