|
|
""" |
|
|
Design System Extractor v2 — Main Application |
|
|
============================================== |
|
|
|
|
|
Flow: |
|
|
1. User enters URL |
|
|
2. Agent 1 discovers pages → User confirms |
|
|
3. Agent 1 extracts tokens (Desktop + Mobile) |
|
|
4. Agent 2 normalizes tokens |
|
|
5. Stage 1 UI: User reviews tokens (accept/reject, Desktop↔Mobile toggle) |
|
|
6. Agent 3 proposes upgrades |
|
|
7. Stage 2 UI: User selects options with live preview |
|
|
8. Agent 4 generates JSON |
|
|
9. Stage 3 UI: User exports |
|
|
""" |
|
|
|
|
|
import os |
|
|
import asyncio |
|
|
import json |
|
|
import gradio as gr |
|
|
from datetime import datetime |
|
|
from typing import Optional |
|
|
|
|
|
|
|
|
# Hugging Face API token read once at import time; empty string when unset.
HF_TOKEN_FROM_ENV = os.getenv("HF_TOKEN", "")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class AppState:
    """Global application state shared by every Gradio callback.

    Holds the crawl results, raw and normalized token sets for both
    viewports, Stage 2 recommendations, and a bounded in-memory log.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Return every field to its initial empty value."""
        self.discovered_pages = []
        self.base_url = ""
        self.desktop_raw = None
        self.mobile_raw = None
        self.desktop_normalized = None
        self.mobile_normalized = None
        self.upgrade_recommendations = None
        self.selected_upgrades = {}
        self.logs = []

    def log(self, message: str):
        """Append a timestamped line, keeping only the most recent 100."""
        stamp = datetime.now().strftime("%H:%M:%S")
        self.logs.append(f"[{stamp}] {message}")
        # Drop from the front until the cap is respected (appends are
        # one-at-a-time, so this loop runs at most once per call).
        while len(self.logs) > 100:
            self.logs.pop(0)

    def get_logs(self) -> str:
        """Render the retained log lines as one newline-joined string."""
        return "\n".join(self.logs)
|
|
|
|
|
# Module-level singleton consumed by every callback below.
state = AppState()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_crawler():
    """Lazily import and return the ``agents.crawler`` module."""
    from agents import crawler
    return crawler
|
|
|
|
|
def get_extractor():
    """Lazily import and return the ``agents.extractor`` module."""
    from agents import extractor
    return extractor
|
|
|
|
|
def get_normalizer():
    """Lazily import and return the ``agents.normalizer`` module."""
    from agents import normalizer
    return normalizer
|
|
|
|
|
def get_advisor():
    """Lazily import and return the ``agents.advisor`` module."""
    from agents import advisor
    return advisor
|
|
|
|
|
def get_schema():
    """Lazily import and return the ``core.token_schema`` module."""
    from core import token_schema
    return token_schema
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
async def discover_pages(url: str, progress=gr.Progress()):
    """Discover pages from URL.

    Resets the global state, validates the URL, then runs the crawler's
    ``PageDiscoverer``.

    Args:
        url: Site root to crawl; must start with ``http://`` or ``https://``.
        progress: Gradio progress reporter.

    Returns:
        Tuple of (status markdown, log text, table rows) where each row is
        ``[selected, url, title, page_type, status_mark]``.
    """
    state.reset()

    if not url or not url.startswith(("http://", "https://")):
        return "❌ Please enter a valid URL", "", None

    state.log(f"🚀 Starting discovery for: {url}")
    progress(0.1, desc="🔍 Discovering pages...")

    try:
        crawler = get_crawler()
        discoverer = crawler.PageDiscoverer()

        pages = await discoverer.discover(url)

        state.discovered_pages = pages
        state.base_url = url

        state.log(f"✅ Found {len(pages)} pages")

        # Build rows for the Gradio table: every page starts selected.
        pages_data = []
        for page in pages:
            pages_data.append([
                True,
                page.url,
                page.title if page.title else "(No title)",
                page.page_type.value,
                "✓" if not page.error else f"⚠ {page.error}"
            ])

        progress(1.0, desc="✅ Discovery complete!")

        status = f"✅ Found {len(pages)} pages. Review and click 'Extract Tokens' to continue."

        return status, state.get_logs(), pages_data

    except Exception as e:
        # Fix: `traceback` was imported here but never used; record the
        # stack trace in the log like the other handlers in this file do.
        import traceback
        state.log(f"❌ Error: {str(e)}")
        state.log(traceback.format_exc())
        return f"❌ Error: {str(e)}", state.get_logs(), None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
async def extract_tokens(pages_data, progress=gr.Progress()):
    """Extract tokens from selected pages (both viewports).

    Pipeline: parse the user's page selection, run extraction at desktop
    (1440px) and mobile (375px) widths, normalize both token sets, merge
    extra colors found by Firecrawl, run semantic color analysis, then
    build the AS-IS preview HTML blocks.

    Returns a 10-tuple:
        (status_md, log_text, desktop_data, mobile_data,
         typography_preview_html, colors_asis_preview_html,
         semantic_preview_html, spacing_asis_preview_html,
         radius_asis_preview_html, shadows_asis_preview_html)
    """
    state.log(f"📥 Received pages_data type: {type(pages_data)}")

    if pages_data is None:
        # Fix: was a 4-tuple; all exits must emit the full 10 values so the
        # Gradio output bindings line up.
        return "❌ Please discover pages first", state.get_logs(), None, None, "", "", "", "", "", ""

    # ---- Parse the selection table (DataFrame, dict payload, or rows) ----
    selected_urls = []

    try:
        if hasattr(pages_data, 'iterrows'):
            state.log(f"📥 DataFrame with {len(pages_data)} rows, columns: {list(pages_data.columns)}")

            for idx, row in pages_data.iterrows():
                try:
                    is_selected = row.get('Select', row.iloc[0] if len(row) > 0 else False)
                    url = row.get('URL', row.iloc[1] if len(row) > 1 else '')
                except Exception:  # Fix: was a bare except
                    # Fall back to positional access when named columns fail.
                    is_selected = row.iloc[0] if len(row) > 0 else False
                    url = row.iloc[1] if len(row) > 1 else ''

                if is_selected and url:
                    selected_urls.append(url)

        elif isinstance(pages_data, dict):
            state.log(f"📥 Dict with keys: {list(pages_data.keys())}")
            data = pages_data.get('data', [])
            for row in data:
                if isinstance(row, (list, tuple)) and len(row) >= 2 and row[0]:
                    selected_urls.append(row[1])

        elif isinstance(pages_data, (list, tuple)):
            state.log(f"📥 List with {len(pages_data)} items")
            for row in pages_data:
                if isinstance(row, (list, tuple)) and len(row) >= 2 and row[0]:
                    selected_urls.append(row[1])

    except Exception as e:
        state.log(f"❌ Error parsing pages_data: {str(e)}")
        import traceback
        state.log(traceback.format_exc())

    state.log(f"📋 Found {len(selected_urls)} selected URLs")

    # Fallback: if table parsing produced nothing, use every error-free
    # discovered page (capped at 10).
    if not selected_urls and state.discovered_pages:
        state.log("⚠️ No URLs from table, using all discovered pages")
        selected_urls = [p.url for p in state.discovered_pages if not p.error][:10]

    if not selected_urls:
        # Fix: full 10-tuple here too (was 4 values).
        return "❌ No pages selected. Please select pages or rediscover.", state.get_logs(), None, None, "", "", "", "", "", ""

    # Hard cap to keep extraction time bounded.
    selected_urls = selected_urls[:10]

    state.log(f"📋 Extracting from {len(selected_urls)} pages:")
    for url in selected_urls[:3]:
        state.log(f" • {url}")
    if len(selected_urls) > 3:
        state.log(f" ... and {len(selected_urls) - 3} more")

    progress(0.05, desc="🚀 Starting extraction...")

    try:
        schema = get_schema()
        extractor_mod = get_extractor()
        normalizer_mod = get_normalizer()

        # ---------------- Desktop extraction (1440px) ----------------
        state.log("")
        state.log("=" * 60)
        state.log("🖥️ DESKTOP EXTRACTION (1440px)")
        state.log("=" * 60)
        state.log("")
        state.log("📡 Enhanced extraction from 7 sources:")
        state.log(" 1. DOM computed styles (getComputedStyle)")
        state.log(" 2. CSS variables (:root { --color: })")
        state.log(" 3. SVG colors (fill, stroke)")
        state.log(" 4. Inline styles (style='color:')")
        state.log(" 5. Stylesheet rules (CSS files)")
        state.log(" 6. External CSS files (fetch & parse)")
        state.log(" 7. Page content scan (brute-force)")
        state.log("")

        progress(0.1, desc="🖥️ Extracting desktop tokens...")

        desktop_extractor = extractor_mod.TokenExtractor(viewport=schema.Viewport.DESKTOP)

        def desktop_progress(p):
            # Map extractor progress [0..1] into the 0.10-0.45 band.
            progress(0.1 + (p * 0.35), desc=f"🖥️ Desktop... {int(p*100)}%")

        state.desktop_raw = await desktop_extractor.extract(selected_urls, progress_callback=desktop_progress)

        state.log("📊 EXTRACTION RESULTS:")
        state.log(f" Colors: {len(state.desktop_raw.colors)} unique")
        state.log(f" Typography: {len(state.desktop_raw.typography)} styles")
        state.log(f" Spacing: {len(state.desktop_raw.spacing)} values")
        state.log(f" Radius: {len(state.desktop_raw.radius)} values")
        state.log(f" Shadows: {len(state.desktop_raw.shadows)} values")

        # Keep fg/bg pairs for WCAG AA contrast checks in Stage 2.
        if hasattr(desktop_extractor, 'fg_bg_pairs') and desktop_extractor.fg_bg_pairs:
            state.fg_bg_pairs = desktop_extractor.fg_bg_pairs
            state.log(f" FG/BG Pairs: {len(state.fg_bg_pairs)} unique pairs for AA checking")
        else:
            state.fg_bg_pairs = []

        if hasattr(desktop_extractor, 'css_variables') and desktop_extractor.css_variables:
            state.log("")
            state.log(f"🎨 CSS Variables found: {len(desktop_extractor.css_variables)}")
            for var_name, var_value in list(desktop_extractor.css_variables.items())[:5]:
                state.log(f" {var_name}: {var_value}")
            if len(desktop_extractor.css_variables) > 5:
                state.log(f" ... and {len(desktop_extractor.css_variables) - 5} more")

        if desktop_extractor.warnings:
            state.log("")
            state.log("⚠️ Warnings:")
            for w in desktop_extractor.warnings[:3]:
                state.log(f" {w}")

        state.log("")
        state.log("🔄 Normalizing (deduping, naming)...")
        state.desktop_normalized = normalizer_mod.normalize_tokens(state.desktop_raw)
        state.log(f" ✅ Normalized: {len(state.desktop_normalized.colors)} colors, {len(state.desktop_normalized.typography)} typography, {len(state.desktop_normalized.spacing)} spacing")

        # ---------------- Mobile extraction (375px) ----------------
        state.log("")
        state.log("=" * 60)
        state.log("📱 MOBILE EXTRACTION (375px)")
        state.log("=" * 60)
        state.log("")

        progress(0.5, desc="📱 Extracting mobile tokens...")

        mobile_extractor = extractor_mod.TokenExtractor(viewport=schema.Viewport.MOBILE)

        def mobile_progress(p):
            # Map extractor progress [0..1] into the 0.50-0.85 band.
            progress(0.5 + (p * 0.35), desc=f"📱 Mobile... {int(p*100)}%")

        state.mobile_raw = await mobile_extractor.extract(selected_urls, progress_callback=mobile_progress)

        state.log("📊 EXTRACTION RESULTS:")
        state.log(f" Colors: {len(state.mobile_raw.colors)} unique")
        state.log(f" Typography: {len(state.mobile_raw.typography)} styles")
        state.log(f" Spacing: {len(state.mobile_raw.spacing)} values")
        state.log(f" Radius: {len(state.mobile_raw.radius)} values")
        state.log(f" Shadows: {len(state.mobile_raw.shadows)} values")

        state.log("")
        state.log("🔄 Normalizing...")
        state.mobile_normalized = normalizer_mod.normalize_tokens(state.mobile_raw)
        state.log(f" ✅ Normalized: {len(state.mobile_normalized.colors)} colors, {len(state.mobile_normalized.typography)} typography, {len(state.mobile_normalized.spacing)} spacing")

        # ---------------- Firecrawl CSS analysis (best-effort) ----------------
        progress(0.88, desc="🔥 Firecrawl CSS analysis...")

        try:
            from agents.firecrawl_extractor import extract_css_colors

            base_url = selected_urls[0] if selected_urls else state.base_url

            firecrawl_result = await extract_css_colors(
                url=base_url,
                api_key=None,
                log_callback=state.log
            )

            firecrawl_colors = firecrawl_result.get("colors", {})

            if firecrawl_colors:
                state.log("")
                state.log("🔀 Merging Firecrawl colors with Playwright extraction...")

                new_colors_count = 0

                for hex_val, color_data in firecrawl_colors.items():
                    # Merge into an existing desktop token when the hex matches.
                    existing = False
                    for name, existing_color in state.desktop_normalized.colors.items():
                        if existing_color.value.lower() == hex_val.lower():
                            existing = True
                            existing_color.frequency += color_data.get("frequency", 1)
                            if "firecrawl" not in existing_color.contexts:
                                existing_color.contexts.append("firecrawl")
                            break

                    if not existing:
                        from core.token_schema import ColorToken, TokenSource, Confidence

                        new_token = ColorToken(
                            value=hex_val,
                            frequency=color_data.get("frequency", 1),
                            contexts=["firecrawl"] + color_data.get("contexts", []),
                            elements=["css-file"],
                            css_properties=color_data.get("sources", []),
                            contrast_white=color_data.get("contrast_white", 0),
                            contrast_black=color_data.get("contrast_black", 0),
                            source=TokenSource.DETECTED,
                            confidence=Confidence.MEDIUM,
                        )

                        new_token.suggested_name = f"color.firecrawl.{len(state.desktop_normalized.colors)}"

                        state.desktop_normalized.colors[hex_val] = new_token
                        new_colors_count += 1

                state.log(f" ✅ Added {new_colors_count} new colors from Firecrawl")
                state.log(f" 📊 Total colors now: {len(state.desktop_normalized.colors)}")

        except Exception as e:
            # Best-effort enrichment: a Firecrawl failure never aborts extraction.
            state.log(f" ⚠️ Firecrawl extraction skipped: {str(e)}")

        # ---------------- Semantic color analysis (best-effort) ----------------
        progress(0.92, desc="🧠 Semantic color analysis...")

        semantic_result = {}
        semantic_preview_html = ""

        try:
            from agents.semantic_analyzer import SemanticColorAnalyzer, generate_semantic_preview_html

            semantic_analyzer = SemanticColorAnalyzer(llm_provider=None)

            semantic_result = semantic_analyzer.analyze_sync(
                colors=state.desktop_normalized.colors,
                log_callback=state.log
            )

            state.semantic_analysis = semantic_result

            semantic_preview_html = generate_semantic_preview_html(semantic_result)

        except Exception as e:
            state.log(f" ⚠️ Semantic analysis skipped: {str(e)}")
            import traceback
            state.log(traceback.format_exc())

        progress(0.95, desc="📊 Preparing results...")

        desktop_data = format_tokens_for_display(state.desktop_normalized)
        mobile_data = format_tokens_for_display(state.mobile_normalized)

        # ---------------- AS-IS visual previews ----------------
        state.log("")
        state.log("🎨 Generating AS-IS visual previews...")

        from core.preview_generator import (
            generate_typography_preview_html,
            generate_colors_asis_preview_html,
            generate_spacing_asis_preview_html,
            generate_radius_asis_preview_html,
            generate_shadows_asis_preview_html,
        )

        fonts = get_detected_fonts()
        primary_font = fonts.get("primary", "Open Sans")

        # Flatten token objects into the plain dicts the preview generators expect.
        typo_dict = {}
        for name, t in state.desktop_normalized.typography.items():
            typo_dict[name] = {
                "font_size": t.font_size,
                "font_weight": t.font_weight,
                "line_height": t.line_height or "1.5",
                "letter_spacing": "0",
            }

        color_dict = {}
        for name, c in state.desktop_normalized.colors.items():
            color_dict[name] = {
                "value": c.value,
                "frequency": c.frequency,
                "contexts": c.contexts[:3] if c.contexts else [],
                "elements": c.elements[:3] if c.elements else [],
                "css_properties": c.css_properties[:3] if c.css_properties else [],
                "contrast_white": c.contrast_white,
                "contrast_black": getattr(c, 'contrast_black', 0),
            }

        spacing_dict = {}
        for name, s in state.desktop_normalized.spacing.items():
            spacing_dict[name] = {
                "value": s.value,
                "value_px": s.value_px,
            }

        radius_dict = {}
        for name, r in state.desktop_normalized.radius.items():
            radius_dict[name] = {"value": r.value}

        shadow_dict = {}
        for name, s in state.desktop_normalized.shadows.items():
            shadow_dict[name] = {"value": s.value}

        typography_preview_html = generate_typography_preview_html(
            typography_tokens=typo_dict,
            font_family=primary_font,
            sample_text="The quick brown fox jumps over the lazy dog",
        )

        colors_asis_preview_html = generate_colors_asis_preview_html(
            color_tokens=color_dict,
        )

        spacing_asis_preview_html = generate_spacing_asis_preview_html(
            spacing_tokens=spacing_dict,
        )

        radius_asis_preview_html = generate_radius_asis_preview_html(
            radius_tokens=radius_dict,
        )

        shadows_asis_preview_html = generate_shadows_asis_preview_html(
            shadow_tokens=shadow_dict,
        )

        state.log(" ✅ Typography preview generated")
        state.log(" ✅ Colors AS-IS preview generated (no ramps)")
        state.log(" ✅ Semantic color analysis preview generated")
        state.log(" ✅ Spacing AS-IS preview generated")
        state.log(" ✅ Radius AS-IS preview generated")
        state.log(" ✅ Shadows AS-IS preview generated")

        brand_count = len(semantic_result.get("brand", {}))
        text_count = len(semantic_result.get("text", {}))
        bg_count = len(semantic_result.get("background", {}))

        state.log("")
        state.log("=" * 50)
        state.log("✅ EXTRACTION COMPLETE!")
        state.log(f" Enhanced extraction captured:")
        state.log(f" • {len(state.desktop_normalized.colors)} colors (DOM + CSS vars + SVG + inline)")
        state.log(f" • {len(state.desktop_normalized.typography)} typography styles")
        state.log(f" • {len(state.desktop_normalized.spacing)} spacing values")
        state.log(f" • {len(state.desktop_normalized.radius)} radius values")
        state.log(f" • {len(state.desktop_normalized.shadows)} shadow values")
        state.log(f" Semantic Analysis:")
        state.log(f" • {brand_count} brand colors identified")
        state.log(f" • {text_count} text colors identified")
        state.log(f" • {bg_count} background colors identified")
        state.log("=" * 50)

        progress(1.0, desc="✅ Complete!")

        status = f"""## ✅ Extraction Complete!

| Viewport | Colors | Typography | Spacing | Radius | Shadows |
|----------|--------|------------|---------|--------|---------|
| Desktop | {len(state.desktop_normalized.colors)} | {len(state.desktop_normalized.typography)} | {len(state.desktop_normalized.spacing)} | {len(state.desktop_normalized.radius)} | {len(state.desktop_normalized.shadows)} |
| Mobile | {len(state.mobile_normalized.colors)} | {len(state.mobile_normalized.typography)} | {len(state.mobile_normalized.spacing)} | {len(state.mobile_normalized.radius)} | {len(state.mobile_normalized.shadows)} |

**Primary Font:** {primary_font}

**Semantic Analysis:** {brand_count} brand, {text_count} text, {bg_count} background colors

**Enhanced Extraction:** DOM + CSS Variables + SVG + Inline + Stylesheets + Firecrawl

**Next:** Review the tokens below. Accept or reject, then proceed to Stage 2.
"""

        return (
            status,
            state.get_logs(),
            desktop_data,
            mobile_data,
            typography_preview_html,
            colors_asis_preview_html,
            semantic_preview_html,
            spacing_asis_preview_html,
            radius_asis_preview_html,
            shadows_asis_preview_html,
        )

    except Exception as e:
        import traceback
        state.log(f"❌ Error: {str(e)}")
        state.log(traceback.format_exc())
        return f"❌ Error: {str(e)}", state.get_logs(), None, None, "", "", "", "", "", ""
|
|
|
|
|
|
|
|
def format_tokens_for_display(normalized) -> dict:
    """Format normalized tokens for Gradio display.

    Produces row lists for the colors (top 50 by frequency), typography
    (top 30 by frequency) and spacing (first 20, ascending px) tables.
    Returns empty row lists when *normalized* is None.
    """
    if normalized is None:
        return {"colors": [], "typography": [], "spacing": []}

    def _items(group):
        # Token groups may arrive as name->token dicts or plain sequences.
        return list(group.values()) if isinstance(group, dict) else group

    color_rows = [
        [
            True,
            c.value,
            c.suggested_name or "",
            c.frequency,
            c.confidence.value if c.confidence else "medium",
            f"{c.contrast_white:.1f}:1" if c.contrast_white else "N/A",
            "✓" if c.wcag_aa_small_text else "✗",
            ", ".join(c.contexts[:2]) if c.contexts else "",
        ]
        for c in sorted(_items(normalized.colors), key=lambda x: -x.frequency)[:50]
    ]

    typography_rows = [
        [
            True,
            t.font_family,
            t.font_size,
            str(t.font_weight),
            t.line_height or "",
            t.suggested_name or "",
            t.frequency,
            t.confidence.value if t.confidence else "medium",
        ]
        for t in sorted(_items(normalized.typography), key=lambda x: -x.frequency)[:30]
    ]

    spacing_rows = [
        [
            True,
            s.value,
            f"{s.value_px}px",
            s.suggested_name or "",
            s.frequency,
            "✓" if s.fits_base_8 else "",
            s.confidence.value if s.confidence else "medium",
        ]
        for s in sorted(_items(normalized.spacing), key=lambda x: x.value_px)[:20]
    ]

    return {
        "colors": color_rows,
        "typography": typography_rows,
        "spacing": spacing_rows,
    }
|
|
|
|
|
|
|
|
def switch_viewport(viewport: str):
    """Switch between desktop and mobile token views.

    Returns (colors, typography, spacing) row lists for the chosen viewport.
    """
    source = (
        state.desktop_normalized
        if viewport == "Desktop (1440px)"
        else state.mobile_normalized
    )
    formatted = format_tokens_for_display(source)
    return formatted["colors"], formatted["typography"], formatted["spacing"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
async def run_stage2_analysis(competitors_str: str = "", progress=gr.Progress()):
    """Run multi-agent analysis on extracted tokens.

    Args:
        competitors_str: Comma-separated competitor names; falls back to a
            default list of well-known design systems when blank.
        progress: Gradio progress reporter.

    Returns a 15-tuple:
        (status, logs, brand_md, font_families_md,
         typography_desktop_data, typography_mobile_data, spacing_data,
         base_colors_md, color_ramps_md, radius_md, shadows_md,
         typography_preview_html, color_ramps_preview_html,
         llm_recs_html, llm_recs_table)
    """
    if not state.desktop_normalized or not state.mobile_normalized:
        # Fix: was an 11-tuple; every exit must emit 15 values to match the
        # Gradio output bindings (same shape as the except handler below).
        return ("❌ Please complete Stage 1 first", "", "", "", None, None, None, "", "", "", "", "", "", "", [])

    default_competitors = [
        "Material Design 3",
        "Apple Human Interface Guidelines",
        "Shopify Polaris",
        "IBM Carbon",
        "Atlassian Design System"
    ]

    if competitors_str and competitors_str.strip():
        competitors = [c.strip() for c in competitors_str.split(",") if c.strip()]
    else:
        competitors = default_competitors

    progress(0.05, desc="🤖 Initializing multi-agent analysis...")

    try:
        from agents.stage2_graph import run_stage2_multi_agent

        desktop_dict = normalized_to_dict(state.desktop_normalized)
        mobile_dict = normalized_to_dict(state.mobile_normalized)

        progress(0.1, desc="🚀 Running parallel LLM analysis...")

        result = await run_stage2_multi_agent(
            desktop_tokens=desktop_dict,
            mobile_tokens=mobile_dict,
            competitors=competitors,
            log_callback=state.log,
            semantic_analysis=getattr(state, 'semantic_analysis', None),
        )

        progress(0.8, desc="📊 Processing results...")

        # Pull the individual agent outputs from the workflow result.
        final_recs = result.get("final_recommendations", {})
        llm1_analysis = result.get("llm1_analysis", {})
        llm2_analysis = result.get("llm2_analysis", {})
        rule_calculations = result.get("rule_calculations", {})
        cost_tracking = result.get("cost_tracking", {})

        state.upgrade_recommendations = final_recs
        state.multi_agent_result = result

        fonts = get_detected_fonts()
        base_size = get_base_font_size()

        progress(0.9, desc="📊 Formatting results...")

        status = build_analysis_status(final_recs, cost_tracking, result.get("errors", []))

        brand_md = format_multi_agent_comparison(llm1_analysis, llm2_analysis, final_recs)

        font_families_md = format_font_families_display(fonts)

        typography_desktop_data = format_typography_comparison_viewport(
            state.desktop_normalized, base_size, "desktop"
        )
        typography_mobile_data = format_typography_comparison_viewport(
            state.mobile_normalized, base_size, "mobile"
        )

        spacing_data = format_spacing_comparison_from_rules(rule_calculations)

        base_colors_md = format_base_colors()
        color_ramps_md = format_color_ramps_from_rules(rule_calculations)

        radius_md = format_radius_with_tokens()

        shadows_md = format_shadows_with_tokens()

        state.log("")
        state.log("🎨 Generating visual previews...")

        from core.preview_generator import (
            generate_typography_preview_html,
            generate_color_ramps_preview_html,
            generate_semantic_color_ramps_html
        )

        primary_font = fonts.get("primary", "Open Sans")

        # Flatten token objects into the plain dicts the preview generators expect.
        typo_dict = {}
        for name, t in state.desktop_normalized.typography.items():
            typo_dict[name] = {
                "font_size": t.font_size,
                "font_weight": t.font_weight,
                "line_height": t.line_height or "1.5",
                "letter_spacing": "0",
            }

        color_dict = {}
        for name, c in state.desktop_normalized.colors.items():
            color_dict[name] = {
                "value": c.value,
                "frequency": c.frequency,
            }

        typography_preview_html = generate_typography_preview_html(
            typography_tokens=typo_dict,
            font_family=primary_font,
            sample_text="The quick brown fox jumps over the lazy dog",
        )

        # Prefer the semantic ramp preview when Stage 1 produced an analysis.
        semantic_analysis = getattr(state, 'semantic_analysis', None)
        if semantic_analysis:
            llm_color_recs = {}
            if final_recs and isinstance(final_recs, dict):
                llm_color_recs = final_recs.get("color_recommendations", {})

                aa_fixes = final_recs.get("accessibility_fixes", [])
                if aa_fixes:
                    llm_color_recs["changes_made"] = [
                        f"AA fix suggested for {f.get('color', '?')}"
                        for f in aa_fixes if isinstance(f, dict)
                    ][:5]

            color_ramps_preview_html = generate_semantic_color_ramps_html(
                semantic_analysis=semantic_analysis,
                color_tokens=color_dict,
                llm_recommendations={"color_recommendations": llm_color_recs} if llm_color_recs else None,
            )
            state.log(" ✅ Semantic color ramps preview generated (with LLM recommendations)")
        else:
            color_ramps_preview_html = generate_color_ramps_preview_html(
                color_tokens=color_dict,
            )
            state.log(" ✅ Color ramps preview generated (no semantic data)")

        state.log(" ✅ Typography preview generated")

        llm_recs_html = format_llm_color_recommendations_html(final_recs, semantic_analysis)
        llm_recs_table = format_llm_color_recommendations_table(final_recs, semantic_analysis)

        state.log(" ✅ LLM recommendations formatted")

        progress(1.0, desc="✅ Analysis complete!")

        return (status, state.get_logs(), brand_md, font_families_md,
                typography_desktop_data, typography_mobile_data, spacing_data,
                base_colors_md, color_ramps_md, radius_md, shadows_md,
                typography_preview_html, color_ramps_preview_html,
                llm_recs_html, llm_recs_table)

    except Exception as e:
        import traceback
        state.log(f"❌ Error: {str(e)}")
        state.log(traceback.format_exc())
        return (f"❌ Analysis failed: {str(e)}", state.get_logs(), "", "", None, None, None, "", "", "", "", "", "", "", [])
|
|
|
|
|
|
|
|
def normalized_to_dict(normalized) -> dict:
    """Convert NormalizedTokens to dict for workflow.

    Returns a plain-dict projection with the same category keys
    (colors/typography/spacing/radius/shadows); empty dict when the
    input is falsy.
    """
    if not normalized:
        return {}

    return {
        "colors": {
            name: {
                "value": c.value,
                "frequency": c.frequency,
                "suggested_name": c.suggested_name,
                "contrast_white": c.contrast_white,
                "contrast_black": c.contrast_black,
            }
            for name, c in normalized.colors.items()
        },
        "typography": {
            name: {
                "font_family": t.font_family,
                "font_size": t.font_size,
                "font_weight": t.font_weight,
                "line_height": t.line_height,
                "frequency": t.frequency,
            }
            for name, t in normalized.typography.items()
        },
        "spacing": {
            name: {
                "value": s.value,
                "value_px": s.value_px,
                "frequency": s.frequency,
            }
            for name, s in normalized.spacing.items()
        },
        "radius": {
            name: {
                "value": r.value,
                "frequency": r.frequency,
            }
            for name, r in normalized.radius.items()
        },
        "shadows": {
            name: {
                "value": s.value,
                "frequency": s.frequency,
            }
            for name, s in normalized.shadows.items()
        },
    }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
async def run_stage2_analysis_v2( |
|
|
selected_benchmarks: list[str] = None, |
|
|
progress=gr.Progress() |
|
|
): |
|
|
""" |
|
|
Run Stage 2 analysis with new architecture: |
|
|
- Layer 1: Rule Engine (FREE) |
|
|
- Layer 2: Benchmark Research (Firecrawl + Cache) |
|
|
- Layer 3: LLM Agents (Brand ID, Benchmark Advisor, Best Practices) |
|
|
- Layer 4: HEAD Synthesizer |
|
|
|
|
|
Includes comprehensive error handling for graceful degradation. |
|
|
""" |
|
|
|
|
|
|
|
|
if not state.desktop_normalized or not state.mobile_normalized: |
|
|
return create_stage2_error_response("❌ Please complete Stage 1 first") |
|
|
|
|
|
|
|
|
if not selected_benchmarks or len(selected_benchmarks) == 0: |
|
|
selected_benchmarks = [ |
|
|
"material_design_3", |
|
|
"shopify_polaris", |
|
|
"atlassian_design", |
|
|
] |
|
|
|
|
|
state.log("") |
|
|
state.log("═" * 60) |
|
|
state.log("🚀 STAGE 2: MULTI-AGENT ANALYSIS") |
|
|
state.log("═" * 60) |
|
|
state.log(f" Started: {datetime.now().strftime('%H:%M:%S')}") |
|
|
state.log(f" Benchmarks: {', '.join(selected_benchmarks)}") |
|
|
state.log("") |
|
|
|
|
|
|
|
|
rule_results = None |
|
|
benchmark_comparisons = [] |
|
|
brand_result = None |
|
|
benchmark_advice = None |
|
|
best_practices = None |
|
|
final_synthesis = None |
|
|
|
|
|
progress(0.05, desc="⚙️ Running Rule Engine...") |
|
|
|
|
|
try: |
|
|
|
|
|
|
|
|
|
|
|
try: |
|
|
from core.rule_engine import run_rule_engine |
|
|
|
|
|
|
|
|
desktop_dict = normalized_to_dict(state.desktop_normalized) |
|
|
mobile_dict = normalized_to_dict(state.mobile_normalized) |
|
|
|
|
|
|
|
|
if not desktop_dict.get("colors") and not desktop_dict.get("typography"): |
|
|
raise ValueError("No tokens extracted from Stage 1") |
|
|
|
|
|
|
|
|
rule_results = run_rule_engine( |
|
|
typography_tokens=desktop_dict.get("typography", {}), |
|
|
color_tokens=desktop_dict.get("colors", {}), |
|
|
spacing_tokens=desktop_dict.get("spacing", {}), |
|
|
radius_tokens=desktop_dict.get("radius", {}), |
|
|
shadow_tokens=desktop_dict.get("shadows", {}), |
|
|
log_callback=state.log, |
|
|
fg_bg_pairs=getattr(state, 'fg_bg_pairs', None), |
|
|
) |
|
|
|
|
|
state.rule_engine_results = rule_results |
|
|
state.log("") |
|
|
state.log(" ✅ Rule Engine: SUCCESS") |
|
|
|
|
|
except Exception as e: |
|
|
state.log(f" ❌ Rule Engine FAILED: {str(e)[:100]}") |
|
|
state.log(" └─ Cannot proceed without rule engine results") |
|
|
import traceback |
|
|
state.log(traceback.format_exc()[:500]) |
|
|
return create_stage2_error_response(f"❌ Rule Engine failed: {str(e)}") |
|
|
|
|
|
progress(0.20, desc="🔬 Researching benchmarks...") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
try: |
|
|
from agents.benchmark_researcher import BenchmarkResearcher, FALLBACK_BENCHMARKS, BenchmarkData |
|
|
|
|
|
|
|
|
firecrawl_client = None |
|
|
try: |
|
|
from agents.firecrawl_extractor import get_firecrawl_client |
|
|
firecrawl_client = get_firecrawl_client() |
|
|
state.log(" ├─ Firecrawl client: Available") |
|
|
except Exception as fc_err: |
|
|
state.log(f" ├─ Firecrawl client: Not available ({str(fc_err)[:30]})") |
|
|
state.log(" │ └─ Will use cached/fallback data") |
|
|
|
|
|
|
|
|
hf_client = None |
|
|
try: |
|
|
from core.hf_inference import get_inference_client |
|
|
hf_client = get_inference_client() |
|
|
state.log(" ├─ HF client: Available") |
|
|
except Exception as hf_err: |
|
|
state.log(f" ├─ HF client: Not available ({str(hf_err)[:30]})") |
|
|
|
|
|
researcher = BenchmarkResearcher( |
|
|
firecrawl_client=firecrawl_client, |
|
|
hf_client=hf_client, |
|
|
) |
|
|
|
|
|
|
|
|
try: |
|
|
benchmarks = await researcher.research_selected_benchmarks( |
|
|
selected_keys=selected_benchmarks, |
|
|
log_callback=state.log, |
|
|
) |
|
|
except Exception as research_err: |
|
|
state.log(f" ⚠️ Research failed, using fallback: {str(research_err)[:50]}") |
|
|
|
|
|
benchmarks = [] |
|
|
for key in selected_benchmarks: |
|
|
if key in FALLBACK_BENCHMARKS: |
|
|
data = FALLBACK_BENCHMARKS[key] |
|
|
benchmarks.append(BenchmarkData( |
|
|
key=key, |
|
|
name=key.replace("_", " ").title(), |
|
|
short_name=key.split("_")[0].title(), |
|
|
vendor="", |
|
|
icon="📦", |
|
|
typography=data.get("typography", {}), |
|
|
spacing=data.get("spacing", {}), |
|
|
colors=data.get("colors", {}), |
|
|
fetched_at=datetime.now().isoformat(), |
|
|
confidence="fallback", |
|
|
best_for=[], |
|
|
)) |
|
|
|
|
|
|
|
|
if benchmarks and rule_results: |
|
|
benchmark_comparisons = researcher.compare_to_benchmarks( |
|
|
your_ratio=rule_results.typography.detected_ratio, |
|
|
your_base_size=int(rule_results.typography.base_size) if rule_results.typography.sizes_px else 16, |
|
|
your_spacing_grid=rule_results.spacing.detected_base, |
|
|
benchmarks=benchmarks, |
|
|
log_callback=state.log, |
|
|
) |
|
|
state.benchmark_comparisons = benchmark_comparisons |
|
|
state.log("") |
|
|
state.log(f" ✅ Benchmark Research: SUCCESS ({len(benchmarks)} systems)") |
|
|
else: |
|
|
state.log(" ⚠️ No benchmarks available for comparison") |
|
|
|
|
|
except Exception as e: |
|
|
state.log(f" ⚠️ Benchmark Research FAILED: {str(e)[:100]}") |
|
|
state.log(" └─ Continuing without benchmark comparison...") |
|
|
benchmark_comparisons = [] |
|
|
|
|
|
progress(0.40, desc="🤖 Running LLM Agents...") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
try: |
|
|
from agents.llm_agents import ( |
|
|
BrandIdentifierAgent, |
|
|
BenchmarkAdvisorAgent, |
|
|
BestPracticesValidatorAgent, |
|
|
BrandIdentification, |
|
|
BenchmarkAdvice, |
|
|
BestPracticesResult, |
|
|
) |
|
|
|
|
|
state.log("") |
|
|
state.log("═" * 60) |
|
|
state.log("🤖 LAYER 3: LLM ANALYSIS") |
|
|
state.log("═" * 60) |
|
|
|
|
|
|
|
|
if not hf_client: |
|
|
try: |
|
|
from core.hf_inference import get_inference_client |
|
|
hf_client = get_inference_client() |
|
|
except Exception: |
|
|
state.log(" ⚠️ HF client not available - skipping LLM agents") |
|
|
hf_client = None |
|
|
|
|
|
if hf_client: |
|
|
|
|
|
brand_agent = BrandIdentifierAgent(hf_client) |
|
|
benchmark_agent = BenchmarkAdvisorAgent(hf_client) |
|
|
best_practices_agent = BestPracticesValidatorAgent(hf_client) |
|
|
|
|
|
|
|
|
semantic_analysis = getattr(state, 'semantic_analysis', {}) |
|
|
desktop_dict = normalized_to_dict(state.desktop_normalized) |
|
|
|
|
|
|
|
|
|
|
|
try: |
|
|
brand_result = await brand_agent.analyze( |
|
|
color_tokens=desktop_dict.get("colors", {}), |
|
|
semantic_analysis=semantic_analysis, |
|
|
log_callback=state.log, |
|
|
) |
|
|
|
|
|
if brand_result: |
|
|
state.log(f" ├─ Brand Primary: {brand_result.primary_color or 'N/A'} ({brand_result.confidence or 'N/A'} confidence)") |
|
|
state.log(f" ├─ Brand Secondary: {brand_result.secondary_color or 'N/A'}") |
|
|
state.log(f" ├─ Palette Strategy: {brand_result.palette_strategy or 'N/A'}") |
|
|
state.log(f" └─ Cohesion Score: {brand_result.cohesion_score or 'N/A'}/10") |
|
|
except Exception as e: |
|
|
state.log(f" ⚠️ Brand Identifier failed: {str(e)[:120]}") |
|
|
brand_result = BrandIdentification() |
|
|
|
|
|
|
|
|
if benchmark_comparisons: |
|
|
try: |
|
|
benchmark_advice = await benchmark_agent.analyze( |
|
|
user_ratio=rule_results.typography.detected_ratio, |
|
|
user_base=int(rule_results.typography.base_size) if rule_results.typography.sizes_px else 16, |
|
|
user_spacing=rule_results.spacing.detected_base, |
|
|
benchmark_comparisons=benchmark_comparisons, |
|
|
log_callback=state.log, |
|
|
) |
|
|
|
|
|
if benchmark_advice: |
|
|
state.log(f" ├─ Recommended: {benchmark_advice.recommended_system or 'N/A'}") |
|
|
changes = getattr(benchmark_advice, 'changes_needed', []) or [] |
|
|
state.log(f" ├─ Changes Needed: {len(changes)}") |
|
|
if changes: |
|
|
state.log(f" └─ Key Change: {changes[0].get('what', 'N/A') if isinstance(changes[0], dict) else changes[0]}") |
|
|
except Exception as e: |
|
|
state.log(f" ⚠️ Benchmark Advisor failed: {str(e)[:120]}") |
|
|
benchmark_advice = BenchmarkAdvice() |
|
|
else: |
|
|
benchmark_advice = BenchmarkAdvice() |
|
|
|
|
|
|
|
|
try: |
|
|
best_practices = await best_practices_agent.analyze( |
|
|
rule_engine_results=rule_results, |
|
|
log_callback=state.log, |
|
|
) |
|
|
|
|
|
if best_practices: |
|
|
checks = getattr(best_practices, 'checks', []) or [] |
|
|
passing = sum(1 for c in checks if c.get('pass', False)) if checks else 0 |
|
|
failing = len(checks) - passing if checks else 0 |
|
|
state.log(f" ├─ Overall Score: {best_practices.overall_score or 'N/A'}/100") |
|
|
state.log(f" ├─ Passing: {passing} | Failing: {failing}") |
|
|
if checks: |
|
|
top_fail = next((c for c in checks if not c.get('pass', True)), None) |
|
|
if top_fail: |
|
|
state.log(f" └─ Top Fix: {top_fail.get('fix', top_fail.get('name', 'N/A'))[:60]}") |
|
|
except Exception as e: |
|
|
state.log(f" ⚠️ Best Practices Validator failed: {str(e)[:120]}") |
|
|
best_practices = BestPracticesResult(overall_score=rule_results.consistency_score) |
|
|
else: |
|
|
|
|
|
state.log(" └─ Using default values (no LLM)") |
|
|
brand_result = BrandIdentification() |
|
|
benchmark_advice = BenchmarkAdvice() |
|
|
best_practices = BestPracticesResult(overall_score=rule_results.consistency_score) |
|
|
|
|
|
except Exception as e: |
|
|
state.log(f" ⚠️ LLM Agents FAILED: {str(e)[:100]}") |
|
|
brand_result = BrandIdentification() if not brand_result else brand_result |
|
|
benchmark_advice = BenchmarkAdvice() if not benchmark_advice else benchmark_advice |
|
|
best_practices = BestPracticesResult(overall_score=rule_results.consistency_score if rule_results else 50) |
|
|
|
|
|
progress(0.70, desc="🧠 Synthesizing results...") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
try: |
|
|
from agents.llm_agents import HeadSynthesizerAgent, HeadSynthesis |
|
|
|
|
|
if hf_client and brand_result and benchmark_advice and best_practices: |
|
|
head_agent = HeadSynthesizerAgent(hf_client) |
|
|
|
|
|
try: |
|
|
final_synthesis = await head_agent.synthesize( |
|
|
rule_engine_results=rule_results, |
|
|
benchmark_comparisons=benchmark_comparisons, |
|
|
brand_identification=brand_result, |
|
|
benchmark_advice=benchmark_advice, |
|
|
best_practices=best_practices, |
|
|
log_callback=state.log, |
|
|
) |
|
|
if final_synthesis: |
|
|
state.log("") |
|
|
state.log(f" ✅ HEAD Synthesizer: COMPLETE") |
|
|
state.log(f" ├─ Scores: {final_synthesis.scores}") |
|
|
if final_synthesis.executive_summary: |
|
|
state.log(f" ├─ Summary: {final_synthesis.executive_summary[:100]}...") |
|
|
color_recs = getattr(final_synthesis, 'color_recommendations', {}) |
|
|
if color_recs: |
|
|
state.log(f" ├─ Color Recommendations: {len(color_recs)} suggested changes") |
|
|
if final_synthesis.top_3_actions: |
|
|
state.log(f" └─ Top Actions: {len(final_synthesis.top_3_actions)} priorities") |
|
|
except Exception as e: |
|
|
state.log(f" ⚠️ HEAD Synthesizer failed: {str(e)[:120]}") |
|
|
import traceback |
|
|
state.log(f" └─ {traceback.format_exc()[:200]}") |
|
|
final_synthesis = None |
|
|
|
|
|
|
|
|
if not final_synthesis: |
|
|
state.log(" └─ Creating fallback synthesis...") |
|
|
final_synthesis = create_fallback_synthesis( |
|
|
rule_results, benchmark_comparisons, brand_result, best_practices |
|
|
) |
|
|
|
|
|
state.final_synthesis = final_synthesis |
|
|
|
|
|
except Exception as e: |
|
|
state.log(f" ⚠️ Synthesis FAILED: {str(e)[:100]}") |
|
|
final_synthesis = create_fallback_synthesis( |
|
|
rule_results, benchmark_comparisons, brand_result, best_practices |
|
|
) |
|
|
state.final_synthesis = final_synthesis |
|
|
|
|
|
progress(0.85, desc="📊 Formatting results...") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
try: |
|
|
|
|
|
status_md = format_stage2_status_v2( |
|
|
rule_results=rule_results, |
|
|
final_synthesis=final_synthesis, |
|
|
best_practices=best_practices, |
|
|
) |
|
|
|
|
|
|
|
|
benchmark_md = format_benchmark_comparison_v2( |
|
|
benchmark_comparisons=benchmark_comparisons, |
|
|
benchmark_advice=benchmark_advice, |
|
|
) |
|
|
|
|
|
|
|
|
scores_html = format_scores_dashboard_v2( |
|
|
rule_results=rule_results, |
|
|
final_synthesis=final_synthesis, |
|
|
best_practices=best_practices, |
|
|
) |
|
|
|
|
|
|
|
|
actions_html = format_priority_actions_v2( |
|
|
rule_results=rule_results, |
|
|
final_synthesis=final_synthesis, |
|
|
best_practices=best_practices, |
|
|
) |
|
|
|
|
|
|
|
|
color_recs_table = format_color_recommendations_table_v2( |
|
|
rule_results=rule_results, |
|
|
brand_result=brand_result, |
|
|
final_synthesis=final_synthesis, |
|
|
) |
|
|
|
|
|
|
|
|
fonts = get_detected_fonts() |
|
|
base_size = get_base_font_size() |
|
|
|
|
|
typography_desktop_data = format_typography_comparison_viewport( |
|
|
state.desktop_normalized, base_size, "desktop" |
|
|
) |
|
|
typography_mobile_data = format_typography_comparison_viewport( |
|
|
state.mobile_normalized, base_size, "mobile" |
|
|
) |
|
|
|
|
|
|
|
|
typography_preview_html = "" |
|
|
color_ramps_preview_html = "" |
|
|
llm_recs_html = "" |
|
|
|
|
|
try: |
|
|
from core.preview_generator import ( |
|
|
generate_typography_preview_html, |
|
|
generate_semantic_color_ramps_html, |
|
|
generate_color_ramps_preview_html, |
|
|
) |
|
|
|
|
|
primary_font = fonts.get("primary", "Open Sans") |
|
|
desktop_typo_dict = { |
|
|
name: { |
|
|
"font_size": t.font_size, |
|
|
"font_weight": t.font_weight, |
|
|
"line_height": t.line_height, |
|
|
} |
|
|
for name, t in state.desktop_normalized.typography.items() |
|
|
} |
|
|
typography_preview_html = generate_typography_preview_html(desktop_typo_dict, primary_font) |
|
|
|
|
|
|
|
|
semantic_analysis = getattr(state, 'semantic_analysis', {}) |
|
|
desktop_dict_for_colors = normalized_to_dict(state.desktop_normalized) |
|
|
|
|
|
if semantic_analysis: |
|
|
color_ramps_preview_html = generate_semantic_color_ramps_html( |
|
|
semantic_analysis=semantic_analysis, |
|
|
color_tokens=desktop_dict_for_colors.get("colors", {}), |
|
|
) |
|
|
else: |
|
|
color_ramps_preview_html = generate_color_ramps_preview_html( |
|
|
color_tokens=desktop_dict_for_colors.get("colors", {}), |
|
|
) |
|
|
|
|
|
state.log(" ✅ Color ramps preview generated") |
|
|
|
|
|
except Exception as preview_err: |
|
|
state.log(f" ⚠️ Preview generation failed: {str(preview_err)[:80]}") |
|
|
typography_preview_html = typography_preview_html or "<div class='placeholder-msg'>Preview unavailable</div>" |
|
|
color_ramps_preview_html = "<div class='placeholder-msg'>Color ramps preview unavailable</div>" |
|
|
|
|
|
|
|
|
try: |
|
|
|
|
|
synth_recs = {} |
|
|
if final_synthesis: |
|
|
|
|
|
color_recs_dict = {} |
|
|
for rec in (final_synthesis.color_recommendations or []): |
|
|
if isinstance(rec, dict) and rec.get("role"): |
|
|
color_recs_dict[rec["role"]] = rec |
|
|
synth_recs["color_recommendations"] = color_recs_dict |
|
|
|
|
|
|
|
|
aa_fixes = [] |
|
|
if rule_results and rule_results.accessibility: |
|
|
for a in rule_results.accessibility: |
|
|
if not a.passes_aa_normal: |
|
|
aa_fixes.append(a.to_dict() if hasattr(a, 'to_dict') else {"color": str(a)}) |
|
|
synth_recs["accessibility_fixes"] = aa_fixes |
|
|
|
|
|
llm_recs_html = format_llm_color_recommendations_html( |
|
|
final_recs=synth_recs, |
|
|
semantic_analysis=getattr(state, 'semantic_analysis', {}), |
|
|
) |
|
|
except Exception as recs_err: |
|
|
state.log(f" ⚠️ LLM recs HTML failed: {str(recs_err)[:120]}") |
|
|
import traceback |
|
|
state.log(f" └─ {traceback.format_exc()[:200]}") |
|
|
llm_recs_html = "<div class='placeholder-msg'>LLM recommendations unavailable</div>" |
|
|
|
|
|
|
|
|
aa_failures_list = [] |
|
|
if rule_results and rule_results.accessibility: |
|
|
aa_failures_list = [ |
|
|
a.to_dict() for a in rule_results.accessibility |
|
|
if not a.passes_aa_normal |
|
|
] |
|
|
state.upgrade_recommendations = { |
|
|
"color_recommendations": (final_synthesis.color_recommendations if final_synthesis else []), |
|
|
"accessibility_fixes": aa_failures_list, |
|
|
"scores": (final_synthesis.scores if final_synthesis else {}), |
|
|
"top_3_actions": (final_synthesis.top_3_actions if final_synthesis else []), |
|
|
} |
|
|
|
|
|
except Exception as format_err: |
|
|
state.log(f" ⚠️ Formatting failed: {str(format_err)[:100]}") |
|
|
import traceback |
|
|
state.log(traceback.format_exc()[:500]) |
|
|
|
|
|
return ( |
|
|
f"⚠️ Analysis completed with formatting errors: {str(format_err)[:50]}", |
|
|
state.get_logs(), |
|
|
"*Benchmark comparison unavailable*", |
|
|
"<div class='placeholder-msg'>Scores unavailable</div>", |
|
|
"<div class='placeholder-msg'>Actions unavailable</div>", |
|
|
[], |
|
|
None, |
|
|
None, |
|
|
"<div class='placeholder-msg'>Typography preview unavailable</div>", |
|
|
"<div class='placeholder-msg'>Color ramps preview unavailable</div>", |
|
|
"<div class='placeholder-msg'>LLM recommendations unavailable</div>", |
|
|
) |
|
|
|
|
|
progress(0.95, desc="✅ Complete!") |
|
|
|
|
|
|
|
|
state.log("") |
|
|
state.log("═" * 60) |
|
|
state.log("📊 FINAL RESULTS") |
|
|
state.log("═" * 60) |
|
|
state.log("") |
|
|
overall_score = final_synthesis.scores.get('overall', rule_results.consistency_score) if final_synthesis else rule_results.consistency_score |
|
|
state.log(f" 🎯 OVERALL SCORE: {overall_score}/100") |
|
|
if final_synthesis and final_synthesis.scores: |
|
|
state.log(f" ├─ Accessibility: {final_synthesis.scores.get('accessibility', '?')}/100") |
|
|
state.log(f" ├─ Consistency: {final_synthesis.scores.get('consistency', '?')}/100") |
|
|
state.log(f" └─ Organization: {final_synthesis.scores.get('organization', '?')}/100") |
|
|
state.log("") |
|
|
if benchmark_comparisons: |
|
|
state.log(f" 🏆 Closest Benchmark: {benchmark_comparisons[0].benchmark.name if benchmark_comparisons else 'N/A'}") |
|
|
state.log("") |
|
|
state.log(" 🎯 TOP 3 ACTIONS:") |
|
|
if final_synthesis and final_synthesis.top_3_actions: |
|
|
for i, action in enumerate(final_synthesis.top_3_actions[:3]): |
|
|
impact = action.get('impact', 'medium') |
|
|
icon = "🔴" if impact == "high" else "🟡" if impact == "medium" else "🟢" |
|
|
state.log(f" │ {i+1}. {icon} {action.get('action', 'N/A')}") |
|
|
else: |
|
|
state.log(f" │ 1. 🔴 Fix {rule_results.aa_failures} AA compliance failures") |
|
|
state.log("") |
|
|
state.log("═" * 60) |
|
|
state.log(f" 💰 TOTAL COST: ~$0.003") |
|
|
state.log(f" ⏱️ COMPLETED: {datetime.now().strftime('%H:%M:%S')}") |
|
|
state.log("═" * 60) |
|
|
|
|
|
return ( |
|
|
status_md, |
|
|
state.get_logs(), |
|
|
benchmark_md, |
|
|
scores_html, |
|
|
actions_html, |
|
|
color_recs_table, |
|
|
typography_desktop_data, |
|
|
typography_mobile_data, |
|
|
typography_preview_html, |
|
|
color_ramps_preview_html, |
|
|
llm_recs_html, |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
import traceback |
|
|
state.log(f"❌ Critical Error: {str(e)}") |
|
|
state.log(traceback.format_exc()) |
|
|
return create_stage2_error_response(f"❌ Analysis failed: {str(e)}") |
|
|
|
|
|
|
|
|
def create_fallback_synthesis(rule_results, benchmark_comparisons, brand_result, best_practices):
    """Build a deterministic HeadSynthesis substitute when LLM synthesis fails.

    Derives scores and up to three priority actions purely from the
    rule-engine results so downstream formatting always has a synthesis
    object to work with.

    Args:
        rule_results: Rule-engine analysis results (may be None).
        benchmark_comparisons: Ranked benchmark comparison list (may be empty).
        brand_result: Brand identification result (may be None).
        best_practices: Best-practices validation result (unused here).

    Returns:
        A HeadSynthesis populated from rule-engine values with safe defaults.
    """
    from agents.llm_agents import HeadSynthesis

    has_rules = rule_results is not None

    # Rule-engine-derived scores; neutral 50s when no rule results exist.
    overall = rule_results.consistency_score if has_rules else 50
    accessibility = max(0, 100 - (rule_results.aa_failures * 10)) if has_rules else 50

    # Derive up to three priority actions from the most impactful findings.
    actions = []
    if has_rules:
        if rule_results.aa_failures > 0:
            actions.append({
                "action": f"Fix {rule_results.aa_failures} colors failing AA compliance",
                "impact": "high",
                "effort": "30 min",
            })
        if not rule_results.typography.is_consistent:
            actions.append({
                "action": f"Align type scale to {rule_results.typography.recommendation} ({rule_results.typography.recommendation_name})",
                "impact": "medium",
                "effort": "1 hour",
            })
        if rule_results.color_stats.unique_count > 30:
            actions.append({
                "action": f"Consolidate {rule_results.color_stats.unique_count} colors to ~15 semantic colors",
                "impact": "medium",
                "effort": "2 hours",
            })

    closest = benchmark_comparisons[0] if benchmark_comparisons else None

    # NOTE(review): brand_primary is read as a dict here, while the agent
    # logging path reads brand_result.primary_color — confirm which attribute
    # BrandIdentification actually exposes.
    return HeadSynthesis(
        executive_summary=f"Your design system scores {overall}/100. Analysis completed with fallback synthesis.",
        scores={
            "overall": overall,
            "accessibility": accessibility,
            "consistency": overall,
            "organization": 50,
        },
        benchmark_fit={
            "closest": closest.benchmark.name if closest else "Unknown",
            "similarity": f"{closest.overall_match_pct:.0f}%" if closest else "N/A",
        },
        brand_analysis={
            "primary": brand_result.brand_primary.get("color", "Unknown") if brand_result else "Unknown",
            "cohesion": brand_result.cohesion_score if brand_result else 5,
        },
        top_3_actions=actions[:3],
        color_recommendations=[],
        type_scale_recommendation={
            "current_ratio": rule_results.typography.detected_ratio if has_rules else 1.0,
            "recommended_ratio": rule_results.typography.recommendation if has_rules else 1.25,
        },
        spacing_recommendation={
            "current": f"{rule_results.spacing.detected_base}px" if has_rules else "Unknown",
            "recommended": f"{rule_results.spacing.recommendation}px" if has_rules else "8px",
        },
    )
|
|
|
|
|
|
|
|
def create_stage2_error_response(error_msg: str):
    """Build the 11-element output tuple Stage 2 expects on failure.

    Slots mirror the success-path return: status text, logs, benchmark
    markdown, scores HTML, actions HTML, color rows, two typography tables,
    and three preview/recommendation HTML strings.
    """
    placeholder = f"<div class='placeholder-msg'>{error_msg}</div>"
    return (
        error_msg,         # status markdown
        state.get_logs(),  # live log text
        "",                # benchmark comparison markdown
        placeholder,       # scores dashboard HTML
        "",                # priority actions HTML
        [],                # color recommendation rows
        None,              # typography desktop table
        None,              # typography mobile table
        "",                # typography preview HTML
        "",                # color ramps preview HTML
        "",                # LLM recommendations HTML
    )
|
|
|
|
|
|
|
|
def format_stage2_status_v2(rule_results, final_synthesis, best_practices) -> str:
    """Format the Stage 2 status panel as markdown.

    Args:
        rule_results: Rule-engine results (aa_failures, typography, spacing,
            color_stats, consistency_score are read).
        final_synthesis: HEAD synthesizer result, or None — previously this
            function crashed on None; now it falls back to rule-engine values
            (same guard pattern used by the final-results logging path).
        best_practices: Best-practices result (currently unused here).

    Returns:
        Markdown string with overall score, summary, quick stats and cost.
    """
    lines = []
    lines.append("## ✅ Analysis Complete!")
    lines.append("")

    # Overall score: prefer the synthesized score, fall back to the
    # rule-engine consistency score when synthesis is missing.
    if final_synthesis:
        overall = final_synthesis.scores.get('overall', rule_results.consistency_score)
    else:
        overall = rule_results.consistency_score
    lines.append(f"### 🎯 Overall Score: {overall}/100")
    lines.append("")

    if final_synthesis and final_synthesis.executive_summary:
        lines.append(f"*{final_synthesis.executive_summary}*")
        lines.append("")

    lines.append("### 📊 Quick Stats")
    lines.append(f"- **AA Failures:** {rule_results.aa_failures}")
    lines.append(f"- **Type Scale:** {rule_results.typography.detected_ratio:.3f} ({rule_results.typography.scale_name})")
    lines.append(f"- **Spacing Grid:** {rule_results.spacing.detected_base}px ({rule_results.spacing.alignment_percentage:.0f}% aligned)")
    lines.append(f"- **Unique Colors:** {rule_results.color_stats.unique_count}")
    lines.append("")

    lines.append("### 💰 Cost")
    lines.append("**Total:** ~$0.003 (Rule Engine: $0 + LLM: ~$0.003)")

    return "\n".join(lines)
|
|
|
|
|
|
|
|
def format_benchmark_comparison_v2(benchmark_comparisons, benchmark_advice) -> str:
    """Render the benchmark comparison panel as markdown.

    Shows the advisor's recommended system (when present), a ranked
    similarity table for up to five benchmarks, and up to three alignment
    changes suggested by the advisor.
    """
    if not benchmark_comparisons:
        return "*No benchmark comparison available*"

    out = ["## 📊 Benchmark Comparison", ""]

    # NOTE(review): this reads `recommended_benchmark_name`, while the agent
    # logging path reads `recommended_system` — confirm which attribute
    # BenchmarkAdvice actually defines.
    if benchmark_advice and benchmark_advice.recommended_benchmark_name:
        out.append(f"### 🏆 Recommended: {benchmark_advice.recommended_benchmark_name}")
        if benchmark_advice.reasoning:
            out.append(f"*{benchmark_advice.reasoning[:200]}*")
        out.append("")

    out += [
        "### 📈 Similarity Ranking",
        "",
        "| Rank | Design System | Match | Type Ratio | Base | Grid |",
        "|------|---------------|-------|------------|------|------|",
    ]

    medals = ["🥇", "🥈", "🥉"]
    for rank, comparison in enumerate(benchmark_comparisons[:5]):
        label = medals[rank] if rank < len(medals) else str(rank + 1)
        bench = comparison.benchmark
        out.append(
            f"| {label} | {bench.icon} {bench.short_name} | {comparison.overall_match_pct:.0f}% | "
            f"{bench.typography.get('scale_ratio', '?')} | {bench.typography.get('base_size', '?')}px | "
            f"{bench.spacing.get('base', '?')}px |"
        )

    out.append("")

    if benchmark_advice and benchmark_advice.alignment_changes:
        out.append("### 🔧 Changes to Align")
        for change in benchmark_advice.alignment_changes[:3]:
            out.append(f"- **{change.get('change', '?')}**: {change.get('from', '?')} → {change.get('to', '?')} (effort: {change.get('effort', '?')})")

    return "\n".join(out)
|
|
|
|
|
|
|
|
def format_scores_dashboard_v2(rule_results, final_synthesis, best_practices) -> str:
    """Render the four-card scores dashboard as HTML.

    Args:
        rule_results: Rule-engine results (consistency_score, aa_failures read).
        final_synthesis: HEAD synthesizer result, or None — previously a None
            here crashed on `.scores`; now rule-engine fallbacks are used.
        best_practices: Best-practices result (currently unused here).

    Returns:
        HTML string with overall, accessibility, consistency and organization
        score cards plus their (light/dark) styling.
    """
    scores = final_synthesis.scores if final_synthesis else {}
    overall = scores.get('overall', rule_results.consistency_score)
    # Clamp the derived fallback at 0 so many AA failures can't render a
    # negative score (same clamping used by create_fallback_synthesis).
    accessibility = scores.get('accessibility', max(0, 100 - (rule_results.aa_failures * 5)))
    consistency = scores.get('consistency', rule_results.consistency_score)
    organization = scores.get('organization', 50)

    def score_color(score):
        # Traffic-light thresholds: green >= 80, amber >= 60, red otherwise.
        if score >= 80:
            return "#10b981"
        elif score >= 60:
            return "#f59e0b"
        else:
            return "#ef4444"

    html = f"""
    <style>
    .scores-grid {{ display: grid; grid-template-columns: repeat(4, 1fr); gap: 16px; margin: 20px 0; }}
    .score-card {{ border-radius: 12px; padding: 20px; text-align: center; }}
    .score-card-secondary {{ background: #f8fafc; border: 1px solid #e2e8f0; }}
    .score-card .score-label {{ font-size: 12px; color: #64748b; margin-top: 4px; }}
    .dark .score-card-secondary {{ background: #1e293b !important; border-color: #475569 !important; }}
    .dark .score-card .score-label {{ color: #94a3b8 !important; }}
    </style>
    <div class="scores-grid">
        <div class="score-card" style="background: linear-gradient(135deg, {score_color(overall)}22 0%, {score_color(overall)}11 100%);
                    border: 2px solid {score_color(overall)};">
            <div style="font-size: 32px; font-weight: 700; color: {score_color(overall)};">{overall}</div>
            <div class="score-label">OVERALL</div>
        </div>
        <div class="score-card score-card-secondary">
            <div style="font-size: 24px; font-weight: 600; color: {score_color(accessibility)};">{accessibility}</div>
            <div class="score-label">Accessibility</div>
        </div>
        <div class="score-card score-card-secondary">
            <div style="font-size: 24px; font-weight: 600; color: {score_color(consistency)};">{consistency}</div>
            <div class="score-label">Consistency</div>
        </div>
        <div class="score-card score-card-secondary">
            <div style="font-size: 24px; font-weight: 600; color: {score_color(organization)};">{organization}</div>
            <div class="score-label">Organization</div>
        </div>
    </div>
    """

    return html
|
|
|
|
|
|
|
|
def format_priority_actions_v2(rule_results, final_synthesis, best_practices) -> str:
    """Format the top-3 priority actions panel as HTML.

    Action sources, in priority order:
        1. final_synthesis.top_3_actions (HEAD synthesizer),
        2. best_practices.priority_fixes,
        3. fallback actions derived from rule-engine findings.

    Each action dict may carry "action", "details", "impact" and "effort".

    Returns:
        HTML string containing the styled action cards.
    """
    # Guard final_synthesis against None (consistent with the logging path,
    # which checks `final_synthesis and final_synthesis.top_3_actions`).
    actions = final_synthesis.top_3_actions if final_synthesis and final_synthesis.top_3_actions else []

    # Second choice: the best-practices validator's priority fixes.
    if not actions and best_practices and best_practices.priority_fixes:
        actions = best_practices.priority_fixes

    # Last resort: derive actions straight from the rule-engine results.
    if not actions:
        actions = []
        if rule_results.aa_failures > 0:
            actions.append({
                "action": f"Fix {rule_results.aa_failures} colors failing AA compliance",
                "impact": "high",
                "effort": "30 min",
            })
        if not rule_results.typography.is_consistent:
            actions.append({
                "action": f"Align type scale to {rule_results.typography.recommendation} ({rule_results.typography.recommendation_name})",
                "impact": "medium",
                "effort": "1 hour",
            })
        if rule_results.color_stats.unique_count > 30:
            actions.append({
                "action": f"Consolidate {rule_results.color_stats.unique_count} colors to ~15 semantic colors",
                "impact": "medium",
                "effort": "2 hours",
            })

    html_items = []
    for i, action in enumerate(actions[:3]):
        impact = action.get('impact', 'medium')
        # Impact level drives the card accent, badge colors and icon.
        border_color = "#ef4444" if impact == "high" else "#f59e0b" if impact == "medium" else "#10b981"
        impact_bg = "#fee2e2" if impact == "high" else "#fef3c7" if impact == "medium" else "#dcfce7"
        impact_text = "#991b1b" if impact == "high" else "#92400e" if impact == "medium" else "#166534"
        icon = "🔴" if impact == "high" else "🟡" if impact == "medium" else "🟢"

        html_items.append(f"""
        <div class="priority-action-card" style="border-left: 4px solid {border_color};">
            <div style="display: flex; justify-content: space-between; align-items: flex-start;">
                <div>
                    <div class="priority-action-title">
                        {icon} {action.get('action', 'N/A')}
                    </div>
                    <div class="priority-action-detail">
                        {action.get('details', '')}
                    </div>
                </div>
                <div style="display: flex; gap: 8px;">
                    <span style="background: {impact_bg}; color: {impact_text}; padding: 4px 8px;
                                 border-radius: 12px; font-size: 11px; font-weight: 600;">
                        {impact.upper()}
                    </span>
                    <span class="priority-effort-badge">
                        {action.get('effort', '?')}
                    </span>
                </div>
            </div>
        </div>
        """)

    return f"""
    <style>
    .priority-actions-wrap {{ margin: 20px 0; }}
    .priority-actions-wrap h3 {{ margin-bottom: 16px; color: #1e293b; }}
    .priority-action-card {{ background: white; border: 1px solid #e2e8f0; border-radius: 8px; padding: 16px; margin-bottom: 12px; }}
    .priority-action-title {{ font-weight: 600; color: #1e293b; margin-bottom: 4px; }}
    .priority-action-detail {{ font-size: 13px; color: #64748b; }}
    .priority-effort-badge {{ background: #f1f5f9; color: #475569; padding: 4px 8px; border-radius: 12px; font-size: 11px; }}
    .dark .priority-actions-wrap h3 {{ color: #f1f5f9 !important; }}
    .dark .priority-action-card {{ background: #1e293b !important; border-color: #475569 !important; }}
    .dark .priority-action-title {{ color: #f1f5f9 !important; }}
    .dark .priority-action-detail {{ color: #94a3b8 !important; }}
    .dark .priority-effort-badge {{ background: #334155 !important; color: #cbd5e1 !important; }}
    </style>
    <div class="priority-actions-wrap">
        <h3>🎯 Priority Actions</h3>
        {''.join(html_items)}
    </div>
    """
|
|
|
|
|
|
|
|
def format_color_recommendations_table_v2(rule_results, brand_result, final_synthesis) -> list:
    """Build the Stage 2 color recommendation table rows.

    Each row is [accept?, role, current hex, reason, suggested hex, new
    contrast]. Rule-engine AA failures come first; LLM suggestions are
    appended only when they target a color not already covered.
    """
    rows = []

    # AA failures detected by the rule engine — only those with a concrete fix.
    for check in rule_results.accessibility:
        if check.passes_aa_normal or not check.suggested_fix:
            continue
        is_brand_primary = bool(brand_result) and brand_result.brand_primary.get("color") == check.hex_color
        rows.append([
            True,
            "brand.primary" if is_brand_primary else check.name,
            check.hex_color,
            f"Fails AA ({check.contrast_on_white:.1f}:1)",
            check.suggested_fix,
            f"{check.suggested_fix_contrast:.1f}:1",
        ])

    # LLM suggestions, skipping no-op changes and colors already listed above.
    llm_recs = final_synthesis.color_recommendations if final_synthesis else None
    for rec in llm_recs or []:
        if rec.get("current") == rec.get("suggested"):
            continue
        if any(row[2] == rec.get("current") for row in rows):
            continue
        rows.append([
            rec.get("accept", True),
            rec.get("role", "unknown"),
            rec.get("current", ""),
            rec.get("reason", ""),
            rec.get("suggested", ""),
            "",
        ])

    return rows
|
|
|
|
|
|
|
|
def build_analysis_status(final_recs: dict, cost_tracking: dict, errors: list) -> str:
    """Build status markdown from multi-agent analysis results.

    Args:
        final_recs: Combined recommendations payload (may be None or empty).
        cost_tracking: Cost summary with a "total_cost" key (may be None).
        errors: Warning strings to surface, at most three shown (may be None).

    Returns:
        Markdown string for the status panel.
    """
    # Normalize optional inputs up front — the original guarded the first
    # `final_recs` access but crashed on `final_recs.get(...)` when None.
    final_recs = final_recs or {}
    errors = errors or []

    lines = ["## 🧠 Multi-Agent Analysis Complete!"]
    lines.append("")

    if cost_tracking:
        total_cost = cost_tracking.get("total_cost", 0)
        lines.append("### 💰 Cost Summary")
        lines.append(f"**Total estimated cost:** ${total_cost:.4f}")
        lines.append("*(Free tier: $0.10/mo | Pro: $2.00/mo)*")
        lines.append("")

    if "final_recommendations" in final_recs:
        recs = final_recs["final_recommendations"]
        lines.append("### 📋 Recommendations")

        if recs.get("type_scale"):
            lines.append(f"**Type Scale:** {recs['type_scale']}")
            if recs.get("type_scale_rationale"):
                lines.append(f" *{recs['type_scale_rationale'][:100]}*")

        if recs.get("spacing_base"):
            lines.append(f"**Spacing:** {recs['spacing_base']}")

        lines.append("")

    if final_recs.get("summary"):
        lines.append("### 📝 Summary")
        lines.append(final_recs["summary"])
        lines.append("")

    if final_recs.get("overall_confidence"):
        lines.append(f"**Confidence:** {final_recs['overall_confidence']}%")

    if errors:
        lines.append("")
        lines.append("### ⚠️ Warnings")
        for err in errors[:3]:
            lines.append(f"- {err[:100]}")

    return "\n".join(lines)
|
|
|
|
|
|
|
|
def format_multi_agent_comparison(llm1: dict, llm2: dict, final: dict) -> str:
    """Render agreements, resolved disagreements and a per-category score
    table comparing the two LLM analyses as markdown."""
    lines = ["### 📊 Multi-Agent Analysis Comparison", ""]

    agreements = final.get("agreements")
    if agreements:
        lines.append("#### ✅ Agreements (High Confidence)")
        for item in agreements[:5]:
            lines.append(f"- **{item.get('topic', '?')}**: {item.get('finding', '?')[:80]}")
        lines.append("")

    disagreements = final.get("disagreements")
    if disagreements:
        lines.append("#### 🔄 Resolved Disagreements")
        for item in disagreements[:3]:
            lines.append(f"- **{item.get('topic', '?')}**: {item.get('resolution', '?')[:100]}")
        lines.append("")

    lines += [
        "#### 📈 Score Comparison",
        "",
        "| Category | LLM 1 (Qwen) | LLM 2 (Llama) |",
        "|----------|--------------|---------------|",
    ]

    def _score(analysis: dict, category: str):
        # "?" when the category is missing or not the expected dict shape.
        entry = analysis.get(category)
        return entry.get("score", "?") if isinstance(entry, dict) else "?"

    for cat in ("typography", "colors", "accessibility", "spacing"):
        lines.append(f"| {cat.title()} | {_score(llm1, cat)}/10 | {_score(llm2, cat)}/10 |")

    return "\n".join(lines)
|
|
|
|
|
|
|
|
def format_spacing_comparison_from_rules(rule_calculations: dict) -> list:
    """Build ten [current, 8px-grid, 4px-grid] rows for the spacing table.

    The "current" column is a synthetic 4px ladder for the first five rows
    and an 8px ladder after that; grid columns show an em dash once the
    option list runs out of values.
    """
    if not rule_calculations:
        return []

    options = rule_calculations.get("spacing_options", {})
    grid_8 = options.get("8px", [])
    grid_4 = options.get("4px", [])

    def _cell(grid, idx):
        # Em dash past the end of the available grid values.
        return f"{grid[idx]}px" if idx < len(grid) else "—"

    rows = []
    for i in range(10):
        step = 4 if i < 5 else 8
        rows.append([f"{(i + 1) * step}px", _cell(grid_8, i + 1), _cell(grid_4, i + 1)])

    return rows
|
|
|
|
|
|
|
|
def format_color_ramps_from_rules(rule_calculations: dict) -> str: |
|
|
"""Format color ramps from rule engine.""" |
|
|
if not rule_calculations: |
|
|
return "*No color ramps generated*" |
|
|
|
|
|
ramps = rule_calculations.get("color_ramps", {}) |
|
|
if not ramps: |
|
|
return "*No color ramps generated*" |
|
|
|
|
|
lines = ["### 🌈 Generated Color Ramps"] |
|
|
lines.append("") |
|
|
|
|
|
for name, ramp in list(ramps.items())[:6]: |
|
|
lines.append(f"**{name}**") |
|
|
if isinstance(ramp, list) and len(ramp) >= 10: |
|
|
lines.append("| 50 | 100 | 200 | 300 | 400 | 500 | 600 | 700 | 800 | 900 |") |
|
|
lines.append("|---|---|---|---|---|---|---|---|---|---|") |
|
|
row = "| " + " | ".join([f"`{ramp[i]}`" for i in range(10)]) + " |" |
|
|
lines.append(row) |
|
|
lines.append("") |
|
|
|
|
|
return "\n".join(lines) |
|
|
|
|
|
|
|
|
def get_detected_fonts() -> dict:
    """Aggregate font usage from the normalized desktop typography tokens.

    Returns:
        dict with:
            primary: most frequently used font family (by token frequency),
            weights: sorted numeric weights seen ([400] when none parse),
            all_fonts: mapping of family -> summed frequency.
        The "all_fonts" key is absent when no desktop tokens are loaded.
    """
    if not state.desktop_normalized:
        return {"primary": "Unknown", "weights": []}

    fonts = {}
    weights = set()

    for token in state.desktop_normalized.typography.values():
        family = token.font_family
        fonts[family] = fonts.get(family, 0) + token.frequency

        if token.font_weight:
            # Weights may arrive as strings like "700"; skip values that
            # don't parse instead of swallowing everything with a bare except.
            try:
                weights.add(int(token.font_weight))
            except (TypeError, ValueError):
                pass

    primary = max(fonts.items(), key=lambda item: item[1])[0] if fonts else "Unknown"

    return {
        "primary": primary,
        "weights": sorted(weights) if weights else [400],
        "all_fonts": fonts,
    }
|
|
|
|
|
|
|
|
def get_base_font_size() -> int:
    """Detect the base (body) font size from desktop typography tokens.

    Considers only sizes in the plausible body-text range 14–18 and picks
    the most frequent one (weighted by token frequency); returns 16 when no
    tokens are loaded or nothing qualifies.

    Note: unit suffixes are stripped textually, so rem/em values become raw
    numbers — only px-scale values survive the 14–18 filter anyway.
    """
    if not state.desktop_normalized:
        return 16

    sizes = {}
    for token in state.desktop_normalized.typography.values():
        size_str = str(token.font_size).replace('px', '').replace('rem', '').replace('em', '')
        try:
            size = float(size_str)
        except ValueError:
            # Non-numeric size token (was a silent bare except).
            continue
        if 14 <= size <= 18:
            sizes[size] = sizes.get(size, 0) + token.frequency

    if sizes:
        return int(max(sizes.items(), key=lambda item: item[1])[0])
    return 16
|
|
|
|
|
|
|
|
def format_brand_comparison(recommendations) -> str:
    """Render the top-brand design-system comparison as a markdown table.

    Reads recommendations.brand_analysis (list of dicts with keys
    brand/ratio/base/spacing/notes) and shows up to five rows.
    """
    analysis = recommendations.brand_analysis
    if not analysis:
        return "*Brand analysis not available*"

    rows = [
        "### 📊 Design System Comparison (5 Top Brands)",
        "",
        "| Brand | Type Ratio | Base Size | Spacing | Notes |",
        "|-------|------------|-----------|---------|-------|",
    ]

    for entry in analysis[:5]:
        raw_notes = entry.get("notes", "")
        # Truncate long notes with an ellipsis so rows stay compact.
        notes = raw_notes[:50] + ("..." if len(raw_notes) > 50 else "")
        rows.append(
            "| {} | {} | {}px | {} | {} |".format(
                entry.get("brand", "Unknown"),
                entry.get("ratio", "?"),
                entry.get("base", "?"),
                entry.get("spacing", "?"),
                notes,
            )
        )

    return "\n".join(rows)
|
|
|
|
|
|
|
|
def format_font_families_display(fonts: dict) -> str:
    """Render detected font families as markdown.

    Shows the primary family and its weights; when more than one family
    was detected, adds a usage-count table (top five by frequency).
    """
    primary = fonts.get("primary", "Unknown")
    weights = fonts.get("weights", [400])
    all_fonts = fonts.get("all_fonts", {})

    out = [
        f"### Primary Font: **{primary}**",
        "",
        f"**Weights detected:** {', '.join(map(str, weights))}",
        "",
    ]

    # Only show the breakdown table when more than one family was seen.
    if all_fonts and len(all_fonts) > 1:
        out.append("### All Fonts Detected")
        out.append("")
        out.append("| Font Family | Usage Count |")
        out.append("|-------------|-------------|")
        for family, count in sorted(all_fonts.items(), key=lambda x: -x[1])[:5]:
            out.append(f"| {family} | {count:,} |")

    out.append("")
    out.append("*Note: This analysis focuses on English typography only.*")

    return "\n".join(out)
|
|
|
|
|
|
|
|
def format_llm_color_recommendations_html(final_recs: dict, semantic_analysis: dict) -> str:
    """Generate HTML showing LLM color recommendations with before/after comparison.

    Args:
        final_recs: Advisor output; reads "color_recommendations"
            (role -> {current, suggested, action, rationale}) and
            "accessibility_fixes" (list of {color, role, issue, fix,
            current_contrast, fixed_contrast}).
        semantic_analysis: not read by this function.
            # NOTE(review): unused parameter — presumably kept for
            # call-site symmetry; confirm before removing.

    Returns:
        Self-contained HTML (inline <style> included) for a Gradio HTML
        component; placeholder / "all good" panels when nothing to show.
    """

    # Nothing analyzed yet: prompt the user to run analysis first.
    if not final_recs:
        return '''
        <div class="placeholder-msg" style="text-align: center;">
            <p>No LLM recommendations available yet. Run analysis first.</p>
        </div>
        '''

    color_recs = final_recs.get("color_recommendations", {})
    aa_fixes = final_recs.get("accessibility_fixes", [])

    # Analysis ran but produced no suggestions at all.
    if not color_recs and not aa_fixes:
        return '''
        <div class="llm-no-recs" style="padding: 20px; border-radius: 8px; border: 1px solid #28a745; background: #d4edda;">
            <p style="margin: 0; color: #155724;">✅ No color changes recommended. Your colors look good!</p>
        </div>
        <style>
        .dark .llm-no-recs { background: #14532d !important; border-color: #22c55e !important; }
        .dark .llm-no-recs p { color: #86efac !important; }
        </style>
        '''

    recs_html = ""

    # One row per color role: a green "keep" row or an amber "change" row.
    for role, rec in color_recs.items():
        if not isinstance(rec, dict):
            continue
        # Skip meta keys that live alongside the per-role entries.
        if role in ["generate_ramps_for", "changes_made"]:
            continue

        current = rec.get("current", "?")
        suggested = rec.get("suggested", current)
        action = rec.get("action", "keep")
        rationale = rec.get("rationale", "")

        if action == "keep" or suggested == current:
            # No change proposed: single swatch + "Keep" badge.
            recs_html += f'''
            <div class="llm-rec-row keep">
                <div class="rec-color-box" style="background: {current};"></div>
                <div class="rec-details">
                    <span class="rec-role">{role}</span>
                    <span class="rec-current">{current}</span>
                    <span class="rec-action keep">✓ Keep</span>
                </div>
            </div>
            '''
        else:
            # Proposed change: before/after swatches plus truncated rationale.
            recs_html += f'''
            <div class="llm-rec-row change">
                <div class="rec-comparison">
                    <div class="rec-before">
                        <div class="rec-color-box" style="background: {current};"></div>
                        <span class="rec-label">Before</span>
                        <span class="rec-hex">{current}</span>
                    </div>
                    <span class="rec-arrow">→</span>
                    <div class="rec-after">
                        <div class="rec-color-box" style="background: {suggested};"></div>
                        <span class="rec-label">After</span>
                        <span class="rec-hex">{suggested}</span>
                    </div>
                </div>
                <div class="rec-details">
                    <span class="rec-role">{role}</span>
                    <span class="rec-rationale">{rationale[:80]}...</span>
                </div>
            </div>
            '''

    # Accessibility (contrast) fixes: red-accented rows with contrast ratios.
    for fix in aa_fixes:
        if not isinstance(fix, dict):
            continue

        color = fix.get("color", "?")
        role = fix.get("role", "unknown")
        issue = fix.get("issue", "contrast issue")
        fix_color = fix.get("fix", color)
        current_contrast = fix.get("current_contrast", "?")
        fixed_contrast = fix.get("fixed_contrast", "?")

        # Only render when a distinct replacement color was proposed.
        if fix_color and fix_color != color:
            recs_html += f'''
            <div class="llm-rec-row aa-fix">
                <div class="rec-comparison">
                    <div class="rec-before">
                        <div class="rec-color-box" style="background: {color};"></div>
                        <span class="rec-label">⚠️ {current_contrast}:1</span>
                        <span class="rec-hex">{color}</span>
                    </div>
                    <span class="rec-arrow">→</span>
                    <div class="rec-after">
                        <div class="rec-color-box" style="background: {fix_color};"></div>
                        <span class="rec-label">✓ {fixed_contrast}:1</span>
                        <span class="rec-hex">{fix_color}</span>
                    </div>
                </div>
                <div class="rec-details">
                    <span class="rec-role">{role}</span>
                    <span class="rec-issue">🔴 {issue}</span>
                </div>
            </div>
            '''

    # Every entry was filtered out (e.g. all "keep" actions with no AA
    # fixes rendered): same "all good" panel as the early return above.
    if not recs_html:
        return '''
        <div class="llm-no-recs" style="padding: 20px; border-radius: 8px; border: 1px solid #28a745; background: #d4edda;">
            <p style="margin: 0; color: #155724;">✅ No color changes recommended. Your colors look good!</p>
        </div>
        <style>
        .dark .llm-no-recs { background: #14532d !important; border-color: #22c55e !important; }
        .dark .llm-no-recs p { color: #86efac !important; }
        </style>
        '''

    # Wrap the rows in a styled container. Doubled braces escape literal
    # CSS braces inside the f-string.
    html = f'''
    <style>
    .llm-recs-container {{
        font-family: system-ui, -apple-system, sans-serif;
        background: #f5f5f5 !important;
        border-radius: 12px;
        padding: 16px;
    }}

    .llm-rec-row {{
        display: flex;
        align-items: center;
        padding: 12px;
        margin-bottom: 12px;
        border-radius: 8px;
        background: #ffffff !important;
        border: 1px solid #e0e0e0 !important;
    }}

    .llm-rec-row.change {{
        border-left: 4px solid #f59e0b !important;
    }}

    .llm-rec-row.aa-fix {{
        border-left: 4px solid #dc2626 !important;
        background: #fef2f2 !important;
    }}

    .llm-rec-row.keep {{
        border-left: 4px solid #22c55e !important;
        background: #f0fdf4 !important;
    }}

    .rec-comparison {{
        display: flex;
        align-items: center;
        gap: 12px;
        margin-right: 20px;
    }}

    .rec-before, .rec-after {{
        display: flex;
        flex-direction: column;
        align-items: center;
        gap: 4px;
    }}

    .rec-color-box {{
        width: 48px;
        height: 48px;
        border-radius: 8px;
        border: 2px solid rgba(0,0,0,0.15) !important;
        box-shadow: 0 2px 4px rgba(0,0,0,0.1);
    }}

    .rec-label {{
        font-size: 11px;
        font-weight: 600;
        color: #666 !important;
    }}

    .rec-hex {{
        font-family: 'SF Mono', Monaco, monospace;
        font-size: 11px;
        color: #333 !important;
    }}

    .rec-arrow {{
        font-size: 20px;
        color: #666 !important;
        font-weight: bold;
    }}

    .rec-details {{
        flex: 1;
        display: flex;
        flex-direction: column;
        gap: 4px;
    }}

    .rec-role {{
        font-weight: 700;
        font-size: 14px;
        color: #1a1a1a !important;
    }}

    .rec-action {{
        font-size: 12px;
        padding: 2px 8px;
        border-radius: 4px;
    }}

    .rec-action.keep {{
        background: #dcfce7 !important;
        color: #166534 !important;
    }}

    .rec-rationale {{
        font-size: 12px;
        color: #666 !important;
    }}

    .rec-issue {{
        font-size: 12px;
        color: #991b1b !important;
        font-weight: 500;
    }}

    /* Dark mode */
    .dark .llm-recs-container {{ background: #0f172a !important; }}
    .dark .llm-rec-row {{ background: #1e293b !important; border-color: #475569 !important; }}
    .dark .llm-rec-row.aa-fix {{ background: #450a0a !important; }}
    .dark .llm-rec-row.keep {{ background: #14532d !important; }}
    .dark .rec-label {{ color: #94a3b8 !important; }}
    .dark .rec-hex {{ color: #cbd5e1 !important; }}
    .dark .rec-arrow {{ color: #94a3b8 !important; }}
    .dark .rec-role {{ color: #f1f5f9 !important; }}
    .dark .rec-rationale {{ color: #94a3b8 !important; }}
    .dark .rec-issue {{ color: #fca5a5 !important; }}
    .dark .rec-action.keep {{ background: #14532d !important; color: #86efac !important; }}
    </style>

    <div class="llm-recs-container">
        {recs_html}
    </div>
    '''

    return html
|
|
|
|
|
|
|
|
def format_llm_color_recommendations_table(final_recs: dict, semantic_analysis: dict) -> list:
    """Generate table rows for LLM color recommendations with accept/reject checkboxes.

    Each row is [accept(bool), role, current, reason, suggested, contrast].
    Rows are emitted only for actual changes (action != "keep" and the
    suggested value differs). *semantic_analysis* is currently unused.
    """
    rows = []
    if not final_recs:
        return rows

    color_recs = final_recs.get("color_recommendations", {})
    aa_fixes = final_recs.get("accessibility_fixes", [])

    # Pseudo-entries in the recommendations dict that are not color roles.
    skip_roles = {"generate_ramps_for", "changes_made"}

    for role, rec in color_recs.items():
        if not isinstance(rec, dict) or role in skip_roles:
            continue

        current = rec.get("current", "?")
        suggested = rec.get("suggested", current)
        action = rec.get("action", "keep")
        rationale = rec.get("rationale", "")[:50]

        if action == "keep" or suggested == current:
            continue

        # Best-effort contrast preview; "?" when the helper is unavailable
        # or the color values cannot be parsed.
        try:
            from core.color_utils import get_contrast_with_white
            old_contrast = get_contrast_with_white(current)
            new_contrast = get_contrast_with_white(suggested)
            contrast_str = f"{old_contrast:.1f} → {new_contrast:.1f}"
        except Exception:
            contrast_str = "?"

        rows.append([True, role, current, rationale or action, suggested, contrast_str])

    for fix in aa_fixes:
        if not isinstance(fix, dict):
            continue

        color = fix.get("color", "?")
        fix_color = fix.get("fix", color)
        # Only emit a row when a distinct replacement was proposed.
        if not fix_color or fix_color == color:
            continue

        rows.append([
            True,
            f"{fix.get('role', 'unknown')} (AA fix)",
            color,
            fix.get("issue", "contrast")[:40],
            fix_color,
            f"{fix.get('current_contrast', '?')}:1 → {fix.get('fixed_contrast', '?')}:1",
        ])

    return rows
|
|
|
|
|
|
|
|
def format_typography_comparison_viewport(normalized_tokens, base_size: int, viewport: str) -> list:
    """Format typography comparison rows for one viewport.

    Returns one row per canonical token name:
    [token, current, 1.2-scale, 1.25-scale, 1.333-scale, keep]
    where "keep" mirrors the current size and missing slots show "—".
    Mobile sizes are scaled to 87.5% of desktop.
    """
    if not normalized_tokens:
        return []

    current_typo = list(normalized_tokens.typography.values())

    def parse_size(t):
        # Best-effort numeric parse of a CSS size; 16 when unparseable.
        size_str = str(t.font_size).replace('px', '').replace('rem', '').replace('em', '')
        try:
            return float(size_str)
        except (TypeError, ValueError):
            return 16

    # Largest detected size first, so it lines up with display.2xl.
    current_typo.sort(key=lambda t: -parse_size(t))
    sizes = [parse_size(t) for t in current_typo]

    base = base_size if base_size else 16
    mobile_factor = 0.875 if viewport == "mobile" else 1.0

    token_names = [
        "display.2xl", "display.xl", "display.lg", "display.md",
        "heading.xl", "heading.lg", "heading.md", "heading.sm",
        "body.lg", "body.md", "body.sm",
        "caption", "overline"
    ]

    def round_to_even(val):
        """Round to even numbers for cleaner type scales."""
        return int(round(val / 2) * 2)

    # Modular scales anchored so index 8 (body.lg slot) is the base size;
    # larger indices shrink, smaller ones grow.
    scales = {
        label: [round_to_even(base * mobile_factor * (ratio ** (8 - i))) for i in range(13)]
        for label, ratio in (("1.2", 1.2), ("1.25", 1.25), ("1.333", 1.333))
    }

    data = []
    for i, name in enumerate(token_names):
        current = f"{int(sizes[i])}px" if i < len(sizes) else "—"
        data.append([
            name,
            current,
            f"{scales['1.2'][i]}px",
            f"{scales['1.25'][i]}px",
            f"{scales['1.333'][i]}px",
            current,  # the "keep" column defaults to the detected size
        ])

    return data
|
|
|
|
|
|
|
|
def format_base_colors() -> str:
    """Render the detected base colors as a markdown table.

    Shows up to ten colors ordered by usage frequency, with a coarse role
    guess derived from the suggested token name.
    """
    if not state.desktop_normalized:
        return "*No colors detected*"

    palette = sorted(state.desktop_normalized.colors.values(), key=lambda c: -c.frequency)

    lines = [
        "### 🎨 Base Colors (Detected)",
        "",
        "These are the primary colors extracted from your website:",
        "",
        "| Color | Hex | Role | Frequency | Contrast |",
        "|-------|-----|------|-----------|----------|",
    ]

    for color in palette[:10]:
        # Classify by keywords in the suggested token name; unknown/empty
        # names fall through to "Accent".
        name = (color.suggested_name or "").lower()
        if "primary" in name:
            role = "Primary"
        elif "text" in name:
            role = "Text"
        elif "background" in name:
            role = "Background"
        elif "border" in name:
            role = "Border"
        else:
            role = "Accent"

        contrast = f"{color.contrast_white:.1f}:1" if color.contrast_white else "—"
        lines.append(f"| 🟦 | `{color.value}` | {role} | {color.frequency:,} | {contrast} |")

    return "\n".join(lines)
|
|
|
|
|
|
|
|
def format_color_ramps_visual(recommendations) -> str:
    """Render generated color ramps (50-900 columns) for the top detected colors.

    The *recommendations* argument is accepted for call-site symmetry but
    not read; ramps are regenerated from the normalized desktop colors.
    """
    if not state.desktop_normalized:
        return "*No colors to display*"

    palette = sorted(state.desktop_normalized.colors.values(), key=lambda c: -c.frequency)

    lines = [
        "### 🌈 Generated Color Ramps",
        "",
        "Full ramp (50-950) generated for each base color:",
        "",
    ]

    from core.color_utils import generate_color_ramp

    for entry in palette[:6]:
        hex_val = entry.value
        suggested = entry.suggested_name
        role = suggested.split('.')[1] if suggested and '.' in suggested else "color"

        try:
            ramp = generate_color_ramp(hex_val)
        except Exception as e:
            # Keep going with the remaining colors on failure.
            lines.append(f"**{role}** (`{hex_val}`) — Could not generate ramp: {str(e)}")
            lines.append("")
            continue

        lines.append(f"**{role.upper()}** (base: `{hex_val}`)")
        lines.append("")
        lines.append("| 50 | 100 | 200 | 300 | 400 | 500 | 600 | 700 | 800 | 900 |")
        lines.append("|---|---|---|---|---|---|---|---|---|---|")
        cells = [f"`{ramp[i]}`" if i < len(ramp) else "—" for i in range(10)]
        lines.append("| " + " | ".join(cells) + " |")
        lines.append("")

    return "\n".join(lines)
|
|
|
|
|
|
|
|
def format_radius_with_tokens() -> str:
    """Format radius with token name suggestions.

    Sorts detected radii ascending and maps each onto a radius.* token via
    pixel ranges; percentage values and >=50px map to radius.full.
    """
    if not state.desktop_normalized or not state.desktop_normalized.radius:
        return "*No border radius values detected.*"

    radii = list(state.desktop_normalized.radius.values())

    lines = [
        "### 🔘 Border Radius Tokens",
        "",
        "| Detected | Suggested Token | Usage |",
        "|----------|-----------------|-------|",
    ]

    def parse_radius(r):
        """Numeric px value of a radius token; 999 sorts unparseable values last."""
        val = str(r.value).replace('px', '').replace('%', '')
        try:
            return float(val)
        except ValueError:
            return 999

    radii.sort(key=parse_radius)

    # (low, high) px range -> (token, typical usage).
    token_map = {
        (0, 2): ("radius.none", "Sharp corners"),
        (2, 4): ("radius.xs", "Subtle rounding"),
        (4, 6): ("radius.sm", "Small elements"),
        (6, 10): ("radius.md", "Buttons, cards"),
        (10, 16): ("radius.lg", "Modals, panels"),
        (16, 32): ("radius.xl", "Large containers"),
        (32, 100): ("radius.2xl", "Pill shapes"),
    }

    for r in radii[:8]:
        val = str(r.value)
        px = parse_radius(r)

        if "%" in str(r.value) or px >= 50:
            # Percentage radii and very large px values mean fully-round.
            token, usage = "radius.full", "Circles, avatars"
        else:
            # Default when no range matches (e.g. negative values).
            token, usage = "radius.md", "General use"
            for (low, high), (t, u) in token_map.items():
                if low <= px < high:
                    token, usage = t, u
                    break

        lines.append(f"| {val} | `{token}` | {usage} |")

    return "\n".join(lines)
|
|
|
|
|
|
|
|
def format_shadows_with_tokens() -> str:
    """Format shadows with token name suggestions.

    Maps up to six detected shadows (in extraction order) onto a fixed
    xs→2xl token ladder and renders a markdown table.
    """
    if not state.desktop_normalized or not state.desktop_normalized.shadows:
        return "*No shadow values detected.*"

    shadows = list(state.desktop_normalized.shadows.values())

    lines = [
        "### 🌫️ Shadow Tokens",
        "",
        "| Detected Value | Suggested Token | Use Case |",
        "|----------------|-----------------|----------|",
    ]

    # Hoisted out of the loop: token ladder and one use-case per position.
    shadow_sizes = ["shadow.xs", "shadow.sm", "shadow.md", "shadow.lg", "shadow.xl", "shadow.2xl"]
    use_cases = ["Subtle elevation", "Cards, dropdowns", "Modals, dialogs", "Popovers", "Floating elements", "Dramatic effect"]

    for i, s in enumerate(shadows[:6]):
        # Truncate long CSS shadow values so the table stays readable.
        val = str(s.value)[:40] + ("..." if len(str(s.value)) > 40 else "")
        token = shadow_sizes[i] if i < len(shadow_sizes) else f"shadow.custom-{i}"
        use = use_cases[i] if i < len(use_cases) else "Custom"
        lines.append(f"| `{val}` | `{token}` | {use} |")

    return "\n".join(lines)
|
|
|
|
|
|
|
|
def format_spacing_comparison(recommendations) -> list:
    """Build rows comparing detected spacing values against 8px/4px grids.

    Each row is [current, 8px-grid value, 4px-grid value]; a ✓ marks values
    already on that grid. Returns [] when no tokens have been extracted.
    """
    if not state.desktop_normalized:
        return []

    tokens = sorted(state.desktop_normalized.spacing.values(), key=lambda s: s.value_px)

    rows = []
    for token in tokens[:10]:
        px = token.value_px
        snapped8 = snap_to_grid(px, 8)
        snapped4 = snap_to_grid(px, 4)

        # Mark values that are already grid-aligned.
        col8 = f"{snapped8}px" + (" ✓" if px == snapped8 else "")
        col4 = f"{snapped4}px" + (" ✓" if px == snapped4 else "")

        rows.append([f"{px}px", col8, col4])

    return rows
|
|
|
|
|
|
|
|
def snap_to_grid(value: float, base: int) -> int:
    """Return *value* rounded to the nearest multiple of *base*.

    Uses Python's round-half-to-even behavior for exact .5 quotients,
    matching the original implementation.
    """
    multiples = round(value / base)
    return multiples * base
|
|
|
|
|
|
|
|
def apply_selected_upgrades(type_choice: str, spacing_choice: str, apply_ramps: bool, color_recs_table: Optional[list] = None):
    """Apply selected upgrade options including LLM color recommendations.

    Records the user's Stage 2 choices on the global state (read later by
    export_tokens_json) and writes an audit trail to the app log.

    Args:
        type_choice: chosen type-scale option label.
        spacing_choice: chosen spacing-grid option label.
        apply_ramps: whether to generate full color ramps on export.
        color_recs_table: rows of [accept, role, current, issue, suggested, ...]
            from the Stage 2 review table (may be None/empty).

    Returns:
        (status message, full log text) for the Gradio outputs.
    """
    # Stage 2 requires the advisor analysis to have run first.
    if not state.upgrade_recommendations:
        return "❌ Run analysis first", ""

    state.log("✨ Applying selected upgrades...")

    # Persist the coarse choices; export_tokens_json reads these keys.
    state.selected_upgrades = {
        "type_scale": type_choice,
        "spacing": spacing_choice,
        "color_ramps": apply_ramps,
    }

    state.log(f" Type Scale: {type_choice}")
    state.log(f" Spacing: {spacing_choice}")
    state.log(f" Color Ramps: {'Yes' if apply_ramps else 'No'}")

    # Collect the per-color accept/reject decisions from the review table.
    accepted_color_changes = []
    if color_recs_table:
        state.log("")
        state.log(" 🎨 LLM Color Recommendations:")
        for row in color_recs_table:
            # Defensive: skip malformed/short rows.
            if len(row) >= 5:
                accept = row[0]
                role = row[1]
                current = row[2]
                issue = row[3]
                suggested = row[4]

                # Only record rows the user accepted that actually change
                # the color value.
                if accept and suggested and current != suggested:
                    accepted_color_changes.append({
                        "role": role,
                        "from": current,
                        "to": suggested,
                        "reason": issue,
                    })
                    state.log(f" ├─ ✅ ACCEPTED: {role}")
                    state.log(f" │ └─ {current} → {suggested}")
                elif not accept:
                    state.log(f" ├─ ❌ REJECTED: {role} (keeping {current})")

    # Stored for export. NOTE(review): export_tokens_json does not read
    # "color_changes" yet — confirm where these are actually applied.
    state.selected_upgrades["color_changes"] = accepted_color_changes

    if accepted_color_changes:
        state.log("")
        state.log(f" 📊 {len(accepted_color_changes)} color change(s) will be applied to export")

    state.log("")
    state.log("✅ Upgrades applied! Proceed to Stage 3 for export.")

    return "✅ Upgrades applied! Proceed to Stage 3 to export.", state.get_logs()
|
|
|
|
|
|
|
|
def export_stage1_json():
    """Export Stage 1 tokens (as-is extraction) to JSON - FLAT structure for Figma Tokens Studio.

    Emits detected (un-upgraded) fonts, colors, typography, spacing,
    radius and shadow tokens. Typography and spacing get per-viewport
    keys (.desktop / .mobile); colors, radius and shadows are desktop-only.

    Returns:
        A pretty-printed JSON string, or an error payload when no
        extraction has run yet.
    """
    if not state.desktop_normalized:
        return json.dumps({"error": "No tokens extracted. Please run extraction first."}, indent=2)

    def _token_key(raw: str, prefix: str, *, dash: bool = True) -> str:
        """Normalize a raw token name to dotted lowercase under *prefix*.

        dash=False preserves hyphens — the original color-name behavior,
        which differs from the other token categories.
        """
        cleaned = raw.replace(" ", ".").replace("_", ".")
        if dash:
            cleaned = cleaned.replace("-", ".")
        cleaned = cleaned.lower()
        if not cleaned.startswith(f"{prefix}."):
            cleaned = f"{prefix}.{cleaned}"
        return cleaned

    result = {
        "metadata": {
            "source_url": state.base_url,
            "extracted_at": datetime.now().isoformat(),
            "version": "v1-stage1-as-is",
            "stage": "extraction",
            "description": "Raw extracted tokens before upgrades - Figma Tokens Studio compatible",
        },
        "fonts": {},
        "colors": {},
        "typography": {},
        "spacing": {},
        "radius": {},
        "shadows": {},
    }

    # Fonts: primary family + detected weights.
    fonts_info = get_detected_fonts()
    result["fonts"] = {
        "primary": fonts_info.get("primary", "Unknown"),
        "weights": fonts_info.get("weights", [400]),
    }

    # Colors (desktop only; hyphens preserved to match original naming).
    for name, c in (state.desktop_normalized.colors or {}).items():
        key = _token_key(c.suggested_name or name, "color", dash=False)
        result["colors"][key] = {"value": c.value, "type": "color", "source": "detected"}

    def _add_typography(tokens, viewport: str) -> None:
        """Append one typography entry per token for the given viewport."""
        for name, t in tokens.typography.items():
            key = f"{_token_key(t.suggested_name or name, 'font')}.{viewport}"
            result["typography"][key] = {
                "value": t.font_size,
                "type": "dimension",
                "fontFamily": t.font_family,
                "fontWeight": str(t.font_weight),
                "lineHeight": t.line_height or "1.5",
                "source": "detected",
            }

    if state.desktop_normalized.typography:
        _add_typography(state.desktop_normalized, "desktop")
    if state.mobile_normalized and state.mobile_normalized.typography:
        _add_typography(state.mobile_normalized, "mobile")

    def _add_spacing(tokens, viewport: str) -> None:
        """Append one spacing entry per token for the given viewport."""
        for name, s in tokens.spacing.items():
            key = f"{_token_key(s.suggested_name or name, 'space')}.{viewport}"
            result["spacing"][key] = {"value": s.value, "type": "dimension", "source": "detected"}

    if state.desktop_normalized.spacing:
        _add_spacing(state.desktop_normalized, "desktop")
    if state.mobile_normalized and state.mobile_normalized.spacing:
        _add_spacing(state.mobile_normalized, "mobile")

    # Radius and shadows use the raw dict key (no suggested_name here).
    for name, r in (state.desktop_normalized.radius or {}).items():
        result["radius"][_token_key(name, "radius")] = {
            "value": r.value,
            "type": "dimension",
            "source": "detected",
        }

    for name, s in (state.desktop_normalized.shadows or {}).items():
        result["shadows"][_token_key(name, "shadow")] = {
            "value": s.value,
            "type": "boxShadow",
            "source": "detected",
        }

    # default=str guards against non-JSON-native token values.
    return json.dumps(result, indent=2, default=str)
|
|
|
|
|
|
|
|
def export_tokens_json():
    """Export final tokens with selected upgrades applied - FLAT structure for Figma Tokens Studio.

    Reads the Stage 2 choices from state.selected_upgrades and emits either
    upgraded tokens (modular type scale, grid spacing, full color ramps) or
    the detected values when no upgrade was chosen for a category.

    Returns:
        A pretty-printed JSON string, or an error payload when no
        extraction has run yet.
    """
    if not state.desktop_normalized:
        return json.dumps({"error": "No tokens extracted. Please run extraction first."}, indent=2)

    upgrades = getattr(state, 'selected_upgrades', {})
    type_scale_choice = upgrades.get('type_scale', 'Keep Current')
    spacing_choice = upgrades.get('spacing', 'Keep Current')
    apply_ramps = upgrades.get('color_ramps', True)
    # NOTE(review): upgrades.get("color_changes") (accepted LLM color swaps
    # from Stage 2) is recorded but never applied here — confirm intent.

    # Parse the chosen modular type scale. Longest match first: "1.25" and
    # "1.333" both contain "1.2", so checking "1.2" first mis-detected them
    # (the original bug made 1.25/1.333 choices export as 1.2).
    ratio = None
    if "1.333" in type_scale_choice:
        ratio = 1.333
    elif "1.25" in type_scale_choice:
        ratio = 1.25
    elif "1.2" in type_scale_choice:
        ratio = 1.2

    spacing_base = None
    if "8px" in spacing_choice:
        spacing_base = 8
    elif "4px" in spacing_choice:
        spacing_base = 4

    def _token_key(raw: str, prefix: str, *, dash: bool = True) -> str:
        """Normalize a raw token name to dotted lowercase under *prefix*.

        dash=False preserves hyphens (original color-name behavior).
        """
        cleaned = raw.replace(" ", ".").replace("_", ".")
        if dash:
            cleaned = cleaned.replace("-", ".")
        cleaned = cleaned.lower()
        if not cleaned.startswith(f"{prefix}."):
            cleaned = f"{prefix}.{cleaned}"
        return cleaned

    result = {
        "metadata": {
            "source_url": state.base_url,
            "extracted_at": datetime.now().isoformat(),
            "version": "v2-upgraded",
            "stage": "final",
            "description": "Upgraded tokens - Figma Tokens Studio compatible",
            "upgrades_applied": {
                "type_scale": type_scale_choice,
                "spacing": spacing_choice,
                "color_ramps": apply_ramps,
            },
        },
        "fonts": {},
        "colors": {},
        "typography": {},
        "spacing": {},
        "radius": {},
        "shadows": {},
    }

    # Fonts: primary family + detected weights.
    fonts_info = get_detected_fonts()
    result["fonts"] = {
        "primary": fonts_info.get("primary", "Unknown"),
        "weights": fonts_info.get("weights", [400]),
    }
    primary_font = fonts_info.get("primary", "sans-serif")

    # Colors: either full 50-950 ramps per base color or the raw values.
    if state.desktop_normalized.colors:
        from core.color_utils import generate_color_ramp

        for name, c in state.desktop_normalized.colors.items():
            key = _token_key(c.suggested_name or name, "color", dash=False)

            if apply_ramps:
                try:
                    ramp = generate_color_ramp(c.value)
                    shades = ["50", "100", "200", "300", "400", "500", "600", "700", "800", "900", "950"]
                    for i, shade in enumerate(shades):
                        if i < len(ramp):
                            # Ramp entries may be hex strings or dicts with a "hex" key.
                            result["colors"][f"{key}.{shade}"] = {
                                "value": ramp[i] if isinstance(ramp[i], str) else ramp[i].get("hex", c.value),
                                "type": "color",
                                # 500 is the detected anchor; other shades are generated.
                                "source": "upgraded" if shade != "500" else "detected",
                            }
                except Exception:
                    # Ramp generation failed: fall back to the single value.
                    result["colors"][key] = {"value": c.value, "type": "color", "source": "detected"}
            else:
                result["colors"][key] = {"value": c.value, "type": "color", "source": "detected"}

    def _add_detected_typography(tokens, viewport: str) -> None:
        """Copy detected typography tokens through unchanged for *viewport*."""
        for name, t in tokens.typography.items():
            key = f"{_token_key(t.suggested_name or name, 'font')}.{viewport}"
            result["typography"][key] = {
                "value": t.font_size,
                "type": "dimension",
                "fontFamily": t.font_family,
                "fontWeight": str(t.font_weight),
                "lineHeight": t.line_height or "1.5",
                "source": "detected",
            }

    base_size = get_base_font_size()
    token_names = [
        "font.display.2xl", "font.display.xl", "font.display.lg", "font.display.md",
        "font.heading.xl", "font.heading.lg", "font.heading.md", "font.heading.sm",
        "font.body.lg", "font.body.md", "font.body.sm", "font.caption", "font.overline"
    ]

    def _scale_sizes(factor: float) -> list:
        """13 modular-scale sizes (largest first), rounded to even px."""
        return [int(round(base_size * factor * (ratio ** (8 - i)) / 2) * 2) for i in range(13)]

    if ratio:
        # Upgraded desktop typography from the chosen modular scale.
        for i, size in enumerate(_scale_sizes(1.0)):
            result["typography"][f"{token_names[i]}.desktop"] = {
                "value": f"{size}px",
                "type": "dimension",
                "fontFamily": primary_font,
                "source": "upgraded",
            }
    elif state.desktop_normalized.typography:
        _add_detected_typography(state.desktop_normalized, "desktop")

    if ratio:
        # Mobile sizes are 87.5% of the desktop scale.
        for i, size in enumerate(_scale_sizes(0.875)):
            result["typography"][f"{token_names[i]}.mobile"] = {
                "value": f"{size}px",
                "type": "dimension",
                "fontFamily": primary_font,
                "source": "upgraded",
            }
    elif state.mobile_normalized and state.mobile_normalized.typography:
        _add_detected_typography(state.mobile_normalized, "mobile")

    # Spacing: upgraded grid values or the detected tokens.
    spacing_token_names = [
        "space.1", "space.2", "space.3", "space.4", "space.5",
        "space.6", "space.8", "space.10", "space.12", "space.16"
    ]

    if spacing_base:
        # NOTE(review): values grow as base*(i+1), so e.g. space.8 (index 6)
        # gets base*7 — the token names and multipliers diverge; confirm
        # intent before changing (kept as-is to preserve existing exports).
        for i, token_name in enumerate(spacing_token_names):
            value = f"{spacing_base * (i + 1)}px"
            for viewport in ("desktop", "mobile"):
                result["spacing"][f"{token_name}.{viewport}"] = {
                    "value": value,
                    "type": "dimension",
                    "source": "upgraded",
                }
    else:
        def _add_detected_spacing(tokens, viewport: str) -> None:
            """Copy detected spacing tokens through unchanged for *viewport*."""
            for name, s in tokens.spacing.items():
                key = f"{_token_key(s.suggested_name or name, 'space')}.{viewport}"
                result["spacing"][key] = {"value": s.value, "type": "dimension", "source": "detected"}

        if state.desktop_normalized.spacing:
            _add_detected_spacing(state.desktop_normalized, "desktop")
        if state.mobile_normalized and state.mobile_normalized.spacing:
            _add_detected_spacing(state.mobile_normalized, "mobile")

    # Radius and shadows are always passed through as detected.
    for name, r in (state.desktop_normalized.radius or {}).items():
        result["radius"][_token_key(name, "radius")] = {
            "value": r.value,
            "type": "dimension",
            "source": "detected",
        }

    for name, s in (state.desktop_normalized.shadows or {}).items():
        result["shadows"][_token_key(name, "shadow")] = {
            "value": s.value,
            "type": "boxShadow",
            "source": "detected",
        }

    # default=str guards against non-JSON-native token values.
    return json.dumps(result, indent=2, default=str)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def create_ui():
    """Build and return the Gradio Blocks application.

    Layout, top to bottom:
      * Branded header banner
      * "Configuration" accordion — HuggingFace token entry
      * "Step 1" accordion — page discovery + token extraction trigger
      * "Stage 1" accordion — review raw extracted tokens (tables + AS-IS previews)
      * "Stage 2" accordion — AI-powered analysis and upgrade selection
      * "Stage 3" accordion — JSON export
      * All cross-component event wiring, then a footer

    The handler functions referenced here (discover_pages, extract_tokens,
    switch_viewport, run_stage2_analysis_v2, run_stage2_analysis,
    apply_selected_upgrades, export_stage1_json, export_tokens_json) are
    defined elsewhere in this module.

    Returns:
        gr.Blocks: the assembled (but not yet launched) Gradio app.
    """

    # Corporate look-and-feel: blue/slate palette, Inter + JetBrains Mono,
    # with explicit light/dark variants for every surface we touch.
    corporate_theme = gr.themes.Base(
        primary_hue=gr.themes.colors.blue,
        secondary_hue=gr.themes.colors.slate,
        neutral_hue=gr.themes.colors.slate,
        font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"],
        font_mono=[gr.themes.GoogleFont("JetBrains Mono"), "ui-monospace", "monospace"],
    ).set(
        # Page + block surfaces
        body_background_fill="#f8fafc",
        body_background_fill_dark="#0f172a",
        block_background_fill="white",
        block_background_fill_dark="#1e293b",
        block_border_color="#e2e8f0",
        block_border_color_dark="#334155",
        block_label_background_fill="#f1f5f9",
        block_label_background_fill_dark="#1e293b",
        block_title_text_color="#0f172a",
        block_title_text_color_dark="#f1f5f9",
        # Primary buttons
        button_primary_background_fill="#2563eb",
        button_primary_background_fill_hover="#1d4ed8",
        button_primary_text_color="white",
        # Secondary buttons
        button_secondary_background_fill="#f1f5f9",
        button_secondary_background_fill_hover="#e2e8f0",
        button_secondary_text_color="#1e293b",
        # Inputs
        input_background_fill="#ffffff",
        input_background_fill_dark="#1e293b",
        input_border_color="#cbd5e1",
        input_border_color_dark="#475569",
        # Block chrome
        block_shadow="0 1px 3px rgba(0,0,0,0.1)",
        block_shadow_dark="0 1px 3px rgba(0,0,0,0.3)",
        block_border_width="1px",
        block_radius="8px",
        # Body text
        body_text_color="#1e293b",
        body_text_color_dark="#e2e8f0",
        body_text_size="14px",
    )

    # NOTE(review): `.progress-bar` is declared twice below (full rule, then a
    # background-only repeat). The repeat is redundant but harmless; it is kept
    # verbatim because this string is shipped to the browser as-is.
    custom_css = """
    /* Global styles */
    .gradio-container {
        max-width: 1400px !important;
        margin: 0 auto !important;
    }

    /* Header branding */
    .app-header {
        background: linear-gradient(135deg, #1e40af 0%, #3b82f6 100%);
        padding: 24px 32px;
        border-radius: 12px;
        margin-bottom: 24px;
        color: white;
    }
    .app-header h1 {
        margin: 0 0 8px 0;
        font-size: 28px;
        font-weight: 700;
    }
    .app-header p {
        margin: 0;
        opacity: 0.9;
        font-size: 14px;
    }

    /* Stage indicators */
    .stage-header {
        background: linear-gradient(90deg, #f1f5f9 0%, #ffffff 100%);
        padding: 16px 20px;
        border-radius: 8px;
        border-left: 4px solid #2563eb;
        margin-bottom: 16px;
    }
    .stage-header h2 {
        margin: 0;
        font-size: 18px;
        color: #1e293b;
    }

    /* Log styling */
    .log-container textarea {
        font-family: 'JetBrains Mono', monospace !important;
        font-size: 12px !important;
        line-height: 1.6 !important;
        background: #0f172a !important;
        color: #e2e8f0 !important;
        border-radius: 8px !important;
    }

    /* Color swatch */
    .color-swatch {
        display: inline-block;
        width: 24px;
        height: 24px;
        border-radius: 4px;
        margin-right: 8px;
        vertical-align: middle;
        border: 1px solid rgba(0,0,0,0.1);
    }

    /* Score badges */
    .score-badge {
        display: inline-block;
        padding: 4px 12px;
        border-radius: 20px;
        font-weight: 600;
        font-size: 13px;
    }
    .score-badge.high { background: #dcfce7; color: #166534; }
    .score-badge.medium { background: #fef3c7; color: #92400e; }
    .score-badge.low { background: #fee2e2; color: #991b1b; }

    /* Benchmark cards */
    .benchmark-card {
        background: #f8fafc;
        border: 1px solid #e2e8f0;
        border-radius: 8px;
        padding: 16px;
        margin-bottom: 12px;
    }
    .benchmark-card.selected {
        border-color: #2563eb;
        background: #eff6ff;
    }

    /* Action items */
    .action-item {
        background: white;
        border: 1px solid #e2e8f0;
        border-radius: 8px;
        padding: 16px;
        margin-bottom: 8px;
    }
    .action-item.high-priority {
        border-left: 4px solid #ef4444;
    }
    .action-item.medium-priority {
        border-left: 4px solid #f59e0b;
    }

    /* Progress indicator */
    .progress-bar {
        height: 4px;
        background: #e2e8f0;
        border-radius: 2px;
        overflow: hidden;
    }
    .progress-bar-fill {
        height: 100%;
        background: linear-gradient(90deg, #2563eb, #3b82f6);
        transition: width 0.3s ease;
    }

    /* Accordion styling */
    .accordion-header {
        font-weight: 600 !important;
    }

    /* Table styling */
    table {
        border-collapse: collapse;
        width: 100%;
    }
    th {
        background: #f1f5f9;
        color: #1e293b;
        padding: 12px;
        text-align: left;
        font-weight: 600;
        border-bottom: 2px solid #e2e8f0;
    }
    td {
        padding: 12px;
        color: #1e293b;
        border-bottom: 1px solid #e2e8f0;
    }

    /* Placeholder messages */
    .placeholder-msg {
        padding: 20px;
        background: #f5f5f5;
        border-radius: 8px;
        color: #666;
    }
    .placeholder-msg.placeholder-lg {
        padding: 40px;
        text-align: center;
    }

    /* Progress bar */
    .progress-bar {
        background: #e2e8f0;
    }

    /* Dark mode adjustments */
    .dark .stage-header {
        background: linear-gradient(90deg, #1e293b 0%, #0f172a 100%);
        border-left-color: #3b82f6;
    }
    .dark .stage-header h2 {
        color: #f1f5f9;
    }
    .dark .stage-header-subtitle,
    .dark .tip-text {
        color: #94a3b8 !important;
    }
    .dark .benchmark-card {
        background: #1e293b;
        border-color: #334155;
    }
    .dark .action-item {
        background: #1e293b;
        border-color: #475569;
        color: #e2e8f0;
    }
    /* Dark mode: Placeholder messages */
    .dark .placeholder-msg {
        background: #1e293b !important;
        color: #94a3b8 !important;
    }
    /* Dark mode: Progress bar */
    .dark .progress-bar {
        background: #334155 !important;
    }
    /* Dark mode: Gradio Dataframe tables */
    .dark table th {
        background: #1e293b !important;
        color: #e2e8f0 !important;
        border-bottom-color: #475569 !important;
    }
    .dark table td {
        color: #e2e8f0 !important;
        border-bottom-color: #334155 !important;
    }
    .dark table tr {
        background: #0f172a !important;
    }
    .dark table tr:nth-child(even) {
        background: #1e293b !important;
    }
    /* Dark mode: HTML preview tables (typography, benchmarks) */
    .dark .typography-preview {
        background: #1e293b !important;
    }
    .dark .typography-preview th {
        background: #334155 !important;
        color: #e2e8f0 !important;
        border-bottom-color: #475569 !important;
    }
    .dark .typography-preview td {
        color: #e2e8f0 !important;
    }
    .dark .typography-preview .meta-row {
        background: #1e293b !important;
        border-top-color: #334155 !important;
    }
    .dark .typography-preview .scale-name,
    .dark .typography-preview .scale-label {
        color: #f1f5f9 !important;
        background: #475569 !important;
    }
    .dark .typography-preview .meta {
        color: #cbd5e1 !important;
    }
    .dark .typography-preview .preview-cell {
        background: #0f172a !important;
        border-bottom-color: #334155 !important;
    }
    .dark .typography-preview .preview-text {
        color: #f1f5f9 !important;
    }
    .dark .typography-preview tr:hover .preview-cell {
        background: #1e293b !important;
    }

    /* Dark mode: Colors AS-IS preview */
    .dark .colors-asis-header {
        color: #e2e8f0 !important;
        background: #1e293b !important;
    }
    .dark .colors-asis-preview {
        background: #0f172a !important;
    }
    .dark .color-row-asis {
        background: #1e293b !important;
        border-color: #475569 !important;
    }
    .dark .color-name-asis {
        color: #f1f5f9 !important;
    }
    .dark .frequency {
        color: #cbd5e1 !important;
    }
    .dark .color-meta-asis .aa-pass {
        color: #22c55e !important;
        background: #14532d !important;
    }
    .dark .color-meta-asis .aa-fail {
        color: #f87171 !important;
        background: #450a0a !important;
    }
    .dark .context-badge {
        background: #334155 !important;
        color: #e2e8f0 !important;
    }

    /* Dark mode: Color ramps preview */
    .dark .color-ramps-preview {
        background: #0f172a !important;
    }
    .dark .ramps-header-info {
        color: #e2e8f0 !important;
        background: #1e293b !important;
    }
    .dark .ramp-header {
        background: #1e293b !important;
    }
    .dark .ramp-header-label {
        color: #cbd5e1 !important;
    }
    .dark .color-row {
        background: #1e293b !important;
        border-color: #475569 !important;
    }
    .dark .color-name {
        color: #f1f5f9 !important;
        background: #475569 !important;
    }
    .dark .color-hex {
        color: #cbd5e1 !important;
    }

    /* Dark mode: Spacing preview */
    .dark .spacing-asis-preview {
        background: #0f172a !important;
    }
    .dark .spacing-row-asis {
        background: #1e293b !important;
    }
    .dark .spacing-label {
        color: #f1f5f9 !important;
    }

    /* Dark mode: Radius preview */
    .dark .radius-asis-preview {
        background: #0f172a !important;
    }
    .dark .radius-item {
        background: #1e293b !important;
    }
    .dark .radius-label {
        color: #f1f5f9 !important;
    }

    /* Dark mode: Shadows preview */
    .dark .shadows-asis-preview {
        background: #0f172a !important;
    }
    .dark .shadow-item {
        background: #1e293b !important;
    }
    .dark .shadow-box {
        background: #334155 !important;
    }
    .dark .shadow-label {
        color: #f1f5f9 !important;
    }
    .dark .shadow-value {
        color: #94a3b8 !important;
    }

    /* Dark mode: Semantic color ramps */
    .dark .sem-ramps-preview {
        background: #0f172a !important;
    }
    .dark .sem-category {
        background: #1e293b !important;
        border-color: #475569 !important;
    }
    .dark .sem-cat-title {
        color: #f1f5f9 !important;
        border-bottom-color: #475569 !important;
    }
    .dark .sem-color-row {
        background: #0f172a !important;
        border-color: #334155 !important;
    }
    .dark .sem-role {
        color: #f1f5f9 !important;
    }
    .dark .sem-hex {
        color: #cbd5e1 !important;
    }
    .dark .llm-rec {
        background: #422006 !important;
        border-color: #b45309 !important;
    }
    .dark .rec-label {
        color: #fbbf24 !important;
    }
    .dark .rec-issue {
        color: #fde68a !important;
    }
    .dark .rec-arrow {
        color: #fbbf24 !important;
    }
    .dark .llm-summary {
        background: #1e3a5f !important;
        border-color: #3b82f6 !important;
    }
    .dark .llm-summary h4 {
        color: #93c5fd !important;
    }
    .dark .llm-summary ul,
    .dark .llm-summary li {
        color: #bfdbfe !important;
    }

    /* Dark mode: Score badges */
    .dark .score-badge.high { background: #14532d; color: #86efac; }
    .dark .score-badge.medium { background: #422006; color: #fde68a; }
    .dark .score-badge.low { background: #450a0a; color: #fca5a5; }

    /* Dark mode: Benchmark & action cards */
    .dark .benchmark-card.selected {
        border-color: #3b82f6;
        background: #1e3a5f;
    }
    .dark .action-item.high-priority {
        border-left-color: #ef4444;
    }
    .dark .action-item.medium-priority {
        border-left-color: #f59e0b;
    }

    /* Dark mode: Gradio markdown rendered tables */
    .dark .prose table th,
    .dark .markdown-text table th {
        background: #1e293b !important;
        color: #e2e8f0 !important;
        border-color: #475569 !important;
    }
    .dark .prose table td,
    .dark .markdown-text table td {
        color: #e2e8f0 !important;
        border-color: #334155 !important;
    }
    .dark .prose table tr,
    .dark .markdown-text table tr {
        background: #0f172a !important;
    }
    .dark .prose table tr:nth-child(even),
    .dark .markdown-text table tr:nth-child(even) {
        background: #1e293b !important;
    }

    /* Dark mode: Generic text in HTML components */
    .dark .gradio-html p,
    .dark .gradio-html span,
    .dark .gradio-html div {
        color: #e2e8f0;
    }
    """

    with gr.Blocks(
        title="Design System Extractor v2",
        theme=corporate_theme,
        css=custom_css
    ) as app:

        # ---------- Header banner ----------
        gr.HTML("""
        <div class="app-header">
            <h1>🎨 Design System Extractor v2</h1>
            <p>Reverse-engineer design systems from live websites • AI-powered analysis • Figma-ready export</p>
        </div>
        """)

        # ---------- Configuration: HuggingFace token ----------
        # Collapsed when a token is already present in the environment.
        with gr.Accordion("⚙️ Configuration", open=not bool(HF_TOKEN_FROM_ENV)):
            gr.Markdown("**HuggingFace Token** — Required for Stage 2 (AI upgrades)")
            with gr.Row():
                hf_token_input = gr.Textbox(
                    label="HF Token", placeholder="hf_xxxx", type="password",
                    scale=4, value=HF_TOKEN_FROM_ENV,
                )
                save_token_btn = gr.Button("💾 Save", scale=1)
            token_status = gr.Markdown("✅ Token loaded" if HF_TOKEN_FROM_ENV else "⏳ Enter token")

            def save_token(token):
                """Store the token in the process env for later API calls.

                Only a length sanity check is done here; real validation
                happens when the HF API is first used.
                """
                if token and len(token) > 10:
                    os.environ["HF_TOKEN"] = token.strip()
                    return "✅ Token saved!"
                return "❌ Invalid token"

            save_token_btn.click(save_token, [hf_token_input], [token_status])

        # ---------- Step 1: page discovery ----------
        with gr.Accordion("🔍 Step 1: Discover Pages", open=True):
            gr.Markdown("Enter your website URL to discover pages for extraction.")

            with gr.Row():
                url_input = gr.Textbox(label="Website URL", placeholder="https://example.com", scale=4)
                discover_btn = gr.Button("🔍 Discover Pages", variant="primary", scale=1)

            discover_status = gr.Markdown("")

            with gr.Row():
                log_output = gr.Textbox(label="📋 Log", lines=8, interactive=False)

            # Hidden until discovery succeeds (see discover_btn wiring below).
            pages_table = gr.Dataframe(
                headers=["Select", "URL", "Title", "Type", "Status"],
                datatype=["bool", "str", "str", "str", "str"],
                label="Discovered Pages",
                interactive=True,
                visible=False,
            )

            extract_btn = gr.Button("🚀 Extract Tokens (Desktop + Mobile)", variant="primary", visible=False)

        # ---------- Stage 1: review extracted tokens ----------
        with gr.Accordion("📊 Stage 1: Review Extracted Tokens", open=False) as stage1_accordion:

            extraction_status = gr.Markdown("")

            gr.Markdown("""
            **Review the extracted tokens.** Toggle between Desktop and Mobile viewports.
            Accept or reject tokens, then proceed to Stage 2 for AI-powered upgrades.
            """)

            viewport_toggle = gr.Radio(
                choices=["Desktop (1440px)", "Mobile (375px)"],
                value="Desktop (1440px)",
                label="Viewport",
            )

            # Editable review tables — "Accept" column drives what moves forward.
            with gr.Tabs():
                with gr.Tab("🎨 Colors"):
                    colors_table = gr.Dataframe(
                        headers=["Accept", "Color", "Suggested Name", "Frequency", "Confidence", "Contrast", "AA", "Context"],
                        datatype=["bool", "str", "str", "number", "str", "str", "str", "str"],
                        label="Colors",
                        interactive=True,
                    )

                with gr.Tab("📝 Typography"):
                    typography_table = gr.Dataframe(
                        headers=["Accept", "Font", "Size", "Weight", "Line Height", "Suggested Name", "Frequency", "Confidence"],
                        datatype=["bool", "str", "str", "str", "str", "str", "number", "str"],
                        label="Typography",
                        interactive=True,
                    )

                with gr.Tab("📏 Spacing"):
                    spacing_table = gr.Dataframe(
                        headers=["Accept", "Value", "Pixels", "Suggested Name", "Frequency", "Base 8", "Confidence"],
                        datatype=["bool", "str", "str", "str", "number", "str", "str"],
                        label="Spacing",
                        interactive=True,
                    )

                with gr.Tab("🔘 Radius"):
                    radius_table = gr.Dataframe(
                        headers=["Accept", "Value", "Frequency", "Context"],
                        datatype=["bool", "str", "number", "str"],
                        label="Border Radius",
                        interactive=True,
                    )

            # Read-only visual previews of the raw (un-enhanced) values.
            gr.Markdown("---")
            gr.Markdown("## 👁️ Visual Previews (AS-IS)")
            gr.Markdown("*Raw extracted values from the website — no enhancements applied*")

            with gr.Tabs():
                with gr.Tab("🔤 Typography"):
                    gr.Markdown("*Actual typography rendered with the detected font*")
                    stage1_typography_preview = gr.HTML(
                        value="<div class='placeholder-msg'>Typography preview will appear after extraction...</div>",
                        label="Typography Preview"
                    )

                with gr.Tab("🎨 Colors"):
                    gr.Markdown("*All detected colors (AS-IS — no generated ramps)*")
                    stage1_colors_preview = gr.HTML(
                        value="<div class='placeholder-msg'>Colors preview will appear after extraction...</div>",
                        label="Colors Preview"
                    )

                with gr.Tab("🧠 Semantic Colors"):
                    gr.Markdown("*Colors categorized by usage: Brand, Text, Background, Border, Feedback*")
                    stage1_semantic_preview = gr.HTML(
                        value="<div class='placeholder-msg'>Semantic color analysis will appear after extraction...</div>",
                        label="Semantic Colors Preview"
                    )

                with gr.Tab("📏 Spacing"):
                    gr.Markdown("*All detected spacing values*")
                    stage1_spacing_preview = gr.HTML(
                        value="<div class='placeholder-msg'>Spacing preview will appear after extraction...</div>",
                        label="Spacing Preview"
                    )

                with gr.Tab("🔘 Radius"):
                    gr.Markdown("*All detected border radius values*")
                    stage1_radius_preview = gr.HTML(
                        value="<div class='placeholder-msg'>Radius preview will appear after extraction...</div>",
                        label="Radius Preview"
                    )

                with gr.Tab("🌑 Shadows"):
                    gr.Markdown("*All detected box shadow values*")
                    stage1_shadows_preview = gr.HTML(
                        value="<div class='placeholder-msg'>Shadows preview will appear after extraction...</div>",
                        label="Shadows Preview"
                    )

            with gr.Row():
                proceed_stage2_btn = gr.Button("➡️ Proceed to Stage 2: AI Upgrades", variant="primary")
                download_stage1_btn = gr.Button("📥 Download Stage 1 JSON", variant="secondary")

        # ---------- Stage 2: AI-powered analysis ----------
        with gr.Accordion("🧠 Stage 2: AI-Powered Analysis", open=False) as stage2_accordion:

            gr.HTML("""
            <div class="stage-header">
                <h2>🧠 Stage 2: Multi-Agent Analysis</h2>
                <p class="stage-header-subtitle" style="color: #64748b; margin-top: 4px;">Rule Engine + Benchmark Research + LLM Agents</p>
            </div>
            """)

            stage2_status = gr.Markdown("Click 'Analyze' to start AI-powered design system analysis.")

            with gr.Accordion("⚙️ Analysis Configuration", open=True):

                gr.Markdown("""
                ### 🏗️ New Analysis Architecture

                | Layer | Type | What It Does | Cost |
                |-------|------|--------------|------|
                | **Layer 1** | Rule Engine | Type scale, AA check, spacing grid, color stats | FREE |
                | **Layer 2** | Benchmark Research | Fetch live specs via Firecrawl (24h cache) | ~$0.001 |
                | **Layer 3** | LLM Agents | Brand ID, Benchmark Advisor, Best Practices | ~$0.002 |
                | **Layer 4** | HEAD Synthesizer | Combine all → Final recommendations | ~$0.001 |

                **Total Cost:** ~$0.003-0.004 per analysis
                """)

                gr.Markdown("---")

                gr.Markdown("### 📊 Select Design Systems to Compare Against")
                gr.Markdown("*Choose which design systems to benchmark your tokens against:*")

                # (display label, internal benchmark id) pairs
                benchmark_checkboxes = gr.CheckboxGroup(
                    choices=[
                        ("🟢 Material Design 3 (Google)", "material_design_3"),
                        ("🍎 Apple HIG", "apple_hig"),
                        ("🛒 Shopify Polaris", "shopify_polaris"),
                        ("🔵 Atlassian Design System", "atlassian_design"),
                        ("🔷 IBM Carbon", "ibm_carbon"),
                        ("🌊 Tailwind CSS", "tailwind_css"),
                        ("🐜 Ant Design", "ant_design"),
                        ("⚡ Chakra UI", "chakra_ui"),
                    ],
                    value=["material_design_3", "shopify_polaris", "atlassian_design"],
                    label="Benchmarks",
                )

                gr.Markdown("""
                <small class="tip-text" style="color: #64748b;">
                💡 <b>Tip:</b> Select 2-4 benchmarks for best results. More benchmarks = longer analysis time.
                <br>
                📦 Results are cached for 24 hours to speed up subsequent analyses.
                </small>
                """)

            with gr.Row():
                analyze_btn_v2 = gr.Button(
                    "🚀 Run Analysis (New Architecture)",
                    variant="primary",
                    size="lg",
                    scale=2
                )
                analyze_btn_legacy = gr.Button(
                    "🤖 Legacy Analysis",
                    variant="secondary",
                    size="lg",
                    scale=1
                )

            with gr.Accordion("📋 Analysis Log", open=True):
                stage2_log = gr.Textbox(
                    label="Log",
                    lines=20,
                    interactive=False,
                    elem_classes=["log-container"]
                )

            gr.Markdown("---")
            gr.Markdown("## 📊 Analysis Results")

            scores_dashboard = gr.HTML(
                value="<div class='placeholder-msg placeholder-lg'>Scores will appear after analysis...</div>",
                label="Scores"
            )

            priority_actions_html = gr.HTML(
                value="<div class='placeholder-msg'>Priority actions will appear after analysis...</div>",
                label="Priority Actions"
            )

            gr.Markdown("---")
            benchmark_comparison_md = gr.Markdown("*Benchmark comparison will appear after analysis*")

            # FIX: a second, standalone "Color Recommendations" Dataframe used
            # to be created here. It bound to the same variable name as the
            # table in the Colors section below, so this one was shadowed and
            # never received handler output — a permanently-empty dead table.
            # It has been removed; the single table below is the live one.

            gr.Markdown("---")
            gr.Markdown("## 📐 Typography")

            with gr.Accordion("👁️ Typography Visual Preview", open=True):
                stage2_typography_preview = gr.HTML(
                    value="<div class='placeholder-msg'>Typography preview will appear after analysis...</div>",
                    label="Typography Preview"
                )

            with gr.Row():
                with gr.Column(scale=2):
                    gr.Markdown("### 🖥️ Desktop (1440px)")
                    typography_desktop = gr.Dataframe(
                        headers=["Token", "Current", "Scale 1.2", "Scale 1.25 ⭐", "Scale 1.333", "Keep"],
                        datatype=["str", "str", "str", "str", "str", "str"],
                        label="Desktop Typography",
                        interactive=False,
                    )

                with gr.Column(scale=2):
                    gr.Markdown("### 📱 Mobile (375px)")
                    typography_mobile = gr.Dataframe(
                        headers=["Token", "Current", "Scale 1.2", "Scale 1.25 ⭐", "Scale 1.333", "Keep"],
                        datatype=["str", "str", "str", "str", "str", "str"],
                        label="Mobile Typography",
                        interactive=False,
                    )

            with gr.Row():
                with gr.Column():
                    gr.Markdown("### Select Type Scale Option")
                    type_scale_radio = gr.Radio(
                        choices=["Keep Current", "Scale 1.2 (Minor Third)", "Scale 1.25 (Major Third) ⭐", "Scale 1.333 (Perfect Fourth)"],
                        value="Scale 1.25 (Major Third) ⭐",
                        label="Type Scale",
                        interactive=True,
                    )
                    gr.Markdown("*Font family will be preserved. Sizes rounded to even numbers.*")

            gr.Markdown("---")
            gr.Markdown("## 🎨 Colors")

            with gr.Accordion("🤖 LLM Color Recommendations", open=True):
                gr.Markdown("""
                *The LLMs analyzed your colors and made these suggestions. Accept or reject each one.*
                """)

                llm_color_recommendations = gr.HTML(
                    value="<div class='placeholder-msg'>LLM recommendations will appear after analysis...</div>",
                    label="LLM Recommendations"
                )

            # The single, live color-recommendations table; populated by both
            # analysis handlers and read back by apply_selected_upgrades.
            color_recommendations_table = gr.Dataframe(
                headers=["Accept", "Role", "Current", "Issue", "Suggested", "Contrast"],
                datatype=["bool", "str", "str", "str", "str", "str"],
                label="Color Recommendations",
                interactive=True,
                col_count=(6, "fixed"),
            )

            with gr.Accordion("👁️ Color Ramps Visual Preview (Semantic Groups)", open=True):
                stage2_color_ramps_preview = gr.HTML(
                    value="<div class='placeholder-msg'>Color ramps preview will appear after analysis...</div>",
                    label="Color Ramps Preview"
                )

            base_colors_display = gr.Markdown("*Base colors will appear after analysis*")

            gr.Markdown("---")

            color_ramps_display = gr.Markdown("*Color ramps will appear after analysis*")

            color_ramps_checkbox = gr.Checkbox(
                label="✓ Generate color ramps (keeps base colors, adds 50-950 shades)",
                value=True,
            )

            gr.Markdown("---")
            gr.Markdown("## 📏 Spacing (Rule-Based)")

            with gr.Row():
                with gr.Column(scale=2):
                    spacing_comparison = gr.Dataframe(
                        headers=["Current", "8px Grid", "4px Grid"],
                        datatype=["str", "str", "str"],
                        label="Spacing Comparison",
                        interactive=False,
                    )

                with gr.Column(scale=1):
                    spacing_radio = gr.Radio(
                        choices=["Keep Current", "8px Base Grid ⭐", "4px Base Grid"],
                        value="8px Base Grid ⭐",
                        label="Spacing System",
                        interactive=True,
                    )

            gr.Markdown("---")
            gr.Markdown("## 🔘 Border Radius (Rule-Based)")

            radius_display = gr.Markdown("*Radius tokens will appear after analysis*")

            gr.Markdown("---")
            gr.Markdown("## 🌫️ Shadows (Rule-Based)")

            shadows_display = gr.Markdown("*Shadow tokens will appear after analysis*")

            gr.Markdown("---")

            with gr.Row():
                apply_upgrades_btn = gr.Button("✨ Apply Selected Upgrades", variant="primary", scale=2)
                # NOTE(review): reset_btn has no click handler wired anywhere —
                # confirm whether a reset handler was intended.
                reset_btn = gr.Button("↩️ Reset to Original", variant="secondary", scale=1)

            apply_status = gr.Markdown("")

        # ---------- Stage 3: export ----------
        with gr.Accordion("📦 Stage 3: Export", open=False):
            gr.Markdown("""
            Export your design tokens to JSON (compatible with Figma Tokens Studio).

            - **Stage 1 JSON**: Raw extracted tokens (as-is)
            - **Final JSON**: Upgraded tokens with selected improvements
            """)

            with gr.Row():
                export_stage1_btn = gr.Button("📥 Export Stage 1 (As-Is)", variant="secondary")
                export_final_btn = gr.Button("📥 Export Final (Upgraded)", variant="primary")

            export_output = gr.Code(label="Tokens JSON", language="json", lines=25)

            export_stage1_btn.click(export_stage1_json, outputs=[export_output])
            export_final_btn.click(export_tokens_json, outputs=[export_output])

        # ---------- Hidden state + cross-section event wiring ----------
        # Per-viewport extraction results held client-side between steps.
        desktop_data = gr.State({})
        mobile_data = gr.State({})

        # Discovery: run, then reveal the pages table and extract button.
        discover_btn.click(
            fn=discover_pages,
            inputs=[url_input],
            outputs=[discover_status, log_output, pages_table],
        ).then(
            fn=lambda: (gr.update(visible=True), gr.update(visible=True)),
            outputs=[pages_table, extract_btn],
        )

        # Extraction: populate previews, then seed the review tables from the
        # desktop dataset, then open the Stage 1 accordion.
        extract_btn.click(
            fn=extract_tokens,
            inputs=[pages_table],
            outputs=[extraction_status, log_output, desktop_data, mobile_data,
                     stage1_typography_preview, stage1_colors_preview,
                     stage1_semantic_preview,
                     stage1_spacing_preview, stage1_radius_preview, stage1_shadows_preview],
        ).then(
            fn=lambda d: (d.get("colors", []), d.get("typography", []), d.get("spacing", [])),
            inputs=[desktop_data],
            outputs=[colors_table, typography_table, spacing_table],
        ).then(
            fn=lambda: gr.update(open=True),
            outputs=[stage1_accordion],
        )

        # Swap review tables between Desktop/Mobile datasets.
        viewport_toggle.change(
            fn=switch_viewport,
            inputs=[viewport_toggle],
            outputs=[colors_table, typography_table, spacing_table],
        )

        # New multi-agent analysis pipeline.
        analyze_btn_v2.click(
            fn=run_stage2_analysis_v2,
            inputs=[benchmark_checkboxes],
            outputs=[
                stage2_status,
                stage2_log,
                benchmark_comparison_md,
                scores_dashboard,
                priority_actions_html,
                color_recommendations_table,
                typography_desktop,
                typography_mobile,
                stage2_typography_preview,
                stage2_color_ramps_preview,
                llm_color_recommendations,
            ],
        )

        # Legacy single-pass analysis (kept for comparison).
        analyze_btn_legacy.click(
            fn=run_stage2_analysis,
            inputs=[],
            outputs=[stage2_status, stage2_log, benchmark_comparison_md, scores_dashboard,
                     typography_desktop, typography_mobile, spacing_comparison,
                     base_colors_display, color_ramps_display, radius_display, shadows_display,
                     stage2_typography_preview, stage2_color_ramps_preview,
                     llm_color_recommendations, color_recommendations_table],
        )

        apply_upgrades_btn.click(
            fn=apply_selected_upgrades,
            inputs=[type_scale_radio, spacing_radio, color_ramps_checkbox, color_recommendations_table],
            outputs=[apply_status, stage2_log],
        )

        download_stage1_btn.click(
            fn=export_stage1_json,
            outputs=[export_output],
        )

        proceed_stage2_btn.click(
            fn=lambda: gr.update(open=True),
            outputs=[stage2_accordion],
        )

        # ---------- Footer ----------
        gr.Markdown("""
        ---
        **Design System Extractor v2** | Built with Playwright + Firecrawl + LangGraph + HuggingFace

        *A semi-automated co-pilot for design system recovery and modernization.*

        **New Architecture:** Rule Engine (FREE) + Benchmark Research (Firecrawl) + LLM Agents
        """)

    return app
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Serve on all interfaces so the app is reachable inside containers /
    # HF Spaces. Port defaults to Gradio's standard 7860 but can be overridden
    # with the PORT environment variable (common in PaaS deployments).
    app = create_ui()
    app.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", "7860")))
|
|
|