riazmo committed on
Commit
7ca38c3
Β·
verified Β·
1 Parent(s): 5b6916d

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +609 -0
app.py ADDED
@@ -0,0 +1,609 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Design System Extractor v2 β€” Main Application
3
+ ==============================================
4
+
5
+ Flow:
6
+ 1. User enters URL
7
+ 2. Agent 1 discovers pages β†’ User confirms
8
+ 3. Agent 1 extracts tokens (Desktop + Mobile)
9
+ 4. Agent 2 normalizes tokens
10
+ 5. Stage 1 UI: User reviews tokens (accept/reject, Desktop↔Mobile toggle)
11
+ 6. Agent 3 proposes upgrades
12
+ 7. Stage 2 UI: User selects options with live preview
13
+ 8. Agent 4 generates JSON
14
+ 9. Stage 3 UI: User exports
15
+ """
16
+
17
+ import os
18
+ import asyncio
19
+ import json
20
+ import gradio as gr
21
+ from datetime import datetime
22
+ from typing import Optional
23
+
24
+ # Get HF token from environment
25
+ HF_TOKEN_FROM_ENV = os.getenv("HF_TOKEN", "")
26
+
27
+ # =============================================================================
28
+ # GLOBAL STATE
29
+ # =============================================================================
30
+
31
class AppState:
    """Mutable application-wide state shared across the Gradio callbacks."""

    # Keep at most this many log lines in memory.
    _MAX_LOGS = 100

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all discovery/extraction results and the log buffer."""
        self.discovered_pages = []
        self.base_url = ""
        self.desktop_raw = None         # ExtractedTokens
        self.mobile_raw = None          # ExtractedTokens
        self.desktop_normalized = None  # NormalizedTokens
        self.mobile_normalized = None   # NormalizedTokens
        self.logs = []

    def log(self, message: str):
        """Append a timestamped message, trimming the oldest lines past the cap."""
        stamp = datetime.now().strftime("%H:%M:%S")
        self.logs.append(f"[{stamp}] {message}")
        while len(self.logs) > self._MAX_LOGS:
            del self.logs[0]

    def get_logs(self) -> str:
        """Return the whole log buffer as one newline-joined string."""
        return "\n".join(self.logs)
53
+
54
# Module-level singleton: Gradio handlers read and mutate this directly
# instead of threading the state through component inputs/outputs.
state = AppState()
55
+
56
+
57
+ # =============================================================================
58
+ # LAZY IMPORTS
59
+ # =============================================================================
60
+
61
def get_crawler():
    """Import the page-discovery agent module on first use (keeps startup fast)."""
    from agents import crawler
    return crawler
64
+
65
def get_extractor():
    """Import the token-extraction agent module on first use (keeps startup fast)."""
    from agents import extractor
    return extractor
68
+
69
def get_normalizer():
    """Import the token-normalization agent module on first use (keeps startup fast)."""
    from agents import normalizer
    return normalizer
72
+
73
def get_schema():
    """Import the token-schema module on first use (keeps startup fast)."""
    from core import token_schema
    return token_schema
76
+
77
+
78
+ # =============================================================================
79
+ # PHASE 1: DISCOVER PAGES
80
+ # =============================================================================
81
+
82
async def discover_pages(url: str, progress=gr.Progress()):
    """Discover candidate pages for the given site URL.

    Resets the global state, runs the crawler's PageDiscoverer, and returns a
    (status_markdown, log_text, table_rows) triple for the Gradio outputs.
    Each table row is [selected, url, title, page_type, status].
    Fix: the except block imported ``traceback`` but never logged it; the full
    traceback is now written to the log pane, consistent with extract_tokens.
    """
    state.reset()

    if not url or not url.startswith(("http://", "https://")):
        return "❌ Please enter a valid URL", "", None

    state.log(f"πŸš€ Starting discovery for: {url}")
    progress(0.1, desc="πŸ” Discovering pages...")

    try:
        crawler = get_crawler()
        discoverer = crawler.PageDiscoverer()

        pages = await discoverer.discover(url)

        state.discovered_pages = pages
        state.base_url = url

        state.log(f"βœ… Found {len(pages)} pages")

        # Format for the Dataframe: every page pre-selected by default.
        pages_data = []
        for page in pages:
            pages_data.append([
                True,  # Selected by default
                page.url,
                page.title if page.title else "(No title)",
                page.page_type.value,
                "βœ“" if not page.error else f"⚠ {page.error}"
            ])

        progress(1.0, desc="βœ… Discovery complete!")

        status = f"βœ… Found {len(pages)} pages. Review and click 'Extract Tokens' to continue."

        return status, state.get_logs(), pages_data

    except Exception as e:
        # Log the full traceback so crawler failures are debuggable from the UI.
        import traceback
        state.log(f"❌ Error: {str(e)}")
        state.log(traceback.format_exc())
        return f"❌ Error: {str(e)}", state.get_logs(), None
124
+
125
+
126
+ # =============================================================================
127
+ # PHASE 2: EXTRACT TOKENS
128
+ # =============================================================================
129
+
130
def _collect_selected_urls(pages_data):
    """Pull the URLs of selected rows out of the pages table value.

    Gradio may deliver the Dataframe value as a pandas DataFrame, as a
    {"data": [...]} dict, or as a plain list of rows — handle all three.
    Row layout is [Select, URL, Title, Type, Status].
    Returns a (possibly empty) list of URL strings.
    """
    selected_urls = []

    try:
        # pandas DataFrame?
        if hasattr(pages_data, 'iterrows'):
            state.log(f"πŸ“₯ DataFrame with {len(pages_data)} rows, columns: {list(pages_data.columns)}")

            for _, row in pages_data.iterrows():
                try:
                    # Try column names first
                    is_selected = row.get('Select', row.iloc[0] if len(row) > 0 else False)
                    url = row.get('URL', row.iloc[1] if len(row) > 1 else '')
                except Exception:  # was a bare except: — don't swallow SystemExit/KeyboardInterrupt
                    # Fallback to positional access
                    is_selected = row.iloc[0] if len(row) > 0 else False
                    url = row.iloc[1] if len(row) > 1 else ''

                if is_selected and url:
                    selected_urls.append(url)

        # Dict payload (Gradio sometimes sends this)
        elif isinstance(pages_data, dict):
            state.log(f"πŸ“₯ Dict with keys: {list(pages_data.keys())}")
            for row in pages_data.get('data', []):
                if isinstance(row, (list, tuple)) and len(row) >= 2 and row[0]:
                    selected_urls.append(row[1])

        # Plain list of rows
        elif isinstance(pages_data, (list, tuple)):
            state.log(f"πŸ“₯ List with {len(pages_data)} items")
            for row in pages_data:
                if isinstance(row, (list, tuple)) and len(row) >= 2 and row[0]:
                    selected_urls.append(row[1])

    except Exception as e:
        state.log(f"❌ Error parsing pages_data: {str(e)}")
        import traceback
        state.log(traceback.format_exc())

    return selected_urls


async def extract_tokens(pages_data, progress=gr.Progress()):
    """Extract tokens from selected pages (both viewports).

    Runs the extractor at desktop (1440px) and mobile (375px) viewports over
    the selected URLs, then normalizes each result. Populates
    state.{desktop,mobile}_{raw,normalized}.

    Returns (status_markdown, log_text, desktop_table_data, mobile_table_data),
    where the table-data dicts come from format_tokens_for_display().
    """

    state.log(f"πŸ“₯ Received pages_data type: {type(pages_data)}")

    if pages_data is None:
        return "❌ Please discover pages first", state.get_logs(), None, None

    # Parsing of the Gradio table value is factored out for readability.
    selected_urls = _collect_selected_urls(pages_data)
    state.log(f"πŸ“‹ Found {len(selected_urls)} selected URLs")

    # If still no URLs, fall back to the stored discovered pages.
    if not selected_urls and state.discovered_pages:
        state.log("⚠️ No URLs from table, using all discovered pages")
        selected_urls = [p.url for p in state.discovered_pages if not p.error][:10]

    if not selected_urls:
        return "❌ No pages selected. Please select pages or rediscover.", state.get_logs(), None, None

    # Limit to 10 pages for performance
    selected_urls = selected_urls[:10]

    state.log(f"πŸ“‹ Extracting from {len(selected_urls)} pages:")
    for url in selected_urls[:3]:
        state.log(f" β€’ {url}")
    if len(selected_urls) > 3:
        state.log(f" ... and {len(selected_urls) - 3} more")

    progress(0.05, desc="πŸš€ Starting extraction...")

    try:
        schema = get_schema()
        extractor_mod = get_extractor()
        normalizer_mod = get_normalizer()

        # === DESKTOP EXTRACTION ===
        state.log("")
        state.log("πŸ–₯️ DESKTOP EXTRACTION (1440px)")
        progress(0.1, desc="πŸ–₯️ Extracting desktop tokens...")

        desktop_extractor = extractor_mod.TokenExtractor(viewport=schema.Viewport.DESKTOP)

        def desktop_progress(p):
            # Desktop pass maps onto the 0.10–0.45 span of the progress bar.
            progress(0.1 + (p * 0.35), desc=f"πŸ–₯️ Desktop... {int(p*100)}%")

        state.desktop_raw = await desktop_extractor.extract(selected_urls, progress_callback=desktop_progress)

        state.log(f" Raw: {len(state.desktop_raw.colors)} colors, {len(state.desktop_raw.typography)} typography, {len(state.desktop_raw.spacing)} spacing")

        # Normalize desktop
        state.log(" Normalizing...")
        state.desktop_normalized = normalizer_mod.normalize_tokens(state.desktop_raw)
        state.log(f" Normalized: {len(state.desktop_normalized.colors)} colors, {len(state.desktop_normalized.typography)} typography, {len(state.desktop_normalized.spacing)} spacing")

        # === MOBILE EXTRACTION ===
        state.log("")
        state.log("πŸ“± MOBILE EXTRACTION (375px)")
        progress(0.5, desc="πŸ“± Extracting mobile tokens...")

        mobile_extractor = extractor_mod.TokenExtractor(viewport=schema.Viewport.MOBILE)

        def mobile_progress(p):
            # Mobile pass maps onto the 0.50–0.85 span of the progress bar.
            progress(0.5 + (p * 0.35), desc=f"πŸ“± Mobile... {int(p*100)}%")

        state.mobile_raw = await mobile_extractor.extract(selected_urls, progress_callback=mobile_progress)

        state.log(f" Raw: {len(state.mobile_raw.colors)} colors, {len(state.mobile_raw.typography)} typography, {len(state.mobile_raw.spacing)} spacing")

        # Normalize mobile
        state.log(" Normalizing...")
        state.mobile_normalized = normalizer_mod.normalize_tokens(state.mobile_raw)
        state.log(f" Normalized: {len(state.mobile_normalized.colors)} colors, {len(state.mobile_normalized.typography)} typography, {len(state.mobile_normalized.spacing)} spacing")

        progress(0.95, desc="πŸ“Š Preparing results...")

        # Format results for Stage 1 UI
        desktop_data = format_tokens_for_display(state.desktop_normalized)
        mobile_data = format_tokens_for_display(state.mobile_normalized)

        state.log("")
        state.log("=" * 50)
        state.log("βœ… EXTRACTION COMPLETE!")
        state.log("=" * 50)

        progress(1.0, desc="βœ… Complete!")

        status = f"""## βœ… Extraction Complete!

| Viewport | Colors | Typography | Spacing |
|----------|--------|------------|---------|
| Desktop | {len(state.desktop_normalized.colors)} | {len(state.desktop_normalized.typography)} | {len(state.desktop_normalized.spacing)} |
| Mobile | {len(state.mobile_normalized.colors)} | {len(state.mobile_normalized.typography)} | {len(state.mobile_normalized.spacing)} |

**Next:** Review the tokens below. Accept or reject, then proceed to Stage 2.
"""

        return status, state.get_logs(), desktop_data, mobile_data

    except Exception as e:
        import traceback
        state.log(f"❌ Error: {str(e)}")
        state.log(traceback.format_exc())
        return f"❌ Error: {str(e)}", state.get_logs(), None, None
274
+
275
+
276
def format_tokens_for_display(normalized) -> dict:
    """Convert a NormalizedTokens object into table rows for the Stage 1 UI.

    Returns a dict with "colors", "typography" and "spacing" keys, each a
    list of rows whose first cell is the "Accept" checkbox (True by default).
    Output is capped at 50 colors / 30 typography / 20 spacing entries.
    Returns empty lists when *normalized* is None.
    """
    if normalized is None:
        return {"colors": [], "typography": [], "spacing": []}

    def as_items(group):
        # Token groups may be stored as a dict (name -> token) or a list.
        return list(group.values()) if isinstance(group, dict) else group

    # Colors: most frequent first.
    color_rows = [
        [
            True,  # Accept checkbox
            c.value,
            c.suggested_name or "",
            c.frequency,
            c.confidence.value if c.confidence else "medium",
            f"{c.contrast_white:.1f}:1" if c.contrast_white else "N/A",
            "βœ“" if c.wcag_aa_small_text else "βœ—",
            ", ".join(c.contexts[:2]) if c.contexts else "",
        ]
        for c in sorted(as_items(normalized.colors), key=lambda x: -x.frequency)[:50]
    ]

    # Typography: most frequent first.
    typography_rows = [
        [
            True,  # Accept checkbox
            t.font_family,
            t.font_size,
            str(t.font_weight),
            t.line_height or "",
            t.suggested_name or "",
            t.frequency,
            t.confidence.value if t.confidence else "medium",
        ]
        for t in sorted(as_items(normalized.typography), key=lambda x: -x.frequency)[:30]
    ]

    # Spacing: smallest pixel value first.
    spacing_rows = [
        [
            True,  # Accept checkbox
            s.value,
            f"{s.value_px}px",
            s.suggested_name or "",
            s.frequency,
            "βœ“" if s.fits_base_8 else "",
            s.confidence.value if s.confidence else "medium",
        ]
        for s in sorted(as_items(normalized.spacing), key=lambda x: x.value_px)[:20]
    ]

    return {
        "colors": color_rows,
        "typography": typography_rows,
        "spacing": spacing_rows,
    }
330
+
331
+
332
def switch_viewport(viewport: str):
    """Return (colors, typography, spacing) table rows for the chosen viewport."""
    source = (
        state.desktop_normalized
        if viewport == "Desktop (1440px)"
        else state.mobile_normalized
    )
    data = format_tokens_for_display(source)
    return data["colors"], data["typography"], data["spacing"]
340
+
341
+
342
+ # =============================================================================
343
+ # STAGE 3: EXPORT
344
+ # =============================================================================
345
+
346
def _token_items(group):
    """Return a token group as a list, whether it is stored as a dict
    (name -> token) or as a plain list — same duality handled by
    format_tokens_for_display."""
    return list(group.values()) if isinstance(group, dict) else list(group)


def _serialize_viewport(normalized):
    """Serialize one viewport's normalized tokens into plain JSON-able dicts."""
    return {
        "colors": [
            {"value": c.value, "name": c.suggested_name, "frequency": c.frequency,
             "confidence": c.confidence.value if c.confidence else "medium"}
            for c in _token_items(normalized.colors)
        ],
        "typography": [
            {"font_family": t.font_family, "font_size": t.font_size,
             "font_weight": t.font_weight, "line_height": t.line_height,
             "name": t.suggested_name, "frequency": t.frequency}
            for t in _token_items(normalized.typography)
        ],
        "spacing": [
            {"value": s.value, "value_px": s.value_px, "name": s.suggested_name,
             "frequency": s.frequency, "fits_base_8": s.fits_base_8}
            for s in _token_items(normalized.spacing)
        ],
    }


def export_tokens_json():
    """Export both viewports' tokens as a pretty-printed JSON string.

    Fixes a bug: token groups may be dicts keyed by name (the display
    formatter already branches on ``isinstance(..., dict)``), but this
    function iterated the groups directly — over a dict that yields key
    strings, crashing on attribute access (e.g. ``c.value``).
    ``_token_items`` normalizes both shapes, and the previously duplicated
    desktop/mobile serialization is shared via ``_serialize_viewport``.
    """
    result = {
        "metadata": {
            "source_url": state.base_url,
            "extracted_at": datetime.now().isoformat(),
            "version": "v1-extracted",
        },
        "desktop": _serialize_viewport(state.desktop_normalized) if state.desktop_normalized else None,
        "mobile": _serialize_viewport(state.mobile_normalized) if state.mobile_normalized else None,
    }

    # default=str keeps the export resilient to non-JSON-native token fields.
    return json.dumps(result, indent=2, default=str)
399
+
400
+
401
+ # =============================================================================
402
+ # UI BUILDING
403
+ # =============================================================================
404
+
405
def create_ui():
    """Create the Gradio interface.

    Lays out the workflow top-to-bottom — configuration, page discovery,
    Stage 1 token review, Stage 2 placeholder, Stage 3 export — then wires
    the event handlers. Returns the gr.Blocks app.
    """

    with gr.Blocks(
        title="Design System Extractor v2",
        theme=gr.themes.Soft(),
        css="""
        .color-swatch { display: inline-block; width: 24px; height: 24px; border-radius: 4px; margin-right: 8px; vertical-align: middle; }
        """
    ) as app:

        gr.Markdown("""
        # 🎨 Design System Extractor v2

        **Reverse-engineer design systems from live websites.**

        A semi-automated, human-in-the-loop system that extracts, normalizes, and upgrades design tokens.

        ---
        """)

        # =================================================================
        # CONFIGURATION
        # =================================================================

        # Open the accordion only when no token was supplied via the environment.
        with gr.Accordion("βš™οΈ Configuration", open=not bool(HF_TOKEN_FROM_ENV)):
            gr.Markdown("**HuggingFace Token** β€” Required for Stage 2 (AI upgrades)")
            with gr.Row():
                hf_token_input = gr.Textbox(
                    label="HF Token", placeholder="hf_xxxx", type="password",
                    scale=4, value=HF_TOKEN_FROM_ENV,
                )
                save_token_btn = gr.Button("πŸ’Ύ Save", scale=1)
            token_status = gr.Markdown("βœ… Token loaded" if HF_TOKEN_FROM_ENV else "⏳ Enter token")

            def save_token(token):
                # Persist the token in the process environment so later stages
                # can read it back via os.getenv("HF_TOKEN").
                if token and len(token) > 10:
                    os.environ["HF_TOKEN"] = token.strip()
                    return "βœ… Token saved!"
                return "❌ Invalid token"

            save_token_btn.click(save_token, [hf_token_input], [token_status])

        # =================================================================
        # URL INPUT & PAGE DISCOVERY
        # =================================================================

        with gr.Accordion("πŸ” Step 1: Discover Pages", open=True):
            gr.Markdown("Enter your website URL to discover pages for extraction.")

            with gr.Row():
                url_input = gr.Textbox(label="Website URL", placeholder="https://example.com", scale=4)
                discover_btn = gr.Button("πŸ” Discover Pages", variant="primary", scale=1)

            discover_status = gr.Markdown("")

            with gr.Row():
                log_output = gr.Textbox(label="πŸ“‹ Log", lines=8, interactive=False)

            # Hidden until discovery succeeds; revealed by the .then() chained
            # onto discover_btn.click below.
            pages_table = gr.Dataframe(
                headers=["Select", "URL", "Title", "Type", "Status"],
                datatype=["bool", "str", "str", "str", "str"],
                label="Discovered Pages",
                interactive=True,
                visible=False,
            )

            extract_btn = gr.Button("πŸš€ Extract Tokens (Desktop + Mobile)", variant="primary", visible=False)

        # =================================================================
        # STAGE 1: EXTRACTION REVIEW
        # =================================================================

        with gr.Accordion("πŸ“Š Stage 1: Review Extracted Tokens", open=False) as stage1_accordion:

            extraction_status = gr.Markdown("")

            gr.Markdown("""
            **Review the extracted tokens.** Toggle between Desktop and Mobile viewports.
            Accept or reject tokens, then proceed to Stage 2 for AI-powered upgrades.
            """)

            viewport_toggle = gr.Radio(
                choices=["Desktop (1440px)", "Mobile (375px)"],
                value="Desktop (1440px)",
                label="Viewport",
            )

            # Column layouts below must mirror the row shapes produced by
            # format_tokens_for_display().
            with gr.Tabs():
                with gr.Tab("🎨 Colors"):
                    colors_table = gr.Dataframe(
                        headers=["Accept", "Color", "Suggested Name", "Frequency", "Confidence", "Contrast", "AA", "Context"],
                        datatype=["bool", "str", "str", "number", "str", "str", "str", "str"],
                        label="Colors",
                        interactive=True,
                    )

                with gr.Tab("πŸ“ Typography"):
                    typography_table = gr.Dataframe(
                        headers=["Accept", "Font", "Size", "Weight", "Line Height", "Suggested Name", "Frequency", "Confidence"],
                        datatype=["bool", "str", "str", "str", "str", "str", "number", "str"],
                        label="Typography",
                        interactive=True,
                    )

                with gr.Tab("πŸ“ Spacing"):
                    spacing_table = gr.Dataframe(
                        headers=["Accept", "Value", "Pixels", "Suggested Name", "Frequency", "Base 8", "Confidence"],
                        datatype=["bool", "str", "str", "str", "number", "str", "str"],
                        label="Spacing",
                        interactive=True,
                    )

            # NOTE(review): no .click handler is wired for this button in the
            # visible code — Stage 2 appears to be unimplemented.
            proceed_stage2_btn = gr.Button("➑️ Proceed to Stage 2: AI Upgrades", variant="primary")

        # =================================================================
        # STAGE 2: AI UPGRADES (Placeholder)
        # =================================================================

        with gr.Accordion("🧠 Stage 2: AI-Powered Upgrades (Coming Soon)", open=False):
            gr.Markdown("""
            **Agent 3 (Design System Advisor)** will analyze your tokens and propose:

            - **Type Scale Options:** Choose from A/B/C (1.25, 1.333, 1.414 ratios)
            - **Color Ramp Generation:** AA-compliant tints and shades
            - **Spacing System:** Aligned to 8px base grid
            - **Naming Conventions:** Semantic token names

            Each option will show a **live preview** so you can see the changes before accepting.

            *Requires HuggingFace token for LLM inference.*
            """)

        # =================================================================
        # STAGE 3: EXPORT
        # =================================================================

        with gr.Accordion("πŸ“¦ Stage 3: Export", open=False):
            gr.Markdown("Export your design tokens to JSON (compatible with Figma Tokens Studio).")

            export_btn = gr.Button("πŸ“₯ Export JSON", variant="secondary")
            export_output = gr.Code(label="Tokens JSON", language="json", lines=20)

            export_btn.click(export_tokens_json, outputs=[export_output])

        # =================================================================
        # EVENT HANDLERS
        # =================================================================

        # Per-viewport table data cached in session state so the viewport
        # toggle can swap views without re-running extraction.
        desktop_data = gr.State({})
        mobile_data = gr.State({})

        # Discover pages, then reveal the pages table and the extract button.
        discover_btn.click(
            fn=discover_pages,
            inputs=[url_input],
            outputs=[discover_status, log_output, pages_table],
        ).then(
            fn=lambda: (gr.update(visible=True), gr.update(visible=True)),
            outputs=[pages_table, extract_btn],
        )

        # Extract tokens, populate the Stage 1 tables from the desktop data,
        # then open the Stage 1 accordion.
        extract_btn.click(
            fn=extract_tokens,
            inputs=[pages_table],
            outputs=[extraction_status, log_output, desktop_data, mobile_data],
        ).then(
            fn=lambda d: (d.get("colors", []), d.get("typography", []), d.get("spacing", [])),
            inputs=[desktop_data],
            outputs=[colors_table, typography_table, spacing_table],
        ).then(
            fn=lambda: gr.update(open=True),
            outputs=[stage1_accordion],
        )

        # Viewport toggle re-renders all three tables from module state.
        viewport_toggle.change(
            fn=switch_viewport,
            inputs=[viewport_toggle],
            outputs=[colors_table, typography_table, spacing_table],
        )

        # =================================================================
        # FOOTER
        # =================================================================

        gr.Markdown("""
        ---
        **Design System Extractor v2** | Built with Playwright + Gradio + LangGraph + HuggingFace

        *A semi-automated co-pilot for design system recovery and modernization.*
        """)

    return app
601
+
602
+
603
+ # =============================================================================
604
+ # MAIN
605
+ # =============================================================================
606
+
607
if __name__ == "__main__":
    # Bind to all interfaces on port 7860 (the Hugging Face Spaces default).
    app = create_ui()
    app.launch(server_name="0.0.0.0", server_port=7860)