riazmo committed on
Commit
098e9b4
·
verified ·
1 Parent(s): a2498f7

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +107 -73
  2. requirements.txt +2 -0
app.py CHANGED
@@ -188,7 +188,7 @@ async def extract_tokens(pages_data, progress=gr.Progress()):
188
  # Try column names first
189
  is_selected = row.get('Select', row.iloc[0] if len(row) > 0 else False)
190
  url = row.get('URL', row.iloc[1] if len(row) > 1 else '')
191
- except:
192
  # Fallback to positional
193
  is_selected = row.iloc[0] if len(row) > 0 else False
194
  url = row.iloc[1] if len(row) > 1 else ''
@@ -1169,88 +1169,98 @@ async def run_stage2_analysis_v2(
1169
  semantic_analysis = getattr(state, 'semantic_analysis', {})
1170
  desktop_dict = normalized_to_dict(state.desktop_normalized)
1171
 
1172
- # Run agents (with individual error handling)
1173
- # Brand Identifier
1174
- try:
1175
- brand_result = await brand_agent.analyze(
1176
- color_tokens=desktop_dict.get("colors", {}),
1177
- semantic_analysis=semantic_analysis,
1178
- log_callback=state.log,
1179
- )
1180
- # Log what the LLM contributed
1181
- if brand_result:
1182
- bp = brand_result.brand_primary or {}
1183
- bs = brand_result.brand_secondary or {}
1184
- state.log(f" β”œβ”€ Brand Primary: {bp.get('color', 'N/A')} ({bp.get('confidence', 'N/A')} confidence)")
1185
- state.log(f" β”‚ └─ Reasoning: {bp.get('reasoning', 'N/A')[:80]}")
1186
- state.log(f" β”œβ”€ Brand Secondary: {bs.get('color', 'N/A')}")
1187
- state.log(f" β”œβ”€ Palette Strategy: {brand_result.palette_strategy or 'N/A'}")
1188
- state.log(f" └─ Cohesion Score: {brand_result.cohesion_score}/10 β€” {brand_result.cohesion_notes[:60] if brand_result.cohesion_notes else 'N/A'}")
1189
- except Exception as e:
1190
- state.log(f" ⚠️ Brand Identifier failed: {str(e)[:120]}")
1191
- brand_result = BrandIdentification()
 
1192
 
1193
- # Benchmark Advisor
1194
- if benchmark_comparisons:
 
 
1195
  try:
1196
- benchmark_advice = await benchmark_agent.analyze(
1197
  user_ratio=rule_results.typography.detected_ratio,
1198
  user_base=int(rule_results.typography.base_size) if rule_results.typography.sizes_px else 16,
1199
  user_spacing=rule_results.spacing.detected_base,
1200
  benchmark_comparisons=benchmark_comparisons,
1201
  log_callback=state.log,
1202
  )
1203
- # Log what the LLM contributed
1204
- if benchmark_advice:
1205
- state.log(f" β”œβ”€ Recommended: {benchmark_advice.recommended_benchmark_name or benchmark_advice.recommended_benchmark or 'N/A'}")
1206
- state.log(f" β”‚ └─ Reasoning: {benchmark_advice.reasoning[:80] if benchmark_advice.reasoning else 'N/A'}")
1207
- changes = benchmark_advice.alignment_changes or []
1208
  state.log(f" β”œβ”€ Changes Needed: {len(changes)}")
1209
  for i, ch in enumerate(changes[:3]):
1210
  if isinstance(ch, dict):
1211
  state.log(f" β”‚ {i+1}. {ch.get('change', ch.get('what', str(ch)[:60]))}")
1212
  else:
1213
  state.log(f" β”‚ {i+1}. {str(ch)[:60]}")
1214
- if benchmark_advice.pros_of_alignment:
1215
- state.log(f" └─ Pros: {', '.join(str(p)[:30] for p in benchmark_advice.pros_of_alignment[:2])}")
 
1216
  except Exception as e:
1217
- state.log(f" ⚠️ Benchmark Advisor failed: {str(e)[:120]}")
1218
- benchmark_advice = BenchmarkAdvice()
1219
- else:
1220
- benchmark_advice = BenchmarkAdvice()
1221
 
1222
- # Best Practices Validator
1223
- try:
1224
- best_practices = await best_practices_agent.analyze(
1225
- rule_engine_results=rule_results,
1226
- log_callback=state.log,
1227
- )
1228
- # Log what the LLM contributed
1229
- if best_practices:
1230
- state.log(f" β”œβ”€ Overall Score: {best_practices.overall_score}/100")
1231
- passing_count = len(best_practices.passing_practices) if best_practices.passing_practices else 0
1232
- failing_count = len(best_practices.failing_practices) if best_practices.failing_practices else 0
1233
- state.log(f" β”œβ”€ Passing: {passing_count} | Failing: {failing_count}")
1234
- # Show checks dict
1235
- if best_practices.checks:
1236
- for check_name, check_data in list(best_practices.checks.items())[:3]:
1237
- if isinstance(check_data, dict):
1238
- status = check_data.get('status', '?')
1239
- note = check_data.get('note', '')[:50]
1240
- icon = "βœ…" if status == "pass" else "⚠️" if status == "warn" else "❌"
1241
- state.log(f" β”‚ {icon} {check_name}: {note}")
 
 
 
 
 
1242
  else:
1243
- state.log(f" β”‚ β€’ {check_name}: {check_data}")
1244
- # Show priority fixes
1245
- if best_practices.priority_fixes:
1246
- top_fix = best_practices.priority_fixes[0]
1247
- if isinstance(top_fix, dict):
1248
- state.log(f" └─ Top Fix: {top_fix.get('issue', top_fix.get('action', str(top_fix)[:60]))}")
1249
- else:
1250
- state.log(f" └─ Top Fix: {str(top_fix)[:60]}")
1251
- except Exception as e:
1252
- state.log(f" ⚠️ Best Practices Validator failed: {str(e)[:120]}")
1253
- best_practices = BestPracticesResult(overall_score=rule_results.consistency_score)
 
 
 
 
1254
  else:
1255
  # No HF client - use defaults
1256
  state.log(" └─ Using default values (no LLM)")
@@ -2135,7 +2145,7 @@ def get_detected_fonts() -> dict:
2135
  if weight:
2136
  try:
2137
  weights.add(int(weight))
2138
- except:
2139
  pass
2140
 
2141
  primary = max(fonts.items(), key=lambda x: x[1])[0] if fonts else "Unknown"
@@ -2160,7 +2170,7 @@ def get_base_font_size() -> int:
2160
  size = float(size_str)
2161
  if 14 <= size <= 18:
2162
  sizes[size] = sizes.get(size, 0) + t.frequency
2163
- except:
2164
  pass
2165
 
2166
  if sizes:
@@ -2501,7 +2511,7 @@ def format_llm_color_recommendations_table(final_recs: dict, semantic_analysis:
2501
  old_contrast = get_contrast_with_white(current)
2502
  new_contrast = get_contrast_with_white(suggested)
2503
  contrast_str = f"{old_contrast:.1f} β†’ {new_contrast:.1f}"
2504
- except:
2505
  contrast_str = "?"
2506
 
2507
  rows.append([
@@ -2551,7 +2561,7 @@ def format_typography_comparison_viewport(normalized_tokens, base_size: int, vie
2551
  size_str = str(t.font_size).replace('px', '').replace('rem', '').replace('em', '')
2552
  try:
2553
  return float(size_str)
2554
- except:
2555
  return 16
2556
 
2557
  current_typo.sort(key=lambda t: -parse_size(t))
@@ -2694,7 +2704,7 @@ def format_radius_with_tokens() -> str:
2694
  val = str(r.value).replace('px', '').replace('%', '')
2695
  try:
2696
  return float(val)
2697
- except:
2698
  return 999
2699
 
2700
  radii.sort(key=lambda r: parse_radius(r))
@@ -2790,6 +2800,21 @@ def snap_to_grid(value: float, base: int) -> int:
2790
  return round(value / base) * base
2791
 
2792
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2793
  def apply_selected_upgrades(type_choice: str, spacing_choice: str, apply_ramps: bool, color_recs_table: list = None):
2794
  """Apply selected upgrade options including LLM color recommendations."""
2795
  if not state.upgrade_recommendations:
@@ -3126,7 +3151,7 @@ def export_tokens_json():
3126
  "type": "color",
3127
  "source": "upgraded" if shade != "500" else "detected",
3128
  }
3129
- except:
3130
  result["colors"][clean_name] = {
3131
  "value": c.value,
3132
  "type": "color",
@@ -4312,7 +4337,7 @@ def create_ui():
4312
  # STAGE 3: EXPORT
4313
  # =================================================================
4314
 
4315
- with gr.Accordion("πŸ“¦ Stage 3: Export", open=False):
4316
  gr.Markdown("Export your finalized design tokens as JSON, compatible with **Figma Tokens Studio**.",
4317
  elem_classes=["section-desc"])
4318
  gr.Markdown("""
@@ -4416,6 +4441,15 @@ Copy the JSON output below or save it as a `.json` file for import into Figma.
4416
  fn=apply_selected_upgrades,
4417
  inputs=[type_scale_radio, spacing_radio, color_ramps_checkbox, color_recommendations_table],
4418
  outputs=[apply_status, stage2_log],
 
 
 
 
 
 
 
 
 
4419
  )
4420
 
4421
  # Stage 1: Download JSON
 
188
  # Try column names first
189
  is_selected = row.get('Select', row.iloc[0] if len(row) > 0 else False)
190
  url = row.get('URL', row.iloc[1] if len(row) > 1 else '')
191
+ except (KeyError, IndexError, TypeError):
192
  # Fallback to positional
193
  is_selected = row.iloc[0] if len(row) > 0 else False
194
  url = row.iloc[1] if len(row) > 1 else ''
 
1169
  semantic_analysis = getattr(state, 'semantic_analysis', {})
1170
  desktop_dict = normalized_to_dict(state.desktop_normalized)
1171
 
1172
+ # Run AURORA, ATLAS, SENTINEL in PARALLEL (then NEXUS sequentially)
1173
+ async def _run_aurora():
1174
+ """Run AURORA (Brand Identifier) agent."""
1175
+ try:
1176
+ result = await brand_agent.analyze(
1177
+ color_tokens=desktop_dict.get("colors", {}),
1178
+ semantic_analysis=semantic_analysis,
1179
+ log_callback=state.log,
1180
+ )
1181
+ if result:
1182
+ bp = result.brand_primary or {}
1183
+ bs = result.brand_secondary or {}
1184
+ state.log(f" β”œβ”€ Brand Primary: {bp.get('color', 'N/A')} ({bp.get('confidence', 'N/A')} confidence)")
1185
+ state.log(f" β”‚ └─ Reasoning: {bp.get('reasoning', 'N/A')[:80]}")
1186
+ state.log(f" β”œβ”€ Brand Secondary: {bs.get('color', 'N/A')}")
1187
+ state.log(f" β”œβ”€ Palette Strategy: {result.palette_strategy or 'N/A'}")
1188
+ state.log(f" └─ Cohesion Score: {result.cohesion_score}/10 β€” {result.cohesion_notes[:60] if result.cohesion_notes else 'N/A'}")
1189
+ return result
1190
+ except Exception as e:
1191
+ state.log(f" ⚠️ AURORA (Brand Identifier) failed: {str(e)[:120]}")
1192
+ return BrandIdentification()
1193
 
1194
+ async def _run_atlas():
1195
+ """Run ATLAS (Benchmark Advisor) agent."""
1196
+ if not benchmark_comparisons:
1197
+ return BenchmarkAdvice()
1198
  try:
1199
+ result = await benchmark_agent.analyze(
1200
  user_ratio=rule_results.typography.detected_ratio,
1201
  user_base=int(rule_results.typography.base_size) if rule_results.typography.sizes_px else 16,
1202
  user_spacing=rule_results.spacing.detected_base,
1203
  benchmark_comparisons=benchmark_comparisons,
1204
  log_callback=state.log,
1205
  )
1206
+ if result:
1207
+ state.log(f" β”œβ”€ Recommended: {result.recommended_benchmark_name or result.recommended_benchmark or 'N/A'}")
1208
+ state.log(f" β”‚ └─ Reasoning: {result.reasoning[:80] if result.reasoning else 'N/A'}")
1209
+ changes = result.alignment_changes or []
 
1210
  state.log(f" β”œβ”€ Changes Needed: {len(changes)}")
1211
  for i, ch in enumerate(changes[:3]):
1212
  if isinstance(ch, dict):
1213
  state.log(f" β”‚ {i+1}. {ch.get('change', ch.get('what', str(ch)[:60]))}")
1214
  else:
1215
  state.log(f" β”‚ {i+1}. {str(ch)[:60]}")
1216
+ if result.pros_of_alignment:
1217
+ state.log(f" └─ Pros: {', '.join(str(p)[:30] for p in result.pros_of_alignment[:2])}")
1218
+ return result
1219
  except Exception as e:
1220
+ state.log(f" ⚠️ ATLAS (Benchmark Advisor) failed: {str(e)[:120]}")
1221
+ return BenchmarkAdvice()
 
 
1222
 
1223
+ async def _run_sentinel():
1224
+ """Run SENTINEL (Best Practices Validator) agent."""
1225
+ try:
1226
+ result = await best_practices_agent.analyze(
1227
+ rule_engine_results=rule_results,
1228
+ log_callback=state.log,
1229
+ )
1230
+ if result:
1231
+ state.log(f" β”œβ”€ Overall Score: {result.overall_score}/100")
1232
+ passing_count = len(result.passing_practices) if result.passing_practices else 0
1233
+ failing_count = len(result.failing_practices) if result.failing_practices else 0
1234
+ state.log(f" β”œβ”€ Passing: {passing_count} | Failing: {failing_count}")
1235
+ if result.checks:
1236
+ for check_name, check_data in list(result.checks.items())[:3]:
1237
+ if isinstance(check_data, dict):
1238
+ status = check_data.get('status', '?')
1239
+ note = check_data.get('note', '')[:50]
1240
+ icon = "βœ…" if status == "pass" else "⚠️" if status == "warn" else "❌"
1241
+ state.log(f" β”‚ {icon} {check_name}: {note}")
1242
+ else:
1243
+ state.log(f" β”‚ β€’ {check_name}: {check_data}")
1244
+ if result.priority_fixes:
1245
+ top_fix = result.priority_fixes[0]
1246
+ if isinstance(top_fix, dict):
1247
+ state.log(f" └─ Top Fix: {top_fix.get('issue', top_fix.get('action', str(top_fix)[:60]))}")
1248
  else:
1249
+ state.log(f" └─ Top Fix: {str(top_fix)[:60]}")
1250
+ return result
1251
+ except Exception as e:
1252
+ state.log(f" ⚠️ SENTINEL (Best Practices) failed: {str(e)[:120]}")
1253
+ return BestPracticesResult(overall_score=rule_results.consistency_score)
1254
+
1255
+ # Execute AURORA + ATLAS + SENTINEL in parallel
1256
+ import asyncio
1257
+ state.log("")
1258
+ state.log(" πŸš€ Running 3 agents in parallel: AURORA | ATLAS | SENTINEL")
1259
+ brand_result, benchmark_advice, best_practices = await asyncio.gather(
1260
+ _run_aurora(),
1261
+ _run_atlas(),
1262
+ _run_sentinel(),
1263
+ )
1264
  else:
1265
  # No HF client - use defaults
1266
  state.log(" └─ Using default values (no LLM)")
 
2145
  if weight:
2146
  try:
2147
  weights.add(int(weight))
2148
+ except (ValueError, TypeError):
2149
  pass
2150
 
2151
  primary = max(fonts.items(), key=lambda x: x[1])[0] if fonts else "Unknown"
 
2170
  size = float(size_str)
2171
  if 14 <= size <= 18:
2172
  sizes[size] = sizes.get(size, 0) + t.frequency
2173
+ except (ValueError, TypeError):
2174
  pass
2175
 
2176
  if sizes:
 
2511
  old_contrast = get_contrast_with_white(current)
2512
  new_contrast = get_contrast_with_white(suggested)
2513
  contrast_str = f"{old_contrast:.1f} β†’ {new_contrast:.1f}"
2514
+ except (ValueError, TypeError, ZeroDivisionError):
2515
  contrast_str = "?"
2516
 
2517
  rows.append([
 
2561
  size_str = str(t.font_size).replace('px', '').replace('rem', '').replace('em', '')
2562
  try:
2563
  return float(size_str)
2564
+ except (ValueError, TypeError):
2565
  return 16
2566
 
2567
  current_typo.sort(key=lambda t: -parse_size(t))
 
2704
  val = str(r.value).replace('px', '').replace('%', '')
2705
  try:
2706
  return float(val)
2707
+ except (ValueError, TypeError):
2708
  return 999
2709
 
2710
  radii.sort(key=lambda r: parse_radius(r))
 
2800
  return round(value / base) * base
2801
 
2802
 
2803
+ def reset_to_original():
2804
+ """Reset all upgrade selections to defaults."""
2805
+ state.selected_upgrades = {}
2806
+ state.log("")
2807
+ state.log("↩️ Reset all upgrade selections to original values.")
2808
+
2809
+ return (
2810
+ "Scale 1.25 (Major Third) ⭐", # type_scale_radio
2811
+ "8px Base Grid ⭐", # spacing_radio
2812
+ True, # color_ramps_checkbox
2813
+ "## ↩️ Reset Complete\n\nAll selections reverted to defaults. Review and apply again when ready.", # apply_status
2814
+ state.get_logs(), # stage2_log
2815
+ )
2816
+
2817
+
2818
  def apply_selected_upgrades(type_choice: str, spacing_choice: str, apply_ramps: bool, color_recs_table: list = None):
2819
  """Apply selected upgrade options including LLM color recommendations."""
2820
  if not state.upgrade_recommendations:
 
3151
  "type": "color",
3152
  "source": "upgraded" if shade != "500" else "detected",
3153
  }
3154
+ except (ValueError, KeyError, TypeError, IndexError):
3155
  result["colors"][clean_name] = {
3156
  "value": c.value,
3157
  "type": "color",
 
4337
  # STAGE 3: EXPORT
4338
  # =================================================================
4339
 
4340
+ with gr.Accordion("πŸ“¦ Stage 3: Export", open=False) as stage3_accordion:
4341
  gr.Markdown("Export your finalized design tokens as JSON, compatible with **Figma Tokens Studio**.",
4342
  elem_classes=["section-desc"])
4343
  gr.Markdown("""
 
4441
  fn=apply_selected_upgrades,
4442
  inputs=[type_scale_radio, spacing_radio, color_ramps_checkbox, color_recommendations_table],
4443
  outputs=[apply_status, stage2_log],
4444
+ ).then(
4445
+ fn=lambda: gr.update(open=True),
4446
+ outputs=[stage3_accordion],
4447
+ )
4448
+
4449
+ # Stage 2: Reset to original
4450
+ reset_btn.click(
4451
+ fn=reset_to_original,
4452
+ outputs=[type_scale_radio, spacing_radio, color_ramps_checkbox, apply_status, stage2_log],
4453
  )
4454
 
4455
  # Stage 1: Download JSON
requirements.txt CHANGED
@@ -56,6 +56,8 @@ aiofiles>=23.0.0
56
  rich>=13.0.0
57
  tqdm>=4.66.0
58
  python-slugify>=8.0.0
 
 
59
 
60
  # -----------------------------------------------------------------------------
61
  # Testing (development only)
 
56
  rich>=13.0.0
57
  tqdm>=4.66.0
58
  python-slugify>=8.0.0
59
+ loguru>=0.7.0
60
+ jsonschema>=4.20.0
61
 
62
  # -----------------------------------------------------------------------------
63
  # Testing (development only)