riazmo committed on
Commit
a72d929
·
verified ·
1 Parent(s): 222ce98

Delete agents/advisor.py

Browse files
Files changed (1) hide show
  1. agents/advisor.py +0 -681
agents/advisor.py DELETED
@@ -1,681 +0,0 @@
1
- """
2
- Agent 3: Design System Best Practices Advisor
3
- Design System Extractor v2
4
-
5
- Persona: Senior Staff Design Systems Architect
6
-
7
- Responsibilities:
8
- - Analyze extracted tokens against best practices (Material, Polaris, Carbon)
9
- - Propose upgrade OPTIONS with rationale (LLM-powered reasoning)
10
- - Generate type scales, color ramps, spacing grids (Rule-based calculation)
11
- - Never change: font families, primary/secondary base colors
12
-
13
- Hybrid Approach:
14
- - LLM: Analyzes patterns, recommends options, explains rationale
15
- - Rules: Calculates actual values (math-based)
16
- """
17
-
18
- import os
19
- import json
20
- from typing import Optional, Callable
21
- from dataclasses import dataclass, field
22
- from enum import Enum
23
-
24
- from core.token_schema import (
25
- NormalizedTokens,
26
- ColorToken,
27
- TypographyToken,
28
- SpacingToken,
29
- UpgradeOption,
30
- UpgradeRecommendations,
31
- )
32
- from core.color_utils import (
33
- parse_color,
34
- generate_color_ramp,
35
- get_contrast_ratio,
36
- )
37
-
38
-
39
- # =============================================================================
40
- # TYPE SCALE CALCULATIONS (Rule-Based)
41
- # =============================================================================
42
-
43
class TypeScaleRatio(Enum):
    """Common type scale ratios.

    Each value is the multiplier between consecutive steps of a typographic
    scale. The names follow the musical-interval convention used by
    typographic scale tools (a larger ratio yields a more dramatic size
    contrast between steps).
    """
    MINOR_SECOND = 1.067
    MAJOR_SECOND = 1.125
    MINOR_THIRD = 1.200
    MAJOR_THIRD = 1.250
    PERFECT_FOURTH = 1.333
    AUGMENTED_FOURTH = 1.414
    PERFECT_FIFTH = 1.500
52
-
53
-
54
def generate_type_scale(base_size: float, ratio: float, steps_up: int = 5, steps_down: int = 2) -> dict:
    """
    Generate a type scale from a base size.

    Args:
        base_size: Base font size in pixels (e.g., 16)
        ratio: Scale ratio (e.g., 1.25)
        steps_up: Number of sizes larger than base
        steps_down: Number of sizes smaller than base

    Returns:
        Dict with size names and pixel values (rounded with built-in
        round(), i.e. banker's rounding on exact halves).
    """
    scale = {}

    # Generate sizes below base, smallest first. The two smallest sizes get
    # the conventional "xs"/"sm" names; any extra steps fall back to a
    # numbered name.
    # Fix: the previous implementation indexed ['xs', 'sm'][steps_down - i],
    # which raised IndexError whenever steps_down > 2.
    for i in range(steps_down, 0, -1):
        position = steps_down - i  # 0 == smallest generated size
        if position < 2:
            label = ("xs", "sm")[position]
        else:
            label = f"xs-{i}"
        scale[f"text.{label}"] = round(base_size / (ratio ** i))

    # Base size
    scale["text.base"] = round(base_size)

    # Generate sizes above base; named slots first, then a numbered fallback.
    size_names = ["text.lg", "text.xl", "heading.sm", "heading.md", "heading.lg", "heading.xl", "heading.2xl", "display"]
    for i in range(1, steps_up + 1):
        size = base_size * (ratio ** i)
        name = size_names[i - 1] if i <= len(size_names) else f"heading.{i}xl"
        scale[name] = round(size)

    return scale
86
-
87
-
88
- # =============================================================================
89
- # SPACING GRID CALCULATIONS (Rule-Based)
90
- # =============================================================================
91
-
92
def snap_to_grid(value: float, base: int = 8) -> int:
    """Return the multiple of *base* closest to *value*.

    Uses built-in round(), so exact halves follow banker's rounding.
    """
    nearest_multiple = round(value / base)
    return nearest_multiple * base
95
-
96
-
97
def generate_spacing_scale(base: int = 8, max_value: int = 96) -> dict:
    """
    Generate a spacing scale based on a base unit.

    Args:
        base: Base unit (4 or 8)
        max_value: Maximum spacing value

    Returns:
        Dict mapping "space.<multiplier>" names to "<n>px" strings,
        capped at max_value.
    """
    # (label, multiplier) pairs; the label mirrors the multiplier so
    # "space.2" is always 2 * base.
    steps = [
        ("0.5", 0.5), ("1", 1), ("1.5", 1.5), ("2", 2), ("2.5", 2.5),
        ("3", 3), ("4", 4), ("5", 5), ("6", 6), ("8", 8),
        ("10", 10), ("12", 12), ("16", 16), ("20", 20), ("24", 24),
    ]
    return {
        f"space.{label}": f"{px}px"
        for label, mult in steps
        if (px := int(base * mult)) <= max_value
    }
118
-
119
-
120
def analyze_spacing_fit(detected_values: list[int], base: int = 8) -> dict:
    """
    Analyze how well detected spacing values fit a grid.

    Returns:
        Dict with the grid base, fit percentage, per-value adjustment
        records, and aligned/needs-adjustment counts.
    """
    # Collect only the values that are off-grid; everything else is aligned.
    adjustments = [
        {"original": value, "snapped": snapped, "delta": snapped - value}
        for value in detected_values
        if (snapped := snap_to_grid(value, base)) != value
    ]
    total = len(detected_values)
    aligned = total - len(adjustments)

    return {
        "base": base,
        "fit_percentage": (aligned / total * 100) if total else 0,
        "adjustments": adjustments,
        "already_aligned": aligned,
        "needs_adjustment": len(adjustments),
    }
148
-
149
-
150
- # =============================================================================
151
- # COLOR RAMP GENERATION (Rule-Based)
152
- # =============================================================================
153
-
154
def generate_semantic_color_ramp(base_color: str, role: str = "primary") -> dict:
    """
    Generate a full color ramp from a base color.

    Args:
        base_color: Hex color (e.g., "#373737")
        role: Semantic role used as the token prefix (primary, secondary, ...)

    Returns:
        Dict mapping "<role>.<shade>" (shades 50-900) to hex values, in the
        order produced by generate_color_ramp.
    """
    shade_steps = ("50", "100", "200", "300", "400", "500", "600", "700", "800", "900")
    ramp_colors = generate_color_ramp(base_color)
    return {f"{role}.{step}": hex_value for step, hex_value in zip(shade_steps, ramp_colors)}
174
-
175
-
176
- # =============================================================================
177
- # LLM-POWERED ANALYSIS (Agent 3 Brain)
178
- # =============================================================================
179
-
180
class DesignSystemAdvisor:
    """
    Agent 3: Analyzes tokens and proposes upgrades.

    Uses LLM for reasoning and recommendations.
    Uses rules for calculating actual values, so the numeric scales and
    ramps never depend on LLM output — the LLM only ranks and explains
    pre-computed options.
    """

    def __init__(self, log_callback: Optional[Callable[[str], None]] = None):
        # Logger defaults to print so the advisor works standalone.
        self.log = log_callback or print
        # Empty HF_TOKEN makes _get_llm_analysis skip the LLM entirely.
        self.hf_token = os.getenv("HF_TOKEN", "")
        # Model id is deployment-overridable via AGENT3_MODEL.
        self.model = os.getenv("AGENT3_MODEL", "meta-llama/Llama-3.1-70B-Instruct")
192
-
193
    async def analyze(
        self,
        desktop_tokens: NormalizedTokens,
        mobile_tokens: NormalizedTokens,
    ) -> UpgradeRecommendations:
        """
        Analyze tokens and generate upgrade recommendations.

        Pipeline: gather statistics -> generate rule-based options (math)
        -> ask the LLM to rank/annotate them -> flag the recommended
        options in place.

        Args:
            desktop_tokens: Normalized desktop tokens
            mobile_tokens: Normalized mobile tokens

        Returns:
            UpgradeRecommendations with options for each category
        """
        self.log("🤖 Agent 3: Starting design system analysis...")

        # Gather token statistics
        stats = self._gather_statistics(desktop_tokens, mobile_tokens)
        self.log(f"📊 Gathered statistics: {len(stats['colors'])} colors, {len(stats['typography'])} typography, {len(stats['spacing'])} spacing")

        # Options are computed by rules first; the LLM never invents values,
        # it only chooses among these pre-computed options.
        self.log("🔧 Generating rule-based options...")
        type_scale_options = self._generate_type_scale_options(stats)
        spacing_options = self._generate_spacing_options(stats)
        color_ramp_options = self._generate_color_ramp_options(stats)

        # Falls back to rule-based defaults when no token or on any LLM error.
        self.log(f"🤖 Calling LLM ({self.model}) for analysis...")
        llm_analysis = await self._get_llm_analysis(stats, type_scale_options, spacing_options)

        # Mutates the option lists in place, marking recommended entries.
        self._apply_llm_recommendations(type_scale_options, spacing_options, color_ramp_options, llm_analysis)

        self.log("✅ Analysis complete!")

        return UpgradeRecommendations(
            typography_scales=type_scale_options,
            spacing_systems=spacing_options,
            color_ramps=color_ramp_options,
            naming_conventions=[],  # TODO: Add naming convention options
            llm_rationale=llm_analysis.get("rationale", ""),
            detected_patterns=llm_analysis.get("patterns", []),
            accessibility_issues=llm_analysis.get("accessibility", []),
        )
238
-
239
- def _gather_statistics(self, desktop: NormalizedTokens, mobile: NormalizedTokens) -> dict:
240
- """Gather statistics from tokens for analysis."""
241
-
242
- # Combine colors (colors are viewport-agnostic)
243
- colors = {}
244
- for name, token in desktop.colors.items():
245
- colors[token.value] = {
246
- "value": token.value,
247
- "frequency": token.frequency,
248
- "contexts": token.contexts,
249
- "suggested_name": token.suggested_name,
250
- }
251
-
252
- # Typography (viewport-specific)
253
- typography = {
254
- "desktop": [],
255
- "mobile": [],
256
- }
257
- for name, token in desktop.typography.items():
258
- typography["desktop"].append({
259
- "font_family": token.font_family,
260
- "font_size": token.font_size,
261
- "font_weight": token.font_weight,
262
- "frequency": token.frequency,
263
- })
264
- for name, token in mobile.typography.items():
265
- typography["mobile"].append({
266
- "font_family": token.font_family,
267
- "font_size": token.font_size,
268
- "font_weight": token.font_weight,
269
- "frequency": token.frequency,
270
- })
271
-
272
- # Spacing
273
- spacing = {
274
- "desktop": [],
275
- "mobile": [],
276
- }
277
- for name, token in desktop.spacing.items():
278
- spacing["desktop"].append(token.value_px)
279
- for name, token in mobile.spacing.items():
280
- spacing["mobile"].append(token.value_px)
281
-
282
- # Find most used font family
283
- font_families = {}
284
- for t in typography["desktop"]:
285
- family = t["font_family"]
286
- font_families[family] = font_families.get(family, 0) + t["frequency"]
287
-
288
- primary_font = max(font_families.items(), key=lambda x: x[1])[0] if font_families else "sans-serif"
289
-
290
- # Find base font size (most frequent in body context)
291
- font_sizes = [self._parse_size(t["font_size"]) for t in typography["desktop"]]
292
- base_font_size = 16 # Default
293
- if font_sizes:
294
- # Find most common size between 14-18px (typical body text)
295
- body_sizes = [s for s in font_sizes if 14 <= s <= 18]
296
- if body_sizes:
297
- base_font_size = max(set(body_sizes), key=body_sizes.count)
298
-
299
- return {
300
- "colors": colors,
301
- "typography": typography,
302
- "spacing": spacing,
303
- "primary_font": primary_font,
304
- "base_font_size": base_font_size,
305
- "all_font_sizes": list(set(font_sizes)),
306
- }
307
-
308
- def _parse_size(self, size_str: str) -> float:
309
- """Parse a size string to pixels."""
310
- if not size_str:
311
- return 16
312
- size_str = str(size_str).lower().strip()
313
- if "px" in size_str:
314
- return float(size_str.replace("px", ""))
315
- if "rem" in size_str:
316
- return float(size_str.replace("rem", "")) * 16
317
- if "em" in size_str:
318
- return float(size_str.replace("em", "")) * 16
319
- try:
320
- return float(size_str)
321
- except:
322
- return 16
323
-
324
- def _generate_type_scale_options(self, stats: dict) -> list[UpgradeOption]:
325
- """Generate type scale options."""
326
- base = stats["base_font_size"]
327
- options = []
328
-
329
- ratios = [
330
- ("minor_third", 1.200, "Conservative — subtle size differences"),
331
- ("major_third", 1.250, "Balanced — clear hierarchy without extremes"),
332
- ("perfect_fourth", 1.333, "Bold — strong visual hierarchy"),
333
- ]
334
-
335
- for id_name, ratio, desc in ratios:
336
- scale = generate_type_scale(base, ratio)
337
- options.append(UpgradeOption(
338
- id=f"type_scale_{id_name}",
339
- name=f"Type Scale {ratio}",
340
- description=desc,
341
- category="typography",
342
- values={
343
- "ratio": ratio,
344
- "base": base,
345
- "scale": scale,
346
- },
347
- pros=[
348
- f"Based on {base}px base (detected)",
349
- f"Ratio {ratio} is industry standard",
350
- ],
351
- cons=[],
352
- effort="low",
353
- recommended=False,
354
- ))
355
-
356
- # Add "keep original" option
357
- options.append(UpgradeOption(
358
- id="type_scale_keep",
359
- name="Keep Original",
360
- description="Preserve detected font sizes without scaling",
361
- category="typography",
362
- values={
363
- "ratio": None,
364
- "base": base,
365
- "scale": {f"size_{i}": s for i, s in enumerate(stats["all_font_sizes"])},
366
- },
367
- pros=["No changes needed", "Preserves original design"],
368
- cons=["May have inconsistent scale"],
369
- effort="none",
370
- recommended=False,
371
- ))
372
-
373
- return options
374
-
375
- def _generate_spacing_options(self, stats: dict) -> list[UpgradeOption]:
376
- """Generate spacing system options."""
377
- desktop_spacing = stats["spacing"]["desktop"]
378
-
379
- options = []
380
-
381
- for base in [8, 4]:
382
- fit_analysis = analyze_spacing_fit(desktop_spacing, base)
383
- scale = generate_spacing_scale(base)
384
-
385
- options.append(UpgradeOption(
386
- id=f"spacing_{base}px",
387
- name=f"{base}px Base Grid",
388
- description=f"{'Modern standard' if base == 8 else 'Finer control'} — {fit_analysis['fit_percentage']:.0f}% of your values already fit",
389
- category="spacing",
390
- values={
391
- "base": base,
392
- "scale": scale,
393
- "fit_analysis": fit_analysis,
394
- },
395
- pros=[
396
- f"{fit_analysis['already_aligned']} values already aligned",
397
- "Consistent visual rhythm" if base == 8 else "More granular control",
398
- ],
399
- cons=[
400
- f"{fit_analysis['needs_adjustment']} values need adjustment" if fit_analysis['needs_adjustment'] > 0 else None,
401
- ],
402
- effort="low" if fit_analysis['fit_percentage'] > 70 else "medium",
403
- recommended=False,
404
- ))
405
-
406
- # Add "keep original" option
407
- options.append(UpgradeOption(
408
- id="spacing_keep",
409
- name="Keep Original",
410
- description="Preserve detected spacing values",
411
- category="spacing",
412
- values={
413
- "base": None,
414
- "scale": {f"space_{v}": f"{v}px" for v in desktop_spacing},
415
- },
416
- pros=["No changes needed"],
417
- cons=["May have irregular spacing"],
418
- effort="none",
419
- recommended=False,
420
- ))
421
-
422
- return options
423
-
424
- def _generate_color_ramp_options(self, stats: dict) -> list[UpgradeOption]:
425
- """Generate color ramp options."""
426
- options = []
427
-
428
- # Find primary colors (high frequency, used in text/background)
429
- primary_candidates = []
430
- for hex_val, data in stats["colors"].items():
431
- if data["frequency"] > 10:
432
- primary_candidates.append((hex_val, data))
433
-
434
- # Sort by frequency
435
- primary_candidates.sort(key=lambda x: -x[1]["frequency"])
436
-
437
- # Generate ramps for top colors
438
- for hex_val, data in primary_candidates[:5]:
439
- role = self._infer_color_role(data)
440
- ramp = generate_semantic_color_ramp(hex_val, role)
441
-
442
- options.append(UpgradeOption(
443
- id=f"color_ramp_{role}",
444
- name=f"{role.title()} Ramp",
445
- description=f"Generate 50-900 shades from {hex_val}",
446
- category="colors",
447
- values={
448
- "base_color": hex_val,
449
- "role": role,
450
- "ramp": ramp,
451
- "preserve_base": True,
452
- },
453
- pros=[
454
- f"Base color {hex_val} preserved",
455
- "Full shade range for UI states",
456
- "AA contrast compliant",
457
- ],
458
- cons=[],
459
- effort="low",
460
- recommended=True,
461
- ))
462
-
463
- return options
464
-
465
- def _infer_color_role(self, color_data: dict) -> str:
466
- """Infer semantic role from color context."""
467
- contexts = " ".join(color_data.get("contexts", [])).lower()
468
-
469
- if "primary" in contexts or "brand" in contexts:
470
- return "primary"
471
- if "secondary" in contexts or "accent" in contexts:
472
- return "secondary"
473
- if "background" in contexts or "surface" in contexts:
474
- return "surface"
475
- if "text" in contexts or "foreground" in contexts:
476
- return "text"
477
- if "border" in contexts or "divider" in contexts:
478
- return "border"
479
- if "success" in contexts or "green" in contexts:
480
- return "success"
481
- if "error" in contexts or "red" in contexts:
482
- return "error"
483
- if "warning" in contexts or "yellow" in contexts:
484
- return "warning"
485
-
486
- return "neutral"
487
-
488
    async def _get_llm_analysis(self, stats: dict, type_options: list, spacing_options: list) -> dict:
        """Ask the LLM to evaluate the pre-computed options.

        Best-effort: if no HF token is configured, or the client import /
        request fails for any reason, falls back to
        _get_default_recommendations so the pipeline never blocks on the LLM.
        """

        if not self.hf_token:
            self.log("⚠️ No HF token, using default recommendations")
            return self._get_default_recommendations(stats, type_options, spacing_options)

        try:
            # Imported lazily so the advisor still works when the HF client
            # package is unavailable (the except below covers ImportError).
            from core.hf_inference import HFInferenceClient

            # HFInferenceClient gets token from settings/env
            client = HFInferenceClient()

            # Build prompt
            prompt = self._build_analysis_prompt(stats, type_options, spacing_options)

            self.log("📤 Sending analysis request to LLM...")

            # Use the agent-specific complete method
            response = await client.complete_async(
                agent_name="advisor",
                system_prompt="You are a Senior Design Systems Architect analyzing design tokens.",
                user_message=prompt,
                max_tokens=1500,
            )

            self.log("📥 Received LLM response")

            # Parse LLM response
            return self._parse_llm_response(response)

        except Exception as e:
            # Deliberately broad: any LLM failure degrades to defaults
            # rather than aborting the whole analysis.
            self.log(f"⚠️ LLM error: {str(e)}, using default recommendations")
            return self._get_default_recommendations(stats, type_options, spacing_options)
522
-
523
    def _build_analysis_prompt(self, stats: dict, type_options: list, spacing_options: list) -> str:
        """Build the prompt for LLM analysis.

        Summarizes the top tokens (truncated to keep the prompt small) and
        the pre-computed options, then requests a JSON verdict in exactly
        the schema _parse_llm_response expects.
        """

        # Format colors (top 10 only; contexts truncated to 3 entries)
        colors_str = "\n".join([
            f"  - {data['value']}: frequency={data['frequency']}, contexts={data['contexts'][:3]}"
            for hex_val, data in list(stats['colors'].items())[:10]
        ])

        # Format typography (desktop only, top 10)
        typo_str = "\n".join([
            f"  - {t['font_family']} {t['font_size']} (weight: {t['font_weight']}, freq: {t['frequency']})"
            for t in stats['typography']['desktop'][:10]
        ])

        # Format spacing (sorted, first 15 values)
        spacing_str = f"Desktop: {sorted(stats['spacing']['desktop'])[:15]}"

        # Format options (ratios only; the "keep" options are excluded)
        type_opts = "\n".join([
            f"  {i+1}. {opt.name} ({opt.values.get('ratio', 'N/A')}) - {opt.description}"
            for i, opt in enumerate(type_options[:3])
        ])

        spacing_opts = "\n".join([
            f"  {i+1}. {opt.name} - {opt.description}"
            for i, opt in enumerate(spacing_options[:2])
        ])

        # NOTE: the doubled braces below escape literal JSON braces in the
        # f-string; the schema must stay in sync with _parse_llm_response.
        return f"""You are a Senior Design Systems Architect. Analyze these extracted design tokens and provide recommendations.

## EXTRACTED TOKENS

### Colors (top 10 by frequency):
{colors_str}

### Typography:
Primary font: {stats['primary_font']}
Base size: {stats['base_font_size']}px
{typo_str}

### Spacing:
{spacing_str}

## OPTIONS TO EVALUATE

### Type Scale Options:
{type_opts}

### Spacing Options:
{spacing_opts}

## YOUR TASK

Based on best practices from Material Design, Shopify Polaris, and IBM Carbon Design System:

1. Which TYPE SCALE ratio would you recommend and why?
2. Which SPACING BASE (4px or 8px) fits better and why?
3. What ACCESSIBILITY concerns do you see?
4. What PATTERNS do you notice in this design system?

Respond in this JSON format:
{{
    "recommended_type_scale": "minor_third|major_third|perfect_fourth|keep",
    "recommended_spacing": "8px|4px|keep",
    "rationale": "Your detailed explanation...",
    "patterns": ["pattern 1", "pattern 2"],
    "accessibility": ["issue 1", "issue 2"]
}}"""
592
-
593
- def _parse_llm_response(self, response: str) -> dict:
594
- """Parse LLM response into structured recommendations."""
595
- try:
596
- # Try to extract JSON from response
597
- import re
598
- json_match = re.search(r'\{[\s\S]*\}', response)
599
- if json_match:
600
- return json.loads(json_match.group())
601
- except:
602
- pass
603
-
604
- # Default if parsing fails
605
- return {
606
- "recommended_type_scale": "major_third",
607
- "recommended_spacing": "8px",
608
- "rationale": response[:500] if response else "Analysis complete.",
609
- "patterns": [],
610
- "accessibility": [],
611
- }
612
-
613
- def _get_default_recommendations(self, stats: dict, type_options: list, spacing_options: list) -> dict:
614
- """Get default recommendations without LLM."""
615
-
616
- # Recommend based on fit analysis
617
- spacing_8_fit = 0
618
- spacing_4_fit = 0
619
- for opt in spacing_options:
620
- if opt.id == "spacing_8px":
621
- spacing_8_fit = opt.values.get("fit_analysis", {}).get("fit_percentage", 0)
622
- elif opt.id == "spacing_4px":
623
- spacing_4_fit = opt.values.get("fit_analysis", {}).get("fit_percentage", 0)
624
-
625
- return {
626
- "recommended_type_scale": "major_third", # Most common default
627
- "recommended_spacing": "8px" if spacing_8_fit >= spacing_4_fit else "4px",
628
- "rationale": "Based on industry best practices: Major Third (1.25) type scale provides clear hierarchy. 8px spacing grid is the modern standard used by Material Design and most design systems.",
629
- "patterns": [
630
- f"Primary font: {stats['primary_font']}",
631
- f"Base font size: {stats['base_font_size']}px",
632
- ],
633
- "accessibility": [],
634
- }
635
-
636
- def _apply_llm_recommendations(
637
- self,
638
- type_options: list[UpgradeOption],
639
- spacing_options: list[UpgradeOption],
640
- color_options: list[UpgradeOption],
641
- llm_analysis: dict
642
- ):
643
- """Apply LLM recommendations to options."""
644
-
645
- # Mark recommended type scale
646
- rec_type = llm_analysis.get("recommended_type_scale", "major_third")
647
- for opt in type_options:
648
- if rec_type in opt.id:
649
- opt.recommended = True
650
- opt.description += " ⭐ LLM Recommended"
651
-
652
- # Mark recommended spacing
653
- rec_spacing = llm_analysis.get("recommended_spacing", "8px")
654
- for opt in spacing_options:
655
- if rec_spacing.replace("px", "") in opt.id:
656
- opt.recommended = True
657
- opt.description += " ⭐ LLM Recommended"
658
-
659
-
660
- # =============================================================================
661
- # CONVENIENCE FUNCTIONS
662
- # =============================================================================
663
-
664
async def analyze_design_system(
    desktop_tokens: NormalizedTokens,
    mobile_tokens: NormalizedTokens,
    log_callback: Optional[Callable[[str], None]] = None
) -> UpgradeRecommendations:
    """
    Convenience function to analyze a design system.

    Thin wrapper that constructs a DesignSystemAdvisor and awaits its
    analyze() pipeline.

    Args:
        desktop_tokens: Normalized desktop tokens
        mobile_tokens: Normalized mobile tokens
        log_callback: Optional callback for logging

    Returns:
        UpgradeRecommendations
    """
    return await DesignSystemAdvisor(log_callback=log_callback).analyze(
        desktop_tokens, mobile_tokens
    )