riazmo commited on
Commit
2a8fe10
·
verified ·
1 Parent(s): e8110e0

Delete core/rule_engine.py

Browse files
Files changed (1) hide show
  1. core/rule_engine.py +0 -900
core/rule_engine.py DELETED
@@ -1,900 +0,0 @@
1
- """
2
- Rule Engine — Deterministic Design System Analysis
3
- ===================================================
4
-
5
- This module handles ALL calculations that don't need LLM reasoning:
6
- - Type scale detection
7
- - AA/AAA contrast checking
8
- - Algorithmic color fixes
9
- - Spacing grid detection
10
- - Color statistics and deduplication
11
-
12
- LLMs should ONLY be used for:
13
- - Brand color identification (requires context understanding)
14
- - Palette cohesion (subjective assessment)
15
- - Design maturity scoring (holistic evaluation)
16
- - Prioritized recommendations (business reasoning)
17
- """
18
-
19
- import colorsys
20
- import re
21
- from dataclasses import dataclass, field
22
- from functools import reduce
23
- from math import gcd
24
- from typing import Optional
25
-
26
-
27
- # =============================================================================
28
- # DATA CLASSES
29
- # =============================================================================
30
-
31
@dataclass
class TypeScaleAnalysis:
    """Results of type scale analysis.

    Produced by analyze_type_scale(); captures the ratio detected between
    consecutive font sizes, the closest named standard scale, and the
    ratio this engine recommends adopting.
    """
    detected_ratio: float  # average ratio between consecutive sorted sizes
    closest_standard_ratio: float  # nearest key in STANDARD_SCALES
    scale_name: str  # human-readable name of the closest standard scale
    is_consistent: bool  # True when ratio spread (variance) is < 0.15
    variance: float  # max(ratios) - min(ratios)
    sizes_px: list[float]  # sorted, de-duplicated font sizes in pixels
    ratios_between_sizes: list[float]  # ratios between consecutive sizes
    recommendation: float  # ratio to adopt going forward
    recommendation_name: str  # name matching `recommendation`
    base_size: float = 16.0  # Detected base/body font size

    def to_dict(self) -> dict:
        """Serialize for JSON output; ratios rounded to 3 decimal places.

        Note: ratios_between_sizes is intentionally omitted from the
        serialized form.
        """
        return {
            "detected_ratio": round(self.detected_ratio, 3),
            "closest_standard_ratio": self.closest_standard_ratio,
            "scale_name": self.scale_name,
            "is_consistent": self.is_consistent,
            "variance": round(self.variance, 3),
            "sizes_px": self.sizes_px,
            "base_size": self.base_size,
            "recommendation": self.recommendation,
            "recommendation_name": self.recommendation_name,
        }
57
-
58
-
59
@dataclass
class ColorAccessibility:
    """Accessibility analysis for a single color.

    Contrast values are WCAG 2.1 contrast ratios measured against pure
    white (#ffffff) and pure black (#000000) backgrounds.
    """
    hex_color: str
    name: str
    contrast_on_white: float
    contrast_on_black: float
    passes_aa_normal: bool  # 4.5:1
    passes_aa_large: bool  # 3.0:1
    passes_aaa_normal: bool  # 7.0:1
    best_text_color: str  # White or black
    suggested_fix: Optional[str] = None
    suggested_fix_contrast: Optional[float] = None

    def to_dict(self) -> dict:
        """Serialize for JSON output; contrast ratios rounded to 2 places.

        Bug fix: the optional fix contrast is gated on ``is not None``
        rather than truthiness, so a 0.0 value is preserved instead of
        being silently collapsed to None.
        """
        return {
            "color": self.hex_color,
            "name": self.name,
            "contrast_white": round(self.contrast_on_white, 2),
            "contrast_black": round(self.contrast_on_black, 2),
            "aa_normal": self.passes_aa_normal,
            "aa_large": self.passes_aa_large,
            "aaa_normal": self.passes_aaa_normal,
            "best_text": self.best_text_color,
            "suggested_fix": self.suggested_fix,
            "suggested_fix_contrast": (
                round(self.suggested_fix_contrast, 2)
                if self.suggested_fix_contrast is not None
                else None
            ),
        }
86
-
87
-
88
@dataclass
class SpacingGridAnalysis:
    """Results of spacing grid analysis.

    Produced by analyze_spacing_grid(); describes the base unit detected
    from spacing token values (via GCD) and the grid the engine recommends.
    """
    detected_base: int  # GCD of all spacing values, in px
    is_aligned: bool  # True when values fit a 4px/8px grid (see analyzer)
    alignment_percentage: float  # % of values divisible by detected_base
    misaligned_values: list[int]  # values not divisible by detected_base
    recommendation: int  # recommended grid base in px (4 or 8)
    recommendation_reason: str  # human-readable justification
    current_values: list[int]  # sorted, de-duplicated spacing values found
    suggested_scale: list[int]  # proposed spacing scale built on the base

    def to_dict(self) -> dict:
        """Serialize for JSON output; percentage rounded to 1 decimal."""
        return {
            "detected_base": self.detected_base,
            "is_aligned": self.is_aligned,
            "alignment_percentage": round(self.alignment_percentage, 1),
            "misaligned_values": self.misaligned_values,
            "recommendation": self.recommendation,
            "recommendation_reason": self.recommendation_reason,
            "current_values": self.current_values,
            "suggested_scale": self.suggested_scale,
        }
111
-
112
-
113
@dataclass
class ColorStatistics:
    """Statistical analysis of color palette.

    Produced by analyze_color_statistics(); counts duplicates, grays and
    saturated colors, and bins hues by name.
    """
    total_count: int  # all hex colors found (including exact duplicates)
    unique_count: int  # distinct hex colors (lowercased)
    duplicate_count: int  # total_count - unique_count
    gray_count: int  # unique colors with saturation < 0.1
    saturated_count: int  # unique colors with saturation > 0.3
    near_duplicates: list[tuple[str, str, float]]  # (color1, color2, similarity)
    hue_distribution: dict[str, int]  # {"red": 5, "blue": 3, ...}

    def to_dict(self) -> dict:
        """Serialize for JSON output.

        Note: only the *count* of near-duplicate pairs is exported, not
        the pairs themselves.
        """
        return {
            "total": self.total_count,
            "unique": self.unique_count,
            "duplicates": self.duplicate_count,
            "grays": self.gray_count,
            "saturated": self.saturated_count,
            "near_duplicates_count": len(self.near_duplicates),
            "hue_distribution": self.hue_distribution,
        }
134
-
135
-
136
@dataclass
class RuleEngineResults:
    """Complete rule engine analysis results.

    Aggregates the four per-domain analyses plus two roll-up numbers
    computed by run_rule_engine().
    """
    typography: TypeScaleAnalysis
    accessibility: list[ColorAccessibility]
    spacing: SpacingGridAnalysis
    color_stats: ColorStatistics

    # Summary
    aa_failures: int  # count of entries failing AA normal-text contrast
    consistency_score: int  # 0-100, sum of four 25-point components

    def to_dict(self) -> dict:
        """Serialize for JSON output.

        "accessibility" contains only AA failures (the actionable items);
        "accessibility_all" contains every analyzed entry.
        """
        return {
            "typography": self.typography.to_dict(),
            "accessibility": [a.to_dict() for a in self.accessibility if not a.passes_aa_normal],
            "accessibility_all": [a.to_dict() for a in self.accessibility],
            "spacing": self.spacing.to_dict(),
            "color_stats": self.color_stats.to_dict(),
            "summary": {
                "aa_failures": self.aa_failures,
                "consistency_score": self.consistency_score,
            }
        }
160
-
161
-
162
- # =============================================================================
163
- # COLOR UTILITIES
164
- # =============================================================================
165
-
166
def hex_to_rgb(hex_color: str) -> tuple[int, int, int]:
    """Convert a hex color string to an (r, g, b) tuple of 0-255 ints.

    Accepts #RGB and #RRGGBB as before, and — generalization — #RGBA and
    #RRGGBBAA forms, whose alpha channel is ignored.  The leading '#' is
    optional.

    Raises:
        ValueError: if the string contains non-hex digits.
    """
    hex_color = hex_color.lstrip('#')
    if len(hex_color) in (3, 4):
        # Expand shorthand: each digit doubles (#abc -> aabbcc).
        hex_color = ''.join([c * 2 for c in hex_color])
    # Slicing at offsets 0/2/4 naturally drops a trailing alpha byte.
    return tuple(int(hex_color[i:i + 2], 16) for i in (0, 2, 4))
172
-
173
-
174
def rgb_to_hex(r: int, g: int, b: int) -> str:
    """Format an RGB triple as a lowercase "#rrggbb" string.

    Each channel is clamped into the valid 0-255 range first.
    """
    clamped = (max(0, min(255, channel)) for channel in (r, g, b))
    return "#" + "".join(f"{channel:02x}" for channel in clamped)
180
-
181
-
182
def get_relative_luminance(hex_color: str) -> float:
    """Calculate relative luminance per WCAG 2.1 (0.0 = black, 1.0 = white)."""

    def linearize(channel: int) -> float:
        # sRGB -> linear-light transfer function (WCAG 2.1 thresholds).
        v = channel / 255
        if v <= 0.03928:
            return v / 12.92
        return ((v + 0.055) / 1.055) ** 2.4

    red, green, blue = hex_to_rgb(hex_color)
    # Rec. 709 luma coefficients, as specified by WCAG.
    return (0.2126 * linearize(red)
            + 0.7152 * linearize(green)
            + 0.0722 * linearize(blue))
191
-
192
-
193
def get_contrast_ratio(color1: str, color2: str) -> float:
    """Calculate WCAG contrast ratio between two colors (range 1.0-21.0)."""
    lum_a = get_relative_luminance(color1)
    lum_b = get_relative_luminance(color2)
    # Order so lum_a is the lighter of the two.
    if lum_a < lum_b:
        lum_a, lum_b = lum_b, lum_a
    return (lum_a + 0.05) / (lum_b + 0.05)
200
-
201
-
202
def is_gray(hex_color: str, threshold: float = 0.1) -> bool:
    """Check if color is a gray (HSV saturation below `threshold`)."""
    red, green, blue = hex_to_rgb(hex_color)
    _, saturation, _ = colorsys.rgb_to_hsv(red / 255, green / 255, blue / 255)
    return saturation < threshold
207
-
208
-
209
def get_saturation(hex_color: str) -> float:
    """Return the color's HSV saturation (0.0 = gray, 1.0 = fully saturated)."""
    red, green, blue = hex_to_rgb(hex_color)
    hsv = colorsys.rgb_to_hsv(red / 255, green / 255, blue / 255)
    return hsv[1]
214
-
215
-
216
def get_hue_name(hex_color: str) -> str:
    """Return a human-readable hue name for the color.

    Low-saturation colors are reported as "gray"; otherwise the hue angle
    is bucketed into eight named ranges.
    """
    red, green, blue = hex_to_rgb(hex_color)
    hue, saturation, _ = colorsys.rgb_to_hsv(red / 255, green / 255, blue / 255)

    if saturation < 0.1:
        return "gray"

    degrees = hue * 360

    # Red wraps around the hue circle, so handle the high end first.
    if degrees >= 345:
        return "red"

    # Upper-bound lookup table: first bucket whose bound exceeds `degrees`.
    buckets = (
        (15, "red"),
        (45, "orange"),
        (75, "yellow"),
        (150, "green"),
        (210, "cyan"),
        (270, "blue"),
        (315, "purple"),
    )
    for upper_bound, label in buckets:
        if degrees < upper_bound:
            return label
    return "pink"
242
-
243
-
244
def color_distance(hex1: str, hex2: str) -> float:
    """Calculate perceptual color distance (0-1, lower = more similar).

    Implemented as Euclidean distance in normalized RGB space, divided by
    sqrt(3) so the maximum (black vs. white) is exactly 1.0.
    """
    rgb_a = hex_to_rgb(hex1)
    rgb_b = hex_to_rgb(hex2)
    squared_sum = sum(((a - b) / 255) ** 2 for a, b in zip(rgb_a, rgb_b))
    return squared_sum ** 0.5 / (3 ** 0.5)
255
-
256
-
257
def darken_color(hex_color: str, factor: float) -> str:
    """Darken a color by a factor (0-1); factor 1.0 yields black."""
    scale = 1 - factor
    red, green, blue = (int(channel * scale) for channel in hex_to_rgb(hex_color))
    return rgb_to_hex(red, green, blue)
264
-
265
-
266
def lighten_color(hex_color: str, factor: float) -> str:
    """Lighten a color by a factor (0-1); factor 1.0 yields white."""
    red, green, blue = (
        int(channel + (255 - channel) * factor) for channel in hex_to_rgb(hex_color)
    )
    return rgb_to_hex(red, green, blue)
273
-
274
-
275
def find_aa_compliant_color(hex_color: str, background: str = "#ffffff", target_contrast: float = 4.5) -> str:
    """
    Algorithmically adjust a color until it meets AA contrast requirements.

    Returns the original color if it already passes.  Otherwise the color
    is progressively darkened or lightened (in 1% steps, up to 100%) until
    the target is reached; if neither direction reaches the target, the
    best attempt found is returned.

    Args:
        hex_color: Foreground color to adjust.
        background: Background color to contrast against.
        target_contrast: WCAG ratio to reach (4.5 = AA normal text).

    Refactor: the original contained two byte-identical 100-step search
    loops (one per direction); they are collapsed into a single loop over
    both directions, preserving the search order and results exactly.
    """
    current_contrast = get_contrast_ratio(hex_color, background)
    if current_contrast >= target_contrast:
        return hex_color

    # Preferred direction: move fg *away* from bg in luminance.
    # If fg is lighter than bg -> darken fg; if darker -> lighten it.
    bg_luminance = get_relative_luminance(background)
    color_luminance = get_relative_luminance(hex_color)
    darken_first = color_luminance >= bg_luminance

    best_color = hex_color
    best_contrast = current_contrast

    # Try the preferred direction first, then the opposite one (useful
    # when the luminances are so close that either direction could work).
    for should_darken in (darken_first, not darken_first):
        adjust = darken_color if should_darken else lighten_color
        for step in range(1, 101):
            new_color = adjust(hex_color, step / 100)
            new_contrast = get_contrast_ratio(new_color, background)
            if new_contrast >= target_contrast:
                return new_color
            if new_contrast > best_contrast:
                best_contrast = new_contrast
                best_color = new_color

    return best_color
336
-
337
-
338
- # =============================================================================
339
- # TYPE SCALE ANALYSIS
340
- # =============================================================================
341
-
342
# Standard type scale ratios, keyed by the multiplier between consecutive
# font sizes; values are the conventional musical-interval names used by
# analyze_type_scale() to label a detected scale.
STANDARD_SCALES = {
    1.067: "Minor Second",
    1.125: "Major Second",
    1.200: "Minor Third",
    1.250: "Major Third",  # ⭐ Recommended
    1.333: "Perfect Fourth",
    1.414: "Augmented Fourth",
    1.500: "Perfect Fifth",
    1.618: "Golden Ratio",
    2.000: "Octave",
}
354
-
355
-
356
def parse_size_to_px(size) -> Optional[float]:
    """Convert any size value to pixels.

    Accepts raw numbers, "NNpx", "N.Nrem"/"N.Nem" (assuming a 16px root),
    and bare numeric strings.  Returns None when no number can be parsed.

    Bug fix: the original pattern r'([\d.]+)' could match a lone "." (e.g.
    for input "..."), making float() raise ValueError.  The pattern now
    requires at least one digit.
    """
    if isinstance(size, (int, float)):
        return float(size)

    size = str(size).strip().lower()

    # Extract the first number: digits with optional fraction, or ".N".
    match = re.search(r'(\d+(?:\.\d+)?|\.\d+)', size)
    if not match:
        return None

    value = float(match.group(1))

    if 'rem' in size:
        return value * 16  # Assume 16px base
    elif 'em' in size:
        return value * 16  # Approximate
    # "px", bare numbers, and unknown units all fall through to the raw
    # value (matching the original behavior, which returned `value` in
    # every remaining branch).
    return value
378
-
379
-
380
def _detect_base_size(sizes_px: list[float]) -> float:
    """Pick the most likely body-text size from a sorted size list.

    Prefers sizes in the typical body range (14-18px), choosing whichever
    is closest to 16px; falls back to the overall size closest to 16px,
    then to 16.0 when no sizes exist.  (Equivalent to the original inline
    logic: "prefer 16 if present" is subsumed by closest-to-16.)
    """
    candidates = [s for s in sizes_px if 14 <= s <= 18]
    pool = candidates or sizes_px
    if not pool:
        return 16.0
    return min(pool, key=lambda s: abs(s - 16))


def _inconclusive_scale(sizes_px: list[float], base_size: float) -> TypeScaleAnalysis:
    """Fallback result when no ratio can be detected; recommends Major Third."""
    return TypeScaleAnalysis(
        detected_ratio=1.0,
        closest_standard_ratio=1.25,
        scale_name="Unknown",
        is_consistent=False,
        variance=0,
        sizes_px=sizes_px,
        ratios_between_sizes=[],
        recommendation=1.25,
        recommendation_name="Major Third",
        base_size=base_size,
    )


def analyze_type_scale(typography_tokens: dict) -> TypeScaleAnalysis:
    """
    Analyze typography tokens to detect type scale ratio.

    Args:
        typography_tokens: Dict of typography tokens with font_size
            (dicts may use "font_size", "fontSize" or "size"; objects are
            read via a font_size attribute)

    Returns:
        TypeScaleAnalysis with detected ratio and recommendations

    Refactor: the two duplicated fallback TypeScaleAnalysis constructions
    and the duplicated base-size detection are extracted into the private
    helpers above; behavior is unchanged.
    """
    # Extract and parse sizes
    sizes = []
    for name, token in typography_tokens.items():
        if isinstance(token, dict):
            size = token.get("font_size") or token.get("fontSize") or token.get("size")
        else:
            size = getattr(token, "font_size", None)

        if size:
            px = parse_size_to_px(size)
            if px and px > 0:
                sizes.append(px)

    # Sort and dedupe
    sizes_px = sorted(set(sizes))

    if len(sizes_px) < 2:
        # A single size is its own base (closest-to-16 of one element).
        return _inconclusive_scale(sizes_px, _detect_base_size(sizes_px))

    # Ratios between consecutive sizes, ignoring implausible jumps.
    ratios = []
    for smaller, larger in zip(sizes_px, sizes_px[1:]):
        if smaller > 0:
            ratio = larger / smaller
            if 1.0 < ratio < 3.0:  # Reasonable range
                ratios.append(ratio)

    if not ratios:
        return _inconclusive_scale(sizes_px, _detect_base_size(sizes_px))

    # Average ratio
    avg_ratio = sum(ratios) / len(ratios)

    # Variance (consistency check): spread between largest and smallest ratio.
    variance = max(ratios) - min(ratios)
    is_consistent = variance < 0.15  # Within 0.15 spread is "consistent"

    # Find closest standard scale
    closest_scale = min(STANDARD_SCALES.keys(), key=lambda x: abs(x - avg_ratio))
    scale_name = STANDARD_SCALES[closest_scale]

    base_size = _detect_base_size(sizes_px)

    # Recommendation: keep the detected scale only when it is both
    # internally consistent and close to a named standard.
    if is_consistent and abs(avg_ratio - closest_scale) < 0.05:
        recommendation = closest_scale
        recommendation_name = scale_name
    else:
        recommendation = 1.25
        recommendation_name = "Major Third"

    return TypeScaleAnalysis(
        detected_ratio=avg_ratio,
        closest_standard_ratio=closest_scale,
        scale_name=scale_name,
        is_consistent=is_consistent,
        variance=variance,
        sizes_px=sizes_px,
        ratios_between_sizes=ratios,
        recommendation=recommendation,
        recommendation_name=recommendation_name,
        base_size=base_size,
    )
494
-
495
-
496
- # =============================================================================
497
- # ACCESSIBILITY ANALYSIS
498
- # =============================================================================
499
-
500
def analyze_accessibility(color_tokens: dict, fg_bg_pairs: list[dict] = None) -> list[ColorAccessibility]:
    """
    Analyze all colors for WCAG accessibility compliance.

    Args:
        color_tokens: Dict of color tokens with value/hex
        fg_bg_pairs: Optional list of actual foreground/background pairs
                     extracted from the DOM (each dict has 'foreground',
                     'background', 'element' keys).

    Returns:
        List of ColorAccessibility results.  Pair failures are appended
        with a name of the form "fg:... on bg:... (...)" so callers can
        distinguish them from individual-color entries.

    Robustness fixes: non-string token values no longer raise
    AttributeError on .startswith(), and explicit None foreground or
    background values in fg_bg_pairs no longer raise on .lower().
    """
    results = []

    for name, token in color_tokens.items():
        if isinstance(token, dict):
            hex_color = token.get("value") or token.get("hex") or token.get("color")
        else:
            hex_color = getattr(token, "value", None)

        # Skip missing or non-string values (tokens may carry numbers etc.).
        if not isinstance(hex_color, str) or not hex_color.startswith("#"):
            continue

        try:
            contrast_white = get_contrast_ratio(hex_color, "#ffffff")
            contrast_black = get_contrast_ratio(hex_color, "#000000")

            # A color "passes" if it is readable on at least one of
            # white or black.
            passes_aa_normal = contrast_white >= 4.5 or contrast_black >= 4.5
            passes_aa_large = contrast_white >= 3.0 or contrast_black >= 3.0
            passes_aaa_normal = contrast_white >= 7.0 or contrast_black >= 7.0

            best_text = "#ffffff" if contrast_white > contrast_black else "#000000"

            # Generate fix suggestion if needed (computed against white).
            suggested_fix = None
            suggested_fix_contrast = None
            if not passes_aa_normal:
                suggested_fix = find_aa_compliant_color(hex_color, "#ffffff", 4.5)
                suggested_fix_contrast = get_contrast_ratio(suggested_fix, "#ffffff")

            results.append(ColorAccessibility(
                hex_color=hex_color,
                name=name,
                contrast_on_white=contrast_white,
                contrast_on_black=contrast_black,
                passes_aa_normal=passes_aa_normal,
                passes_aa_large=passes_aa_large,
                passes_aaa_normal=passes_aaa_normal,
                best_text_color=best_text,
                suggested_fix=suggested_fix,
                suggested_fix_contrast=suggested_fix_contrast,
            ))
        except Exception:
            # Best-effort scan: skip malformed colors rather than abort.
            continue

    # --- Real foreground-background pair checks ---
    if fg_bg_pairs:
        for pair in fg_bg_pairs:
            fg = str(pair.get("foreground") or "").lower()
            bg = str(pair.get("background") or "").lower()
            element = pair.get("element", "")
            if not (fg.startswith("#") and bg.startswith("#")):
                continue
            # Skip same-color pairs (invisible/placeholder text — not real failures)
            if fg == bg:
                continue
            try:
                ratio = get_contrast_ratio(fg, bg)
                # Skip near-identical pairs (ratio < 1.1) — likely decorative/hidden
                if ratio < 1.1:
                    continue
                if ratio < 4.5:
                    # This pair fails AA — record it with a suggested fix
                    # computed against the *actual* background.
                    fix = find_aa_compliant_color(fg, bg, 4.5)
                    fix_contrast = get_contrast_ratio(fix, bg)
                    results.append(ColorAccessibility(
                        hex_color=fg,
                        name=f"fg:{fg} on bg:{bg} ({element}) [{ratio:.1f}:1]",
                        contrast_on_white=get_contrast_ratio(fg, "#ffffff"),
                        contrast_on_black=get_contrast_ratio(fg, "#000000"),
                        passes_aa_normal=False,
                        passes_aa_large=ratio >= 3.0,
                        passes_aaa_normal=False,
                        best_text_color="#ffffff" if get_contrast_ratio(fg, "#ffffff") > get_contrast_ratio(fg, "#000000") else "#000000",
                        suggested_fix=fix,
                        suggested_fix_contrast=fix_contrast,
                    ))
            except Exception:
                continue

    return results
593
-
594
-
595
- # =============================================================================
596
- # SPACING GRID ANALYSIS
597
- # =============================================================================
598
-
599
def analyze_spacing_grid(spacing_tokens: dict) -> SpacingGridAnalysis:
    """
    Analyze spacing tokens to detect grid alignment.

    Args:
        spacing_tokens: Dict of spacing tokens with value_px or value
            (dicts or objects; values may be numbers or strings like "12px")

    Returns:
        SpacingGridAnalysis with detected grid and recommendations
    """
    values = []

    for name, token in spacing_tokens.items():
        if isinstance(token, dict):
            px = token.get("value_px") or token.get("value")
        else:
            px = getattr(token, "value_px", None) or getattr(token, "value", None)

        if px:
            try:
                # Accept "12px", "12", 12, 12.5 — fractional px truncate.
                px_val = int(float(str(px).replace('px', '')))
                if px_val > 0:
                    values.append(px_val)
            except (ValueError, TypeError):
                continue

    if not values:
        # Nothing to analyze: fall back to the conventional 8px grid.
        return SpacingGridAnalysis(
            detected_base=8,
            is_aligned=False,
            alignment_percentage=0,
            misaligned_values=[],
            recommendation=8,
            recommendation_reason="No spacing values detected, defaulting to 8px grid",
            current_values=[],
            suggested_scale=[0, 4, 8, 12, 16, 20, 24, 32, 40, 48, 64],
        )

    values = sorted(set(values))

    # Find GCD (greatest common divisor) of all values
    detected_base = reduce(gcd, values)

    # Check alignment to common grids (4px, 8px)
    aligned_to_4 = all(v % 4 == 0 for v in values)
    aligned_to_8 = all(v % 8 == 0 for v in values)

    # Find misaligned values (not divisible by detected base).
    # NOTE(review): detected_base is the GCD of `values`, so every value
    # divides it evenly — this list is empty whenever detected_base > 1,
    # and contains *all* values when the GCD degenerates to 1.  Confirm
    # whether misalignment should instead be measured against the
    # recommended grid (e.g. v % 8).
    misaligned = [v for v in values if v % detected_base != 0] if detected_base > 1 else values

    alignment_percentage = (len(values) - len(misaligned)) / len(values) * 100 if values else 0

    # Determine recommendation
    if aligned_to_8:
        recommendation = 8
        recommendation_reason = "All values already align to 8px grid"
        is_aligned = True
    elif aligned_to_4:
        recommendation = 4
        recommendation_reason = "Values align to 4px grid (consider 8px for simpler system)"
        is_aligned = True
    elif detected_base in [4, 8]:
        # NOTE(review): this branch looks unreachable — a GCD of 4 or 8
        # implies aligned_to_4/aligned_to_8 above already matched.
        recommendation = detected_base
        recommendation_reason = f"Detected {detected_base}px base with {alignment_percentage:.0f}% alignment"
        is_aligned = alignment_percentage >= 80
    else:
        recommendation = 8
        recommendation_reason = f"Inconsistent spacing detected (GCD={detected_base}), recommend 8px grid"
        is_aligned = False

    # Generate suggested scale: whole-pixel multiples of the base only
    # (fractional multiples like 0.5 * base are kept only when they land
    # on an integer pixel value).
    base = recommendation
    suggested_scale = [0] + [base * i for i in [0.5, 1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10, 12, 16] if base * i == int(base * i)]
    suggested_scale = sorted(set([int(v) for v in suggested_scale]))

    return SpacingGridAnalysis(
        detected_base=detected_base,
        is_aligned=is_aligned,
        alignment_percentage=alignment_percentage,
        misaligned_values=misaligned,
        recommendation=recommendation,
        recommendation_reason=recommendation_reason,
        current_values=values,
        suggested_scale=suggested_scale,
    )
684
-
685
-
686
- # =============================================================================
687
- # COLOR STATISTICS
688
- # =============================================================================
689
-
690
def analyze_color_statistics(color_tokens: dict, similarity_threshold: float = 0.05) -> ColorStatistics:
    """
    Analyze color palette statistics.

    Args:
        color_tokens: Dict of color tokens
        similarity_threshold: Distance threshold for "near duplicate" (0-1)

    Returns:
        ColorStatistics with palette analysis

    Fixes: non-string token values no longer raise AttributeError on
    .startswith(); the unique-color list is sorted so the near-duplicate
    output order is deterministic across runs (sets have no stable order).
    """
    colors = []

    for name, token in color_tokens.items():
        if isinstance(token, dict):
            hex_color = token.get("value") or token.get("hex")
        else:
            hex_color = getattr(token, "value", None)

        if isinstance(hex_color, str) and hex_color.startswith("#"):
            colors.append(hex_color.lower())

    unique_colors = sorted(set(colors))

    # Count grays and saturated colors
    grays = [c for c in unique_colors if is_gray(c)]
    saturated = [c for c in unique_colors if get_saturation(c) > 0.3]

    # Find near duplicates (O(n^2) pairwise scan; palettes are small).
    # dist == 0 is excluded: distinct hex strings can still be equal in
    # RGB only via formatting, and exact duplicates are counted separately.
    near_duplicates = []
    for i, c1 in enumerate(unique_colors):
        for c2 in unique_colors[i + 1:]:
            dist = color_distance(c1, c2)
            if 0 < dist < similarity_threshold:
                near_duplicates.append((c1, c2, round(dist, 4)))

    # Hue distribution: count of unique colors per named hue bucket.
    hue_dist = {}
    for c in unique_colors:
        hue = get_hue_name(c)
        hue_dist[hue] = hue_dist.get(hue, 0) + 1

    return ColorStatistics(
        total_count=len(colors),
        unique_count=len(unique_colors),
        duplicate_count=len(colors) - len(unique_colors),
        gray_count=len(grays),
        saturated_count=len(saturated),
        near_duplicates=near_duplicates,
        hue_distribution=hue_dist,
    )
741
-
742
-
743
- # =============================================================================
744
- # MAIN ANALYSIS FUNCTION
745
- # =============================================================================
746
-
747
def run_rule_engine(
    typography_tokens: dict,
    color_tokens: dict,
    spacing_tokens: dict,
    radius_tokens: dict = None,
    shadow_tokens: dict = None,
    log_callback: Optional[callable] = None,
    fg_bg_pairs: list[dict] = None,
) -> RuleEngineResults:
    """
    Run complete rule-based analysis on design tokens.

    This is FREE (no LLM costs) and handles all deterministic calculations.

    Args:
        typography_tokens: Dict of typography tokens
        color_tokens: Dict of color tokens
        spacing_tokens: Dict of spacing tokens
        radius_tokens: Dict of border radius tokens (optional; accepted but
            not referenced by this function body)
        shadow_tokens: Dict of shadow tokens (optional; accepted but not
            referenced by this function body)
        log_callback: Function to log messages (one string per line)
        fg_bg_pairs: Optional DOM-extracted foreground/background pairs,
            forwarded to analyze_accessibility()

    Returns:
        RuleEngineResults with all analysis data
    """

    def log(msg: str):
        # No-op when no callback is supplied.
        if log_callback:
            log_callback(msg)

    log("")
    log("═" * 60)
    log("⚙️ LAYER 1: RULE ENGINE (FREE - $0.00)")
    log("═" * 60)
    log("")

    # ─────────────────────────────────────────────────────────────
    # Typography Analysis
    # ─────────────────────────────────────────────────────────────
    log("  📐 TYPE SCALE ANALYSIS")
    log("  " + "─" * 40)
    typography = analyze_type_scale(typography_tokens)

    consistency_icon = "✅" if typography.is_consistent else "⚠️"
    log(f"  ├─ Detected Ratio: {typography.detected_ratio:.3f}")
    log(f"  ├─ Closest Standard: {typography.scale_name} ({typography.closest_standard_ratio})")
    log(f"  ├─ Consistent: {consistency_icon} {'Yes' if typography.is_consistent else f'No (variance: {typography.variance:.2f})'}")
    log(f"  ├─ Sizes Found: {typography.sizes_px}")
    log(f"  └─ 💡 Recommendation: {typography.recommendation} ({typography.recommendation_name})")
    log("")

    # ─────────────────────────────────────────────────────────────
    # Accessibility Analysis
    # ─────────────────────────────────────────────────────────────
    log("  ♿ ACCESSIBILITY CHECK (WCAG AA/AAA)")
    log("  " + "─" * 40)
    accessibility = analyze_accessibility(color_tokens, fg_bg_pairs=fg_bg_pairs)

    # Separate individual-color failures from real FG/BG pair failures
    # (pair entries carry an "fg:" name prefix set by analyze_accessibility).
    pair_failures = [a for a in accessibility if not a.passes_aa_normal and a.name.startswith("fg:")]
    color_only_failures = [a for a in accessibility if not a.passes_aa_normal and not a.name.startswith("fg:")]
    failures = [a for a in accessibility if not a.passes_aa_normal]
    passes = len(accessibility) - len(failures)

    pair_count = len(fg_bg_pairs) if fg_bg_pairs else 0
    log(f"  ├─ Colors Analyzed: {len(accessibility)}")
    log(f"  ├─ FG/BG Pairs Checked: {pair_count}")
    log(f"  ├─ AA Pass: {passes} ✅")
    log(f"  ├─ AA Fail (color vs white/black): {len(color_only_failures)} {'❌' if color_only_failures else '✅'}")
    log(f"  ├─ AA Fail (real FG/BG pairs): {len(pair_failures)} {'❌' if pair_failures else '✅'}")

    if color_only_failures:
        log("  │")
        log("  │  ⚠️ FAILING COLORS (vs white/black):")
        # Only the first 5 failures are itemized to keep the log readable.
        for i, f in enumerate(color_only_failures[:5]):
            fix_info = f" → 💡 Fix: {f.suggested_fix} ({f.suggested_fix_contrast:.1f}:1)" if f.suggested_fix else ""
            log(f"  │  ├─ {f.name}: {f.hex_color} ({f.contrast_on_white:.1f}:1 on white){fix_info}")
        if len(color_only_failures) > 5:
            log(f"  │  └─ ... and {len(color_only_failures) - 5} more")

    if pair_failures:
        log("  │")
        log("  │  ❌ FAILING FG/BG PAIRS (actual on-page combinations):")
        for i, f in enumerate(pair_failures[:5]):
            fix_info = f" → 💡 Fix: {f.suggested_fix} ({f.suggested_fix_contrast:.1f}:1)" if f.suggested_fix else ""
            log(f"  │  ├─ {f.name}{fix_info}")
        if len(pair_failures) > 5:
            log(f"  │  └─ ... and {len(pair_failures) - 5} more")

    log("")

    # ─────────────────────────────────────────────────────────────
    # Spacing Grid Analysis
    # ─────────────────────────────────────────────────────────────
    log("  📏 SPACING GRID ANALYSIS")
    log("  " + "─" * 40)
    spacing = analyze_spacing_grid(spacing_tokens)

    alignment_icon = "✅" if spacing.is_aligned else "⚠️"
    log(f"  ├─ Detected Base: {spacing.detected_base}px")
    log(f"  ├─ Grid Aligned: {alignment_icon} {spacing.alignment_percentage:.0f}%")

    if spacing.misaligned_values:
        log(f"  ├─ Misaligned Values: {spacing.misaligned_values[:8]}{'...' if len(spacing.misaligned_values) > 8 else ''}")

    log(f"  ├─ Suggested Scale: {spacing.suggested_scale[:10]}...")
    log(f"  └─ 💡 Recommendation: {spacing.recommendation}px ({spacing.recommendation_reason})")
    log("")

    # ─────────────────────────────────────────────────────────────
    # Color Statistics
    # ─────────────────────────────────────────────────────────────
    log("  🎨 COLOR PALETTE STATISTICS")
    log("  " + "─" * 40)
    color_stats = analyze_color_statistics(color_tokens)

    # Warning thresholds: >10 exact duplicates or >30 unique colors.
    dup_icon = "⚠️" if color_stats.duplicate_count > 10 else "✅"
    unique_icon = "⚠️" if color_stats.unique_count > 30 else "✅"

    log(f"  ├─ Total Colors: {color_stats.total_count}")
    log(f"  ├─ Unique Colors: {color_stats.unique_count} {unique_icon}")
    log(f"  ├─ Exact Duplicates: {color_stats.duplicate_count} {dup_icon}")
    log(f"  ├─ Near-Duplicates: {len(color_stats.near_duplicates)}")
    log(f"  ├─ Grays: {color_stats.gray_count} | Saturated: {color_stats.saturated_count}")
    log(f"  └─ Hue Distribution: {dict(list(color_stats.hue_distribution.items())[:5])}...")
    log("")

    # ─────────────────────────────────────────────────────────────
    # Calculate Summary Scores
    # ─────────────────────────────────────────────────────────────

    # Consistency score (0-100): four equally-weighted 25-point components.
    type_score = 25 if typography.is_consistent else 10
    aa_score = 25 * (passes / max(len(accessibility), 1))
    spacing_score = 25 * (spacing.alignment_percentage / 100)
    color_score = 25 * (1 - min(color_stats.duplicate_count / max(color_stats.total_count, 1), 1))

    consistency_score = int(type_score + aa_score + spacing_score + color_score)

    log("  " + "─" * 40)
    log(f"  📊 RULE ENGINE SUMMARY")
    log(f"  ├─ Consistency Score: {consistency_score}/100")
    log(f"  ├─ AA Failures: {len(failures)}")
    log(f"  └─ Cost: $0.00 (free)")
    log("")

    return RuleEngineResults(
        typography=typography,
        accessibility=accessibility,
        spacing=spacing,
        color_stats=color_stats,
        aa_failures=len(failures),
        consistency_score=consistency_score,
    )