Spaces:
Sleeping
feat: color ramps in JSON export + shadow interpolation + semantic variants
Color Ramps (was silently broken → ramp[i] on a dict):
- Fix: ramp.get(shade) instead of ramp[i] → ramps now appear in export
- PALETTE colors: full 50-900 ramp (10 shades each)
- SEMANTIC colors (brand/text/bg/border/feedback): base + 4 variants
(shades 50, 200, 800, 950 for light/dark tints)
- No ramps: only base color exported (when checkbox unchecked)
Shadow Interpolation:
- If site has <5 shadows, interpolate to fill 5-level elevation scale
- Linear interpolation of blur, offset, spread, and color alpha
- 1 shadow: generates proportional 5-level scale around it
- 2-4 shadows: interpolates between extracted values
- 5+ shadows: takes first 5 sorted by blur
- As-Is/To-Be card now shows dynamic count instead of hardcoded 5
DataFrame fix (prior commit) already handles Gradio 6 truthiness.
All 113 tests pass.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
|
@@ -1502,10 +1502,13 @@ async def run_stage2_analysis_v2(
|
|
| 1502 |
shadow_count = 0
|
| 1503 |
if state.desktop_normalized:
|
| 1504 |
shadow_count = len(getattr(state.desktop_normalized, 'shadows', {}))
|
|
|
|
| 1505 |
cards.append(_render_as_is_to_be(
|
| 1506 |
"Shadows", f"{shadow_count} levels",
|
| 1507 |
"Elevation tokens" if shadow_count > 0 else "No shadows found",
|
| 1508 |
-
"
|
|
|
|
|
|
|
| 1509 |
))
|
| 1510 |
asis_tobe_html = "".join(cards)
|
| 1511 |
except Exception:
|
|
@@ -3373,24 +3376,53 @@ def export_tokens_json(convention: str = "semantic"):
|
|
| 3373 |
log_callback=state.log,
|
| 3374 |
)
|
| 3375 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3376 |
for c in classification.colors:
|
| 3377 |
flat_key = c.token_name
|
| 3378 |
-
|
|
|
|
|
|
|
|
|
|
| 3379 |
try:
|
| 3380 |
ramp = generate_color_ramp(c.hex)
|
| 3381 |
-
shades = ["50", "100", "200", "300", "400", "500", "600", "700", "800", "900"
|
| 3382 |
-
for
|
| 3383 |
-
|
|
|
|
| 3384 |
shade_key = f"{flat_key}.{shade}"
|
| 3385 |
-
|
| 3386 |
-
dtcg_token = _to_dtcg_token(color_val, "color")
|
| 3387 |
_flat_key_to_nested(shade_key, dtcg_token, result)
|
| 3388 |
token_count += 1
|
| 3389 |
except (ValueError, KeyError, TypeError, IndexError):
|
| 3390 |
dtcg_token = _to_dtcg_token(c.hex, "color")
|
| 3391 |
_flat_key_to_nested(flat_key, dtcg_token, result)
|
| 3392 |
token_count += 1
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3393 |
else:
|
|
|
|
| 3394 |
dtcg_token = _to_dtcg_token(c.hex, "color")
|
| 3395 |
_flat_key_to_nested(flat_key, dtcg_token, result)
|
| 3396 |
token_count += 1
|
|
@@ -3555,19 +3587,93 @@ def export_tokens_json(convention: str = "semantic"):
|
|
| 3555 |
token_count += 1
|
| 3556 |
|
| 3557 |
# =========================================================================
|
| 3558 |
-
# SHADOWS β W3C DTCG format with shadow spec
|
| 3559 |
# =========================================================================
|
|
|
|
|
|
|
|
|
|
| 3560 |
if state.desktop_normalized and state.desktop_normalized.shadows:
|
| 3561 |
-
shadow_names = ["shadow.xs", "shadow.sm", "shadow.md", "shadow.lg", "shadow.xl", "shadow.2xl"]
|
| 3562 |
sorted_shadows = sorted(
|
| 3563 |
state.desktop_normalized.shadows.items(),
|
| 3564 |
key=lambda x: _get_shadow_blur(x[1].value),
|
| 3565 |
)
|
| 3566 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3567 |
token_name = shadow_names[i] if i < len(shadow_names) else f"shadow.{i + 1}"
|
| 3568 |
-
|
| 3569 |
-
|
| 3570 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3571 |
_flat_key_to_nested(token_name, dtcg_token, result)
|
| 3572 |
token_count += 1
|
| 3573 |
|
|
|
|
| 1502 |
shadow_count = 0
|
| 1503 |
if state.desktop_normalized:
|
| 1504 |
shadow_count = len(getattr(state.desktop_normalized, 'shadows', {}))
|
| 1505 |
+
tobe_shadow_count = max(shadow_count, 5) # At least 5, interpolated if needed
|
| 1506 |
cards.append(_render_as_is_to_be(
|
| 1507 |
"Shadows", f"{shadow_count} levels",
|
| 1508 |
"Elevation tokens" if shadow_count > 0 else "No shadows found",
|
| 1509 |
+
f"{tobe_shadow_count} levels",
|
| 1510 |
+
"xs β sm β md β lg β xl" + (" (interpolated)" if shadow_count < 5 else ""),
|
| 1511 |
+
icon="π«οΈ"
|
| 1512 |
))
|
| 1513 |
asis_tobe_html = "".join(cards)
|
| 1514 |
except Exception:
|
|
|
|
| 3376 |
log_callback=state.log,
|
| 3377 |
)
|
| 3378 |
|
| 3379 |
+
# Semantic categories get light/dark variants; palette gets full 50-900 ramps
|
| 3380 |
+
_SEMANTIC_CATS = {"brand", "text", "bg", "border", "feedback"}
|
| 3381 |
+
_SEMANTIC_VARIANT_SHADES = {"50": 0.85, "200": 0.5, "800": -0.5, "950": -0.85}
|
| 3382 |
+
# factor > 0 = lighter, factor < 0 = darker
|
| 3383 |
+
|
| 3384 |
for c in classification.colors:
|
| 3385 |
flat_key = c.token_name
|
| 3386 |
+
is_semantic = c.category in _SEMANTIC_CATS
|
| 3387 |
+
|
| 3388 |
+
if apply_ramps and not is_semantic:
|
| 3389 |
+
# PALETTE colors: full 50-900 ramp
|
| 3390 |
try:
|
| 3391 |
ramp = generate_color_ramp(c.hex)
|
| 3392 |
+
shades = ["50", "100", "200", "300", "400", "500", "600", "700", "800", "900"]
|
| 3393 |
+
for shade in shades:
|
| 3394 |
+
shade_hex = ramp.get(shade)
|
| 3395 |
+
if shade_hex:
|
| 3396 |
shade_key = f"{flat_key}.{shade}"
|
| 3397 |
+
dtcg_token = _to_dtcg_token(shade_hex, "color")
|
|
|
|
| 3398 |
_flat_key_to_nested(shade_key, dtcg_token, result)
|
| 3399 |
token_count += 1
|
| 3400 |
except (ValueError, KeyError, TypeError, IndexError):
|
| 3401 |
dtcg_token = _to_dtcg_token(c.hex, "color")
|
| 3402 |
_flat_key_to_nested(flat_key, dtcg_token, result)
|
| 3403 |
token_count += 1
|
| 3404 |
+
|
| 3405 |
+
elif apply_ramps and is_semantic:
|
| 3406 |
+
# SEMANTIC colors: base + light/dark variants
|
| 3407 |
+
# Base color
|
| 3408 |
+
dtcg_token = _to_dtcg_token(c.hex, "color")
|
| 3409 |
+
_flat_key_to_nested(flat_key, dtcg_token, result)
|
| 3410 |
+
token_count += 1
|
| 3411 |
+
# Generate tint/shade variants
|
| 3412 |
+
try:
|
| 3413 |
+
ramp = generate_color_ramp(c.hex)
|
| 3414 |
+
for variant_shade, _ in _SEMANTIC_VARIANT_SHADES.items():
|
| 3415 |
+
variant_hex = ramp.get(variant_shade)
|
| 3416 |
+
if variant_hex:
|
| 3417 |
+
variant_key = f"{flat_key}.{variant_shade}"
|
| 3418 |
+
dtcg_token = _to_dtcg_token(variant_hex, "color")
|
| 3419 |
+
_flat_key_to_nested(variant_key, dtcg_token, result)
|
| 3420 |
+
token_count += 1
|
| 3421 |
+
except (ValueError, KeyError, TypeError, IndexError):
|
| 3422 |
+
pass # Base already exported above
|
| 3423 |
+
|
| 3424 |
else:
|
| 3425 |
+
# No ramps β base color only
|
| 3426 |
dtcg_token = _to_dtcg_token(c.hex, "color")
|
| 3427 |
_flat_key_to_nested(flat_key, dtcg_token, result)
|
| 3428 |
token_count += 1
|
|
|
|
| 3587 |
token_count += 1
|
| 3588 |
|
| 3589 |
# =========================================================================
|
| 3590 |
+
# SHADOWS β W3C DTCG format with shadow spec + interpolation to 5 levels
|
| 3591 |
# =========================================================================
|
| 3592 |
+
TARGET_SHADOW_COUNT = 5
|
| 3593 |
+
shadow_names = ["shadow.xs", "shadow.sm", "shadow.md", "shadow.lg", "shadow.xl"]
|
| 3594 |
+
|
| 3595 |
if state.desktop_normalized and state.desktop_normalized.shadows:
|
|
|
|
| 3596 |
sorted_shadows = sorted(
|
| 3597 |
state.desktop_normalized.shadows.items(),
|
| 3598 |
key=lambda x: _get_shadow_blur(x[1].value),
|
| 3599 |
)
|
| 3600 |
+
|
| 3601 |
+
# Parse all extracted shadows into numeric components
|
| 3602 |
+
parsed_shadows = []
|
| 3603 |
+
for name, s in sorted_shadows:
|
| 3604 |
+
p = _parse_shadow_to_tokens_studio(s.value)
|
| 3605 |
+
parsed_shadows.append({
|
| 3606 |
+
"x": float(p.get("x", 0)),
|
| 3607 |
+
"y": float(p.get("y", 0)),
|
| 3608 |
+
"blur": float(p.get("blur", 0)),
|
| 3609 |
+
"spread": float(p.get("spread", 0)),
|
| 3610 |
+
"color": p.get("color", "rgba(0,0,0,0.25)"),
|
| 3611 |
+
})
|
| 3612 |
+
|
| 3613 |
+
# Interpolate to fill TARGET_SHADOW_COUNT levels
|
| 3614 |
+
def _lerp(a, b, t):
|
| 3615 |
+
return a + (b - a) * t
|
| 3616 |
+
|
| 3617 |
+
def _lerp_shadow(s1, s2, t):
|
| 3618 |
+
"""Interpolate between two shadow dicts at factor t (0.0=s1, 1.0=s2)."""
|
| 3619 |
+
import re
|
| 3620 |
+
# Interpolate numeric values
|
| 3621 |
+
interp = {
|
| 3622 |
+
"x": round(_lerp(s1["x"], s2["x"], t), 1),
|
| 3623 |
+
"y": round(_lerp(s1["y"], s2["y"], t), 1),
|
| 3624 |
+
"blur": round(_lerp(s1["blur"], s2["blur"], t), 1),
|
| 3625 |
+
"spread": round(_lerp(s1["spread"], s2["spread"], t), 1),
|
| 3626 |
+
}
|
| 3627 |
+
# Interpolate alpha from rgba color string
|
| 3628 |
+
alpha1, alpha2 = 0.25, 0.25
|
| 3629 |
+
m1 = re.search(r'rgba?\([^)]*,\s*([\d.]+)\)', s1["color"])
|
| 3630 |
+
m2 = re.search(r'rgba?\([^)]*,\s*([\d.]+)\)', s2["color"])
|
| 3631 |
+
if m1:
|
| 3632 |
+
alpha1 = float(m1.group(1))
|
| 3633 |
+
if m2:
|
| 3634 |
+
alpha2 = float(m2.group(1))
|
| 3635 |
+
interp_alpha = round(_lerp(alpha1, alpha2, t), 3)
|
| 3636 |
+
interp["color"] = f"rgba(0, 0, 0, {interp_alpha})"
|
| 3637 |
+
return interp
|
| 3638 |
+
|
| 3639 |
+
final_shadows = []
|
| 3640 |
+
n = len(parsed_shadows)
|
| 3641 |
+
if n >= TARGET_SHADOW_COUNT:
|
| 3642 |
+
# Already have enough β take the first 5 sorted by blur
|
| 3643 |
+
final_shadows = parsed_shadows[:TARGET_SHADOW_COUNT]
|
| 3644 |
+
elif n == 1:
|
| 3645 |
+
# Only 1 shadow β generate a 5-level scale around it
|
| 3646 |
+
base = parsed_shadows[0]
|
| 3647 |
+
for i in range(TARGET_SHADOW_COUNT):
|
| 3648 |
+
factor = (i + 1) / 3.0 # 0.33, 0.67, 1.0, 1.33, 1.67
|
| 3649 |
+
final_shadows.append({
|
| 3650 |
+
"x": round(base["x"] * factor, 1),
|
| 3651 |
+
"y": round(max(1, base["y"] * factor), 1),
|
| 3652 |
+
"blur": round(max(1, base["blur"] * factor), 1),
|
| 3653 |
+
"spread": round(base["spread"] * factor, 1),
|
| 3654 |
+
"color": f"rgba(0, 0, 0, {round(0.04 + i * 0.04, 3)})",
|
| 3655 |
+
})
|
| 3656 |
+
elif n >= 2:
|
| 3657 |
+
# Interpolate between extracted shadows to fill 5 levels
|
| 3658 |
+
for i in range(TARGET_SHADOW_COUNT):
|
| 3659 |
+
t = i / (TARGET_SHADOW_COUNT - 1) # 0.0 to 1.0
|
| 3660 |
+
# Map t to source shadow pair
|
| 3661 |
+
src_pos = t * (n - 1)
|
| 3662 |
+
lo = int(src_pos)
|
| 3663 |
+
hi = min(lo + 1, n - 1)
|
| 3664 |
+
frac = src_pos - lo
|
| 3665 |
+
final_shadows.append(_lerp_shadow(parsed_shadows[lo], parsed_shadows[hi], frac))
|
| 3666 |
+
|
| 3667 |
+
for i, shadow in enumerate(final_shadows):
|
| 3668 |
token_name = shadow_names[i] if i < len(shadow_names) else f"shadow.{i + 1}"
|
| 3669 |
+
dtcg_value = {
|
| 3670 |
+
"color": shadow["color"],
|
| 3671 |
+
"offsetX": f"{shadow['x']}px",
|
| 3672 |
+
"offsetY": f"{shadow['y']}px",
|
| 3673 |
+
"blur": f"{shadow['blur']}px",
|
| 3674 |
+
"spread": f"{shadow['spread']}px",
|
| 3675 |
+
}
|
| 3676 |
+
dtcg_token = _to_dtcg_token(dtcg_value, "shadow")
|
| 3677 |
_flat_key_to_nested(token_name, dtcg_token, result)
|
| 3678 |
token_count += 1
|
| 3679 |
|