Spaces:
Sleeping
Sleeping
fix: palette ramps flat, semantic variants, Figma page limit, shadow honesty
- Palette ramps: strip shade from token name before ramp generation
(color.blue.50 not color.blue.700.50) — one ramp per hue family
- Semantic variants: use DEFAULT key for base + 50/200/800/950 variants
(avoids DTCG leaf guard blocking children)
- Figma plugin: use currentPage instead of createPage() — fixes
"Starter plan only comes with 3 pages" error
- Shadows: export only what was actually extracted (no interpolation)
with best-fit naming based on count (2→sm/lg, 3→sm/md/lg, etc.)
- As-Is/To-Be card reflects actual extracted shadow count
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
app.py
CHANGED
|
@@ -1502,12 +1502,14 @@ async def run_stage2_analysis_v2(
|
|
| 1502 |
shadow_count = 0
|
| 1503 |
if state.desktop_normalized:
|
| 1504 |
shadow_count = len(getattr(state.desktop_normalized, 'shadows', {}))
|
| 1505 |
-
tobe_shadow_count =
|
|
|
|
|
|
|
| 1506 |
cards.append(_render_as_is_to_be(
|
| 1507 |
"Shadows", f"{shadow_count} levels",
|
| 1508 |
"Elevation tokens" if shadow_count > 0 else "No shadows found",
|
| 1509 |
f"{tobe_shadow_count} levels",
|
| 1510 |
-
|
| 1511 |
icon="🌫️"
|
| 1512 |
))
|
| 1513 |
asis_tobe_html = "".join(cards)
|
|
@@ -3378,22 +3380,33 @@ def export_tokens_json(convention: str = "semantic"):
|
|
| 3378 |
|
| 3379 |
# Semantic categories get light/dark variants; palette gets full 50-900 ramps
|
| 3380 |
_SEMANTIC_CATS = {"brand", "text", "bg", "border", "feedback"}
|
| 3381 |
-
|
| 3382 |
-
|
|
|
|
|
|
|
|
|
|
| 3383 |
|
| 3384 |
for c in classification.colors:
|
| 3385 |
flat_key = c.token_name
|
| 3386 |
is_semantic = c.category in _SEMANTIC_CATS
|
| 3387 |
|
| 3388 |
if apply_ramps and not is_semantic:
|
| 3389 |
-
# PALETTE colors: full 50-900 ramp
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3390 |
try:
|
| 3391 |
ramp = generate_color_ramp(c.hex)
|
| 3392 |
-
|
| 3393 |
-
for shade in shades:
|
| 3394 |
shade_hex = ramp.get(shade)
|
| 3395 |
if shade_hex:
|
| 3396 |
-
shade_key = f"{
|
| 3397 |
dtcg_token = _to_dtcg_token(shade_hex, "color")
|
| 3398 |
_flat_key_to_nested(shade_key, dtcg_token, result)
|
| 3399 |
token_count += 1
|
|
@@ -3403,15 +3416,17 @@ def export_tokens_json(convention: str = "semantic"):
|
|
| 3403 |
token_count += 1
|
| 3404 |
|
| 3405 |
elif apply_ramps and is_semantic:
|
| 3406 |
-
# SEMANTIC colors: base +
|
| 3407 |
-
#
|
| 3408 |
-
|
| 3409 |
-
_flat_key_to_nested(flat_key, dtcg_token, result)
|
| 3410 |
-
token_count += 1
|
| 3411 |
-
# Generate tint/shade variants
|
| 3412 |
try:
|
| 3413 |
ramp = generate_color_ramp(c.hex)
|
| 3414 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3415 |
variant_hex = ramp.get(variant_shade)
|
| 3416 |
if variant_hex:
|
| 3417 |
variant_key = f"{flat_key}.{variant_shade}"
|
|
@@ -3419,7 +3434,10 @@ def export_tokens_json(convention: str = "semantic"):
|
|
| 3419 |
_flat_key_to_nested(variant_key, dtcg_token, result)
|
| 3420 |
token_count += 1
|
| 3421 |
except (ValueError, KeyError, TypeError, IndexError):
|
| 3422 |
-
|
|
|
|
|
|
|
|
|
|
| 3423 |
|
| 3424 |
else:
|
| 3425 |
# No ramps — base color only
|
|
@@ -3587,10 +3605,16 @@ def export_tokens_json(convention: str = "semantic"):
|
|
| 3587 |
token_count += 1
|
| 3588 |
|
| 3589 |
# =========================================================================
|
| 3590 |
-
# SHADOWS — W3C DTCG format
|
| 3591 |
# =========================================================================
|
| 3592 |
-
|
| 3593 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3594 |
|
| 3595 |
if state.desktop_normalized and state.desktop_normalized.shadows:
|
| 3596 |
sorted_shadows = sorted(
|
|
@@ -3610,62 +3634,16 @@ def export_tokens_json(convention: str = "semantic"):
|
|
| 3610 |
"color": p.get("color", "rgba(0,0,0,0.25)"),
|
| 3611 |
})
|
| 3612 |
|
| 3613 |
-
# Interpolate to fill TARGET_SHADOW_COUNT levels
|
| 3614 |
-
def _lerp(a, b, t):
|
| 3615 |
-
return a + (b - a) * t
|
| 3616 |
-
|
| 3617 |
-
def _lerp_shadow(s1, s2, t):
|
| 3618 |
-
"""Interpolate between two shadow dicts at factor t (0.0=s1, 1.0=s2)."""
|
| 3619 |
-
import re
|
| 3620 |
-
# Interpolate numeric values
|
| 3621 |
-
interp = {
|
| 3622 |
-
"x": round(_lerp(s1["x"], s2["x"], t), 1),
|
| 3623 |
-
"y": round(_lerp(s1["y"], s2["y"], t), 1),
|
| 3624 |
-
"blur": round(_lerp(s1["blur"], s2["blur"], t), 1),
|
| 3625 |
-
"spread": round(_lerp(s1["spread"], s2["spread"], t), 1),
|
| 3626 |
-
}
|
| 3627 |
-
# Interpolate alpha from rgba color string
|
| 3628 |
-
alpha1, alpha2 = 0.25, 0.25
|
| 3629 |
-
m1 = re.search(r'rgba?\([^)]*,\s*([\d.]+)\)', s1["color"])
|
| 3630 |
-
m2 = re.search(r'rgba?\([^)]*,\s*([\d.]+)\)', s2["color"])
|
| 3631 |
-
if m1:
|
| 3632 |
-
alpha1 = float(m1.group(1))
|
| 3633 |
-
if m2:
|
| 3634 |
-
alpha2 = float(m2.group(1))
|
| 3635 |
-
interp_alpha = round(_lerp(alpha1, alpha2, t), 3)
|
| 3636 |
-
interp["color"] = f"rgba(0, 0, 0, {interp_alpha})"
|
| 3637 |
-
return interp
|
| 3638 |
-
|
| 3639 |
-
final_shadows = []
|
| 3640 |
n = len(parsed_shadows)
|
| 3641 |
-
|
| 3642 |
-
|
| 3643 |
-
|
| 3644 |
-
|
| 3645 |
-
#
|
| 3646 |
-
|
| 3647 |
-
for i in range(TARGET_SHADOW_COUNT):
|
| 3648 |
-
factor = (i + 1) / 3.0 # 0.33, 0.67, 1.0, 1.33, 1.67
|
| 3649 |
-
final_shadows.append({
|
| 3650 |
-
"x": round(base["x"] * factor, 1),
|
| 3651 |
-
"y": round(max(1, base["y"] * factor), 1),
|
| 3652 |
-
"blur": round(max(1, base["blur"] * factor), 1),
|
| 3653 |
-
"spread": round(base["spread"] * factor, 1),
|
| 3654 |
-
"color": f"rgba(0, 0, 0, {round(0.04 + i * 0.04, 3)})",
|
| 3655 |
-
})
|
| 3656 |
-
elif n >= 2:
|
| 3657 |
-
# Interpolate between extracted shadows to fill 5 levels
|
| 3658 |
-
for i in range(TARGET_SHADOW_COUNT):
|
| 3659 |
-
t = i / (TARGET_SHADOW_COUNT - 1) # 0.0 to 1.0
|
| 3660 |
-
# Map t to source shadow pair
|
| 3661 |
-
src_pos = t * (n - 1)
|
| 3662 |
-
lo = int(src_pos)
|
| 3663 |
-
hi = min(lo + 1, n - 1)
|
| 3664 |
-
frac = src_pos - lo
|
| 3665 |
-
final_shadows.append(_lerp_shadow(parsed_shadows[lo], parsed_shadows[hi], frac))
|
| 3666 |
|
| 3667 |
for i, shadow in enumerate(final_shadows):
|
| 3668 |
-
token_name =
|
| 3669 |
dtcg_value = {
|
| 3670 |
"color": shadow["color"],
|
| 3671 |
"offsetX": f"{shadow['x']}px",
|
|
|
|
| 1502 |
shadow_count = 0
|
| 1503 |
if state.desktop_normalized:
|
| 1504 |
shadow_count = len(getattr(state.desktop_normalized, 'shadows', {}))
|
| 1505 |
+
tobe_shadow_count = min(shadow_count, 5) # Export only what was extracted (capped at 5)
|
| 1506 |
+
_SHADOW_LABELS = {1: "md", 2: "sm → lg", 3: "sm → md → lg", 4: "xs → sm → lg → xl", 5: "xs → sm → md → lg → xl"}
|
| 1507 |
+
tobe_label = _SHADOW_LABELS.get(tobe_shadow_count, f"{tobe_shadow_count} levels")
|
| 1508 |
cards.append(_render_as_is_to_be(
|
| 1509 |
"Shadows", f"{shadow_count} levels",
|
| 1510 |
"Elevation tokens" if shadow_count > 0 else "No shadows found",
|
| 1511 |
f"{tobe_shadow_count} levels",
|
| 1512 |
+
tobe_label,
|
| 1513 |
icon="🌫️"
|
| 1514 |
))
|
| 1515 |
asis_tobe_html = "".join(cards)
|
|
|
|
| 3380 |
|
| 3381 |
# Semantic categories get light/dark variants; palette gets full 50-900 ramps
|
| 3382 |
_SEMANTIC_CATS = {"brand", "text", "bg", "border", "feedback"}
|
| 3383 |
+
_PALETTE_SHADES = ["50", "100", "200", "300", "400", "500", "600", "700", "800", "900"]
|
| 3384 |
+
_SEMANTIC_VARIANT_SHADES = ["50", "200", "800", "950"]
|
| 3385 |
+
|
| 3386 |
+
# Track which palette hue families already have ramps (avoid duplicates)
|
| 3387 |
+
_palette_hues_with_ramps = set()
|
| 3388 |
|
| 3389 |
for c in classification.colors:
|
| 3390 |
flat_key = c.token_name
|
| 3391 |
is_semantic = c.category in _SEMANTIC_CATS
|
| 3392 |
|
| 3393 |
if apply_ramps and not is_semantic:
|
| 3394 |
+
# PALETTE colors: full 50-900 ramp under hue family
|
| 3395 |
+
# token_name = "color.blue.700" → base = "color.blue" (strip shade)
|
| 3396 |
+
parts = flat_key.rsplit(".", 1)
|
| 3397 |
+
hue_base = parts[0] if len(parts) > 1 else flat_key
|
| 3398 |
+
|
| 3399 |
+
# Only one ramp per hue family (first/most-used color wins)
|
| 3400 |
+
if hue_base in _palette_hues_with_ramps:
|
| 3401 |
+
continue
|
| 3402 |
+
_palette_hues_with_ramps.add(hue_base)
|
| 3403 |
+
|
| 3404 |
try:
|
| 3405 |
ramp = generate_color_ramp(c.hex)
|
| 3406 |
+
for shade in _PALETTE_SHADES:
|
|
|
|
| 3407 |
shade_hex = ramp.get(shade)
|
| 3408 |
if shade_hex:
|
| 3409 |
+
shade_key = f"{hue_base}.{shade}"
|
| 3410 |
dtcg_token = _to_dtcg_token(shade_hex, "color")
|
| 3411 |
_flat_key_to_nested(shade_key, dtcg_token, result)
|
| 3412 |
token_count += 1
|
|
|
|
| 3416 |
token_count += 1
|
| 3417 |
|
| 3418 |
elif apply_ramps and is_semantic:
|
| 3419 |
+
# SEMANTIC colors: base + tint/shade variants
|
| 3420 |
+
# Build as a namespace dict (not sequential leaf calls)
|
| 3421 |
+
# so that base and variants coexist without nesting conflict
|
|
|
|
|
|
|
|
|
|
| 3422 |
try:
|
| 3423 |
ramp = generate_color_ramp(c.hex)
|
| 3424 |
+
# Emit base as "DEFAULT" inside a namespace
|
| 3425 |
+
default_key = f"{flat_key}.DEFAULT"
|
| 3426 |
+
dtcg_token = _to_dtcg_token(c.hex, "color")
|
| 3427 |
+
_flat_key_to_nested(default_key, dtcg_token, result)
|
| 3428 |
+
token_count += 1
|
| 3429 |
+
for variant_shade in _SEMANTIC_VARIANT_SHADES:
|
| 3430 |
variant_hex = ramp.get(variant_shade)
|
| 3431 |
if variant_hex:
|
| 3432 |
variant_key = f"{flat_key}.{variant_shade}"
|
|
|
|
| 3434 |
_flat_key_to_nested(variant_key, dtcg_token, result)
|
| 3435 |
token_count += 1
|
| 3436 |
except (ValueError, KeyError, TypeError, IndexError):
|
| 3437 |
+
# Fallback: just base color
|
| 3438 |
+
dtcg_token = _to_dtcg_token(c.hex, "color")
|
| 3439 |
+
_flat_key_to_nested(flat_key, dtcg_token, result)
|
| 3440 |
+
token_count += 1
|
| 3441 |
|
| 3442 |
else:
|
| 3443 |
# No ramps — base color only
|
|
|
|
| 3605 |
token_count += 1
|
| 3606 |
|
| 3607 |
# =========================================================================
|
| 3608 |
+
# SHADOWS — W3C DTCG format — export ONLY what was actually extracted
|
| 3609 |
# =========================================================================
|
| 3610 |
+
# Name mapping: assign best-fit names based on how many shadows were found
|
| 3611 |
+
_SHADOW_NAMES_BY_COUNT = {
|
| 3612 |
+
1: ["shadow.md"],
|
| 3613 |
+
2: ["shadow.sm", "shadow.lg"],
|
| 3614 |
+
3: ["shadow.sm", "shadow.md", "shadow.lg"],
|
| 3615 |
+
4: ["shadow.xs", "shadow.sm", "shadow.lg", "shadow.xl"],
|
| 3616 |
+
5: ["shadow.xs", "shadow.sm", "shadow.md", "shadow.lg", "shadow.xl"],
|
| 3617 |
+
}
|
| 3618 |
|
| 3619 |
if state.desktop_normalized and state.desktop_normalized.shadows:
|
| 3620 |
sorted_shadows = sorted(
|
|
|
|
| 3634 |
"color": p.get("color", "rgba(0,0,0,0.25)"),
|
| 3635 |
})
|
| 3636 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3637 |
n = len(parsed_shadows)
|
| 3638 |
+
# Cap at 5 maximum, take first 5 sorted by blur
|
| 3639 |
+
final_shadows = parsed_shadows[:5]
|
| 3640 |
+
names = _SHADOW_NAMES_BY_COUNT.get(len(final_shadows))
|
| 3641 |
+
if names is None:
|
| 3642 |
+
# Fallback for counts outside the 1-5 map (i.e. zero shadows): numeric names shadow.1, shadow.2, …
|
| 3643 |
+
names = [f"shadow.{i+1}" for i in range(len(final_shadows))]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3644 |
|
| 3645 |
for i, shadow in enumerate(final_shadows):
|
| 3646 |
+
token_name = names[i] if i < len(names) else f"shadow.{i + 1}"
|
| 3647 |
dtcg_value = {
|
| 3648 |
"color": shadow["color"],
|
| 3649 |
"offsetX": f"{shadow['x']}px",
|
output_json/figma-plugin-extracted/figma-design-token-creator 5/src/code.js
CHANGED
|
@@ -781,10 +781,14 @@ figma.ui.onmessage = async function(msg) {
|
|
| 781 |
var tokens = normalizeTokens(rawTokens);
|
| 782 |
console.log('Normalized tokens - colors:', tokens.colors.length, 'typography:', tokens.typography.length);
|
| 783 |
|
| 784 |
-
//
|
| 785 |
-
var specPage = figma.
|
| 786 |
specPage.name = '🎨 Design System Spec';
|
| 787 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 788 |
|
| 789 |
var yOffset = 0;
|
| 790 |
var xOffset = 0;
|
|
|
|
| 781 |
var tokens = normalizeTokens(rawTokens);
|
| 782 |
console.log('Normalized tokens - colors:', tokens.colors.length, 'typography:', tokens.typography.length);
|
| 783 |
|
| 784 |
+
// Use current page instead of creating new page (Figma Starter plan has 3-page limit)
|
| 785 |
+
var specPage = figma.currentPage;
|
| 786 |
specPage.name = '🎨 Design System Spec';
|
| 787 |
+
|
| 788 |
+
// Clear existing children on the page so spec starts fresh
|
| 789 |
+
while (specPage.children.length > 0) {
|
| 790 |
+
specPage.children[0].remove();
|
| 791 |
+
}
|
| 792 |
|
| 793 |
var yOffset = 0;
|
| 794 |
var xOffset = 0;
|