File size: 26,795 Bytes
3fbbaab
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7609903
 
 
 
 
 
 
4d7adc9
 
3fbbaab
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7609903
 
 
 
 
 
 
 
21ff762
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3f78849
3fbbaab
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7609903
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3f78849
 
3fbbaab
 
7609903
 
 
 
0bcc198
 
 
 
7609903
 
 
 
0bcc198
 
 
7609903
 
 
 
 
0bcc198
 
 
 
 
7609903
 
 
 
 
3fbbaab
 
 
 
 
0bcc198
3fbbaab
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0bcc198
 
 
 
 
 
 
3fbbaab
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7609903
 
 
 
 
3fbbaab
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7609903
 
 
 
 
3fbbaab
 
7609903
 
 
 
 
 
 
3fbbaab
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7609903
 
 
 
 
3fbbaab
 
 
 
 
 
 
 
 
0bcc198
 
 
 
3fbbaab
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7609903
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3fbbaab
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7609903
3fbbaab
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7609903
 
 
3fbbaab
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7609903
3fbbaab
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
#!/usr/bin/env python3
"""

batch_convert.py — Batch convert skills >180 lines using micro-skill-pipeline logic.



Implements the /skill-convert classification spec from stevesolun/micro-skills:

  - "Do this" instructions -> Build step (03-build.md)

  - "Check/avoid/ensure" instructions -> YES/NO gate questions (check-gates.md)

  - Reference data (tables, lists, examples) -> Separate reference files

  - Context/scope instructions -> Scope step (01-scope.md)

  - Each pipeline file <= 40 lines

  - Build step splits into 03a, 03b, ... if >40 lines



Usage:

    python batch_convert.py --scan ~/.claude/skills [--min-lines 180] [--dry-run]

    python batch_convert.py --file ~/.claude/skills/fastapi-pro/SKILL.md

"""

import argparse
import hashlib
import json
import os
import re
import shutil
import sys
from datetime import datetime, timezone
from pathlib import Path


from ctx.utils._fs_utils import atomic_write_text as _atomic_write_text

from ctx_config import cfg

# Minimum SKILL.md line count before conversion is attempted (from project config).
MIN_LINES = cfg.line_threshold
# Hard cap on lines per generated pipeline stage file (from project config).
MAX_STAGE_LINES = cfg.max_stage_lines
# UTC date stamp, e.g. "2024-01-31".  NOTE(review): not referenced in this file — confirm external use.
TODAY = datetime.now(timezone.utc).strftime("%Y-%m-%d")


def _line_count(content: str) -> int:
    return len(content.splitlines())


# ── Section classification ────────────────────────────────────────────────────

# Keyword patterns used by classify_section() to score each markdown section
# toward a pipeline stage. Matching is case-insensitive on whole words.

# Signals that a section defines scope/context/constraints (stage 01).
SCOPE_KEYWORDS = re.compile(
    r"\b(scope|constraint|prerequisite|precondition|before you|context|when to use|"
    r"trigger|activation|description|overview|purpose|applies when|input|requirements?)\b",
    re.IGNORECASE,
)
# Signals that a section describes planning / approach / design (stage 02).
PLAN_KEYWORDS = re.compile(
    r"\b(plan|approach|strategy|design|architecture|workflow|steps overview|"
    r"phases?|methodology|algorithm|decision|trade-?off|alternative)\b",
    re.IGNORECASE,
)
# Signals check/avoid/ensure instructions that become YES/NO gate questions.
GATE_KEYWORDS = re.compile(
    r"\b(check|ensure|avoid|never|must not|must always|do not|don't|verify|validate|"
    r"confirm|assert|require|guard|prevent|warning|caution|important|rule|"
    r"forbidden|prohibited|mandatory|critical|always|quality|review|audit)\b",
    re.IGNORECASE,
)
# Signals output / handoff / finalization instructions (stage 05).
DELIVER_KEYWORDS = re.compile(
    r"\b(deliver|output|present|finalize|format|handoff|hand off|return|report|"
    r"summary|cleanup|clean up|result|template|example output|response format)\b",
    re.IGNORECASE,
)
# Heuristics for reference material that gets pulled out into separate files.
REFERENCE_INDICATORS = re.compile(
    r"(\|.*\|.*\|)|"           # markdown tables
    r"(```[\s\S]{60,}```)|"    # long code blocks
    r"(\bexample\b.*:)|"       # example sections
    r"(^\s*[-*]\s+`[^`]+`\s*[-:])", # definition lists with code
    re.IGNORECASE | re.MULTILINE,
)

# Literal substring replacements that neutralise ("defang") shell / PHP /
# PowerShell payload fragments in generated markdown. Most rules insert a
# zero-width space entity (&#8203;) mid-token, so the text still reads
# normally to a human but no longer matches or pastes as an executable
# command; the PHP open-tags are HTML-escaped instead.
DANGEROUS_MARKDOWN_REPLACEMENTS: tuple[tuple[str, str], ...] = (
    ("<?php", "&lt;?php"),
    ("<?PHP", "&lt;?PHP"),
    ("<?=", "&lt;?="),
    ("$(", "$&#8203;("),
    ("curl http://", "cu&#8203;rl http://"),
    ("curl https://", "cu&#8203;rl https://"),
    ("wget http://", "wg&#8203;et http://"),
    ("wget https://", "wg&#8203;et https://"),
    ("bash -i", "ba&#8203;sh -i"),
    ("/bin/bash", "/bin/ba&#8203;sh"),
    ("/bin/sh", "/bin/&#8203;sh"),
    ("/dev/tcp", "/dev/&#8203;tcp"),
    ("| bash", "| ba&#8203;sh"),
    ("|/bin/sh", "|/bin/&#8203;sh"),
    ("|nc ", "|n&#8203;c "),
    ("nc -e", "nc &#8203;-e"),
    ("rm /tmp", "r&#8203;m /tmp"),
    ("mkfifo", "mk&#8203;fifo"),
    ("cat /tmp", "ca&#8203;t /tmp"),
    ("cat /etc/passwd", "ca&#8203;t /etc/passwd"),
    (
        "type C:\\Windows\\System32\\config\\sam",
        "ty&#8203;pe C:\\Windows\\System32\\config\\sam",
    ),
    ("head -n", "he&#8203;ad -n"),
    ("base64 -d", "base64 &#8203;-d"),
    ("system(", "system&#8203;("),
    ("exec(", "exec&#8203;("),
    ("os.system", "os.&#8203;system"),
    ("New-Object", "New&#8203;-Object"),
    ("GetStream", "Get&#8203;Stream"),
    ("Out-String", "Out&#8203;-String"),
    ("iex ", "i&#8203;ex "),
    ("System.Net.Sockets.TCPClient", "System.Net.Sockets.TCP&#8203;Client"),
    ("System.Text.ASCIIEncoding", "System.Text.ASCII&#8203;Encoding"),
)
# Regex-based defang rules for tokens that need word-boundary matching.
DANGEROUS_MARKDOWN_REGEX_REPLACEMENTS: tuple[tuple[re.Pattern[str], str], ...] = (
    (re.compile(r"\bpowershell\b", re.IGNORECASE), "power&#8203;shell"),
)


def defang_dangerous_markdown(text: str) -> str:
    """Neutralise executable-looking snippets before writing generated markdown.

    Applies every literal rule in DANGEROUS_MARKDOWN_REPLACEMENTS, then every
    regex rule in DANGEROUS_MARKDOWN_REGEX_REPLACEMENTS, and returns the
    defanged copy. The input string is not mutated.
    """
    result = text
    for needle, safe in DANGEROUS_MARKDOWN_REPLACEMENTS:
        result = result.replace(needle, safe)
    for pattern, safe in DANGEROUS_MARKDOWN_REGEX_REPLACEMENTS:
        result = pattern.sub(safe, result)
    return result


def classify_section(header: str, body: str) -> str:
    """Assign a pipeline stage name to one markdown section.

    Returns one of "reference", "scope", "plan", "gate", "deliver", or
    "build". Reference-looking bodies (tables / long code blocks spanning
    more than ten lines) win outright; otherwise keyword-hit scores decide,
    with gate keywords weighted highest and "build" as the 1-point fallback.
    Ties resolve in dict-insertion order (scope first).
    """
    text = f"{header}\n{body}"

    # Reference data short-circuits: tables or long code blocks.
    if REFERENCE_INDICATORS.search(body) and len(body.split("\n")) > 10:
        return "reference"

    # Weighted keyword scores; gate questions are weighted higher.
    scores = {
        "scope": 2 * len(SCOPE_KEYWORDS.findall(text)),
        "plan": 2 * len(PLAN_KEYWORDS.findall(text)),
        "gate": 3 * len(GATE_KEYWORDS.findall(text)),
        "deliver": 2 * len(DELIVER_KEYWORDS.findall(text)),
        "build": 1,  # default fallback when nothing else scores
    }

    # Frontmatter-shaped content is a strong scope signal.
    if re.search(r"^---\n.*?^---", body, re.MULTILINE | re.DOTALL):
        scores["scope"] += 5

    return max(scores, key=scores.get)


def extract_gate_questions(text: str) -> list[str]:
    """Turn imperative instruction lines into YES/NO gate questions.

    Lines already ending in "?" are kept with any leading list marker or
    numbering stripped. Bullet lines starting with avoid/ensure/check-style
    verbs are rewritten into the matching question template. Blank lines and
    everything else are dropped.
    """
    # (pattern, question template) pairs, tried in order; first match wins.
    converters = (
        (re.compile(r"^[-*]\s*(?:avoid|don'?t|do not|never)\s+(.+)", re.IGNORECASE),
         "Is the output free of {}? YES/NO"),
        (re.compile(r"^[-*]\s*(?:ensure|always|must|require)\s+(.+)", re.IGNORECASE),
         "Does the output {}? YES/NO"),
        (re.compile(r"^[-*]\s*(?:check|verify|validate|confirm)\s+(?:that\s+)?(.+)", re.IGNORECASE),
         "Has {} been verified? YES/NO"),
    )

    result: list[str] = []
    for raw in text.split("\n"):
        candidate = raw.strip()
        if not candidate:
            continue

        # Pre-existing questions pass through, minus list markers/numbering.
        if candidate.endswith("?"):
            question = candidate.lstrip("-*0123456789.) ")
            if question:
                result.append(question)
            continue

        for pattern, template in converters:
            match = pattern.match(candidate)
            if match:
                result.append(template.format(match.group(1).rstrip(".")))
                break

    return result


def parse_sections(content: str) -> tuple[list[dict], str]:
    """Split markdown into {"header", "body"} dicts keyed by #/##/### headers.

    A leading YAML frontmatter block is removed from the section scan and
    returned separately (empty string when absent). Text appearing before
    the first header becomes a section with an empty header.
    """
    # Peel off frontmatter, if any, before scanning for headers.
    fm_match = re.match(r"^---\n(.*?)\n---\n?", content, re.DOTALL)
    if fm_match:
        frontmatter = fm_match.group(0)
        body_text = content[fm_match.end():]
    else:
        frontmatter = ""
        body_text = content

    sections: list[dict] = []
    header = ""
    buffered: list[str] = []

    def flush() -> None:
        # Emit the accumulated section, skipping a fully-empty leading one.
        if header or buffered:
            sections.append({
                "header": header,
                "body": "\n".join(buffered).strip(),
            })

    for line in body_text.split("\n"):
        if re.match(r"^#{1,3}\s+", line):
            flush()
            header = line
            buffered = []
        else:
            buffered.append(line)
    flush()

    return sections, frontmatter


def split_into_chunks(text: str, max_lines: int) -> list[str]:
    """Split *text* into chunks of at most *max_lines* lines.

    When a chunk fills up, its last ~10 lines are scanned backwards for a
    blank line to break at; if none is found the chunk is cut hard at the
    limit. Each emitted chunk is stripped of surrounding whitespace. Text
    already within the limit is returned unchanged as a single chunk.
    """
    all_lines = text.split("\n")
    if len(all_lines) <= max_lines:
        return [text]

    chunks: list[str] = []
    pending: list[str] = []
    for line in all_lines:
        pending.append(line)
        if len(pending) < max_lines:
            continue
        # Prefer a paragraph boundary near the end of the full chunk.
        cut = 0
        for idx in range(len(pending) - 1, max(0, len(pending) - 10), -1):
            if not pending[idx].strip():
                cut = idx + 1
                break
        if cut:
            chunks.append("\n".join(pending[:cut]).strip())
            pending = pending[cut:]
        else:
            # No blank line close enough: hard cut at the limit.
            chunks.append("\n".join(pending).strip())
            pending = []

    if pending:
        chunks.append("\n".join(pending).strip())

    return chunks


def build_chunk_filename(index: int) -> str:
    """Return a Windows-safe build-stage filename for a zero-based chunk index.

    The first 26 chunks get letter suffixes ("03a-build.md" … "03z-build.md");
    later chunks fall back to 1-based, zero-padded numbers ("03-027-build.md").

    Raises:
        ValueError: if *index* is negative.
    """
    if index < 0:
        raise ValueError("chunk index must be non-negative")
    if index < 26:
        letter = "abcdefghijklmnopqrstuvwxyz"[index]
        return f"03{letter}-build.md"
    return f"03-{index + 1:03d}-build.md"


def _fixed_line_chunks(text: str, max_lines: int) -> list[str]:
    """Split markdown into hard line-count chunks."""
    lines = text.strip().split("\n")
    return [
        "\n".join(lines[i:i + max_lines]).strip()
        for i in range(0, len(lines), max_lines)
    ]


def _stage_shard_path(path: Path, index: int) -> Path:
    """Return a shard path that sorts next to its stage index file."""
    if index < 0:
        raise ValueError("chunk index must be non-negative")
    stem = path.stem
    suffix = path.suffix
    match = re.match(r"^(\d+)(.*)$", stem)
    if match:
        prefix, rest = match.groups()
        if index < 26:
            return path.with_name(f"{prefix}{chr(ord('a') + index)}{rest}{suffix}")
        return path.with_name(f"{prefix}-{index + 1:03d}{rest}{suffix}")
    return path.with_name(f"{stem}-{index + 1:03d}{suffix}")


# ── Converter ─────────────────────────────────────────────────────────────────

def convert_skill(

    skill_path: Path | str,

    output_dir: Path | str | None = None,

    line_threshold: int | None = None,

    *,

    source_content: str | None = None,

    skill_name: str | None = None,

    preserve_original: bool = True,

) -> dict:
    """Convert a single skill file into a micro-skill pipeline.

    Parses the skill's markdown into sections, classifies each section into
    a pipeline stage (scope / plan / build / gate / deliver / reference),
    and writes the staged reference files, gate questions, failure log, and
    an orchestrator SKILL.md into *output_dir*.

    Args:
        skill_path: Path to the SKILL.md being converted.
        output_dir: Destination directory; None converts in-place (same
            directory as the skill).
        line_threshold: Minimum line count to convert; defaults to
            cfg.line_threshold. Files at or below it are skipped.
        source_content: Trusted in-memory markdown used instead of reading
            skill_path. Callers that already have trusted content can pass
            this with preserve_original=False to avoid writing raw upstream
            bodies as temporary SKILL.md files.
        skill_name: Override for the skill name; defaults to the parent
            directory name of skill_path.
        preserve_original: When True, keep a copy of the source as
            SKILL.md.original (only written if it does not already exist).

    Returns:
        Stats dict: {"status": "skipped", "reason": ...} when under the
        threshold, otherwise {"status": "converted", ...} with file counts,
        gate counts, and the max generated-file line count.
    """
    skill_path = Path(skill_path)
    if output_dir is not None:
        output_dir = Path(output_dir)
    content = (
        source_content
        if source_content is not None
        else skill_path.read_text(encoding="utf-8", errors="replace")
    )
    line_count = _line_count(content)
    threshold = cfg.line_threshold if line_threshold is None else line_threshold

    # Short skills stay as-is; conversion only pays off past the threshold.
    if line_count <= threshold:
        return {"status": "skipped", "reason": f"{line_count} lines <= {threshold}"}

    # Compute source hash (recorded later in original-hash.txt for provenance)
    source_hash = hashlib.sha256(content.encode("utf-8")).hexdigest()

    # Determine skill name and output dir
    skill_name = skill_name or skill_path.parent.name
    if output_dir is None:
        output_dir = skill_path.parent

    refs_dir = output_dir / "references"
    refs_dir.mkdir(parents=True, exist_ok=True)

    # Parse sections
    sections, frontmatter = parse_sections(content)

    # Classify sections into per-stage buckets.
    scope_parts = []
    plan_parts = []
    build_parts = []
    gate_parts = []
    deliver_parts = []
    reference_parts = []
    all_gate_questions = []

    # Extract description from frontmatter
    desc_match = re.search(r"description:\s*[\"']?(.+?)[\"']?\s*$", frontmatter, re.MULTILINE)
    skill_description = desc_match.group(1) if desc_match else f"Converted from {skill_name} SKILL.md"

    # Bucket each section; gate sections also yield YES/NO questions.
    for section in sections:
        category = classify_section(section["header"], section["body"])
        combined = f"{section['header']}\n{section['body']}".strip()

        if category == "scope":
            scope_parts.append(combined)
        elif category == "plan":
            plan_parts.append(combined)
        elif category == "gate":
            gate_parts.append(combined)
            # Extract YES/NO questions
            questions = extract_gate_questions(section["body"])
            all_gate_questions.extend(questions)
        elif category == "deliver":
            deliver_parts.append(combined)
        elif category == "reference":
            reference_parts.append(combined)
        else:
            build_parts.append(combined)

    # Ensure no stage is empty — use fallback content
    if not scope_parts:
        scope_parts.append(f"# Step 1: Scope\n\nExtract constraints from the request for {skill_name}.")
    if not plan_parts:
        plan_parts.append("# Step 2: Plan\n\nDesign the approach. Map components to constraints.")
    if not build_parts:
        build_parts.append("# Step 3: Build\n\nExecute the plan, building each component in order.")
    if not deliver_parts:
        deliver_parts.append("# Step 5: Deliver\n\nFinalize and present the output.")

    # Generate gate questions from gate_parts if extraction found none
    if not all_gate_questions and gate_parts:
        for gp in gate_parts:
            qs = extract_gate_questions(gp)
            all_gate_questions.extend(qs)

    # If still no gate questions, generate generic domain ones
    if not all_gate_questions:
        all_gate_questions = [
            f"Does the output follow all constraints specified in the {skill_name} skill? YES/NO",
            "Is every element purposeful (no dead code, no placeholder text)? YES/NO",
            "Is the output usable as-is with no manual fixes needed? YES/NO",
        ]

    # ── Write pipeline files ──────────────────────────────────────────────

    # Preserve original. We do NOT unlink skill_path until the new SKILL.md
    # has been written atomically below — this guarantees the skill directory
    # always contains a valid SKILL.md, even if the process is killed mid-
    # conversion (data-loss protection).
    if preserve_original:
        original_path = output_dir / "SKILL.md.original"
        if not original_path.exists():
            if source_content is None:
                shutil.copy2(skill_path, original_path)
            else:
                original_path.write_text(source_content, encoding="utf-8")

    # 01-scope.md
    scope_text = "\n\n".join(scope_parts)
    scope_text = _ensure_header(scope_text, "# Step 1: Scope")
    scope_text += "\n\n## Gate\n\n- Can I state the deliverable in one sentence? YES/NO\n- Have I listed at least one explicit constraint? YES/NO\n- Do I know what inputs I'm working with? YES/NO\n\nAll YES = proceed. Any NO = ask the user one clarifying question."
    _write_stage(refs_dir / "01-scope.md", scope_text)

    # 02-plan.md
    plan_text = "\n\n".join(plan_parts)
    plan_text = _ensure_header(plan_text, "# Step 2: Plan")
    plan_text += "\n\n## Gate\n\n- Does every constraint from Step 1 map to at least one component? YES/NO\n- Is the build order explicit? YES/NO\n- Have I checked the failure log? YES/NO\n\nAll YES = proceed. Any NO = revise."
    _write_stage(refs_dir / "02-plan.md", plan_text)

    # 03-build.md (may split into 03a, 03b, ...)
    build_text = "\n\n".join(build_parts)
    build_text = _ensure_header(build_text, "# Step 3: Build")
    build_text += "\n\n## Gate\n\n- Have all components from the plan been built? YES/NO\n- Did every component pass its micro-check? YES/NO\n- Does the assembled output match the deliverable from Step 1? YES/NO\n\nAll YES = proceed. Any NO = rebuild the failing component."
    build_chunks = split_into_chunks(build_text, MAX_STAGE_LINES)
    build_files = []
    if len(build_chunks) == 1:
        _write_stage(refs_dir / "03-build.md", build_chunks[0])
        build_files.append("references/03-build.md")
    else:
        for i, chunk in enumerate(build_chunks):
            fname = build_chunk_filename(i)
            _write_stage(refs_dir / fname, chunk)
            build_files.append(f"references/{fname}")

    # 04-check.md — fixed template, not derived from the source skill.
    check_text = "# Step 4: Check\n\nHard gate. Assume there are problems. Find them.\nAnswer every question YES or NO. \"Mostly yes\" = NO.\n\n"
    check_text += "## Universal Checks\n\n"
    check_text += "1. Does the output match the deliverable from Step 1? YES/NO\n"
    check_text += "2. Are all constraints satisfied? YES/NO\n"
    check_text += "3. Does every element serve a purpose? YES/NO\n"
    check_text += "4. Is the output usable as-is with no manual fixes? YES/NO\n"
    check_text += "5. If code: does it run without errors? YES/NO\n\n"
    check_text += "## Domain Checks\n\nLoad `check-gates.md` and answer every question there.\n\n"
    check_text += "## Failure Log\n\n6. Re-read `failure-log.md`. Does the output violate any pattern? YES/NO\n\n"
    check_text += "## On Failure\n\n- For each NO: state what is wrong in one sentence.\n- Fix each issue.\n- Re-run this entire checklist.\n- After passing: append a one-line pattern to `failure-log.md`."
    _write_stage(refs_dir / "04-check.md", check_text)

    # 05-deliver.md
    deliver_text = "\n\n".join(deliver_parts)
    deliver_text = _ensure_header(deliver_text, "# Step 5: Deliver")
    deliver_text += "\n\n## Gate\n\n- Is the output in its final location? YES/NO\n- Is the summary concise (under 5 sentences)? YES/NO\n- Are all temp artifacts cleaned up? YES/NO\n\nAll YES = done."
    _write_stage(refs_dir / "05-deliver.md", deliver_text)

    # Reference files
    ref_file_list = []
    for i, ref in enumerate(reference_parts):
        fname = f"ref-{i + 1:02d}.md"
        _write_stage(refs_dir / fname, ref)
        ref_file_list.append(f"references/{fname}")

    # check-gates.md
    gates_text = f"# Domain Gate Questions -- {skill_name}\n\nAnswer each YES or NO. Any NO = fix before proceeding.\n\n"
    for i, q in enumerate(all_gate_questions[:20], 1):  # cap at 20 gates
        gates_text += f"{i}. {q}\n"
    (output_dir / "check-gates.md").write_text(
        defang_dangerous_markdown(gates_text),
        encoding="utf-8",
    )

    # failure-log.md — NOTE(review): overwritten unconditionally, so a
    # re-conversion resets any previously learned patterns; confirm intended.
    failure_text = "# Failure Log\nOne-line patterns learned from past mistakes.\n"
    (output_dir / "failure-log.md").write_text(failure_text, encoding="utf-8")

    # original-hash.txt — SHA-256 of the source content for provenance.
    (output_dir / "original-hash.txt").write_text(source_hash + "\n", encoding="utf-8")

    # Build file references for SKILL.md orchestrator
    build_ref_str = ""
    if len(build_files) == 1:
        build_ref_str = f"Read `{build_files[0]}`."
    else:
        build_ref_str = " then ".join(f"`{f}`" for f in build_files)
        build_ref_str = f"Read {build_ref_str}."

    # SKILL.md orchestrator
    orchestrator = f"""---

name: {skill_name}

description: "{skill_description}"

---



# {skill_name}



When this skill triggers, execute the following gated pipeline.

One step at a time. Do NOT skip ahead.



## Pipeline



1. **Scope** -- Read `references/01-scope.md`. Extract constraints from the request.

2. **Plan** -- Read `references/02-plan.md`. Design the approach. Map components.

3. **Build** -- {build_ref_str} Execute with micro-checks per component.

4. **Check** -- Read `references/04-check.md`. Answer every gate question YES or NO. Any NO = fix.

5. **Deliver** -- Read `references/05-deliver.md`. Finalize and present.



## Failure Log



Read `failure-log.md` before starting. Every pattern is a mandatory constraint.



## Rules



- Read each reference file when you reach that step, not all at once.

- Step 4 (Check) is the hard gate. "Mostly yes" counts as NO.

- On Check failure: fix, re-run full checklist, append pattern to `failure-log.md`.

"""
    # Atomic write replaces the original only for in-place conversion. When
    # output_dir is separate, the source skill remains intact.
    _atomic_write_text(output_dir / "SKILL.md", orchestrator.strip() + "\n")

    # Count total pipeline files and max line count
    all_pipeline_files = list(refs_dir.glob("*.md")) + [
        output_dir / "SKILL.md",
        output_dir / "check-gates.md",
        output_dir / "failure-log.md",
    ]
    max_lines = 0
    total_files = len(all_pipeline_files)
    for f in all_pipeline_files:
        if f.exists():
            try:
                lc = len(f.read_text(encoding="utf-8", errors="replace").split("\n"))
            except OSError:
                continue
            if lc > max_lines:
                max_lines = lc

    return {
        "status": "converted",
        "skill": skill_name,
        "original_lines": line_count,
        "pipeline_files": total_files,
        "gate_questions": len(all_gate_questions[:20]),
        "max_file_lines": max_lines,
        "build_splits": len(build_files),
        "reference_files": len(ref_file_list),
    }


def _ensure_header(text: str, default_header: str) -> str:
    """Ensure text starts with a markdown header."""
    if not text.strip().startswith("#"):
        return f"{default_header}\n\n{text}"
    return text


def _write_stage(path: Path, text: str) -> None:
    """Write a stage file, splitting into sub-files if >MAX_STAGE_LINES.

    Content is defanged first. When the stage fits the limit it is written
    directly to *path*; otherwise it is sharded into fixed-size chunk files
    next to *path*, and *path* itself becomes a small index that points at
    the first and last shard.
    """
    cleaned = defang_dangerous_markdown(text).strip()
    if len(cleaned.split("\n")) <= MAX_STAGE_LINES:
        path.write_text(cleaned + "\n", encoding="utf-8")
        return

    # Oversized stage: write hard-split shards alongside the index file.
    shards: list[Path] = []
    for i, piece in enumerate(_fixed_line_chunks(cleaned, MAX_STAGE_LINES)):
        target = _stage_shard_path(path, i)
        target.write_text(piece + "\n", encoding="utf-8")
        shards.append(target)

    index_text = (
        f"# {path.stem}\n\n"
        f"This generated stage was split into {len(shards)} shards to keep "
        f"each file under {MAX_STAGE_LINES} lines.\n"
        "Read the shard files in sorted filename order.\n\n"
        f"First shard: `{shards[0].name}`\n"
        f"Last shard: `{shards[-1].name}`\n"
    )
    path.write_text(index_text, encoding="utf-8")


# ── Main ──────────────────────────────────────────────────────────────────────

def main():
    """CLI entry point: convert one skill file, or scan directories in bulk."""
    parser = argparse.ArgumentParser(description="Batch convert skills to micro-skill pipeline")
    parser.add_argument("--scan", help="Directory to scan for SKILL.md files")
    parser.add_argument("--file", help="Single SKILL.md file to convert")
    parser.add_argument("--min-lines", type=int, default=cfg.line_threshold, help=f"Minimum lines to convert (default: {cfg.line_threshold})")
    parser.add_argument("--dry-run", action="store_true", help="Just count, don't convert")
    parser.add_argument("--extra-dirs", nargs="*", help="Additional directories to scan")
    args = parser.parse_args()

    threshold = args.min_lines

    # Single-file mode: convert exactly one skill and dump JSON stats.
    if args.file:
        target = Path(args.file)
        if not target.exists():
            print(f"File not found: {target}", file=sys.stderr)
            sys.exit(1)
        print(json.dumps(convert_skill(target, line_threshold=threshold), indent=2))
        return

    if not args.scan:
        print("Error: --scan DIR or --file PATH required", file=sys.stderr)
        sys.exit(1)

    # Gather every candidate SKILL.md under the scan roots.
    roots = [Path(os.path.expanduser(args.scan))]
    roots.extend(Path(os.path.expanduser(d)) for d in (args.extra_dirs or []))

    candidates = []
    for root in roots:
        if not root.exists():
            print(f"Warning: {root} does not exist, skipping", file=sys.stderr)
            continue
        for skill_md in root.rglob("SKILL.md"):
            # A sibling SKILL.md.original marks an already-converted skill.
            if (skill_md.parent / "SKILL.md.original").exists():
                continue
            try:
                n_lines = _line_count(skill_md.read_text(encoding="utf-8", errors="replace"))
                if n_lines > threshold:
                    candidates.append((skill_md, n_lines))
            except Exception as exc:
                print(f"Warning: failed to read skill file {skill_md}: {exc}", file=sys.stderr)

    print(f"Found {len(candidates)} skills > {threshold} lines")

    # Dry run: report the 20 longest candidates and stop.
    if args.dry_run:
        for sf, lc in sorted(candidates, key=lambda item: -item[1])[:20]:
            print(f"  {lc:5d} lines  {sf.parent.name}")
        if len(candidates) > 20:
            print(f"  ... and {len(candidates) - 20} more")
        return

    # Convert each candidate, logging progress every 50 conversions.
    converted = 0
    skipped = 0
    errors = 0
    for i, (sf, lc) in enumerate(candidates):
        try:
            result = convert_skill(sf, line_threshold=threshold)
            if result["status"] == "converted":
                converted += 1
                if (i + 1) % 50 == 0:
                    print(f"  [{i + 1}/{len(candidates)}] converted: {result['skill']} ({result['original_lines']} -> {result['pipeline_files']} files, {result['gate_questions']} gates)")
            else:
                skipped += 1
        except Exception as e:
            errors += 1
            print(f"  ERROR: {sf.parent.name}: {e}", file=sys.stderr)

    print(f"\nDone: {converted} converted, {skipped} skipped, {errors} errors")


if __name__ == "__main__":
    main()