# mypy: disable-error-code=attr-defined
"""ctx_monitor.py -- Local HTTP dashboard for ctx runtime and catalog activity.

``ctx-monitor serve [--port 8765]`` starts a zero-dependency threaded HTTP
server (stdlib http.server) that renders the audit log + skill-events.jsonl +
sidecars into a browser UI at http://localhost:8765/.

Routes:
    /                           Home — summary stats + session list + links
    /loaded                     Live manifest view + load/unload actions
    /sessions                   List of sessions (skills/agents/MCP activity)
    /session/<session_id>       Skills + agents seen in that session
    /skills                     Sidecar card grid with grade + score filters
    /skill/<slug>               Sidecar breakdown + timeline of audit events
    /wiki                       Wiki entity index — all pages with search
    /wiki/<slug>?type=<t>       One wiki entity page (frontmatter + body)
    /graph                      Built-in graph explorer + popular seeds
    /graph?slug=<s>&type=...    Focus graph view on a specific entity
    /manage                     Search/edit/delete/import catalog entities
    /harness                    Manual harness setup for user-owned LLMs
    /docs                       Local docs index + public docs handoff
    /config                     Editable ctx config with defaults fallback
    /status                     Durable queue + graph/wiki artifact state
    /kpi                        Grade / lifecycle / category KPIs
    /runtime                    Generic harness validation/escalation ledger
    /logs                       Filterable tail of ctx-audit.jsonl
    /events                     Live SSE stream of new audit-log lines
    /api/sessions.json          JSON index for scripting
    /api/manifest.json          Raw ~/.claude/skill-manifest.json
    /api/status.json            Queue counts + artifact promotion metadata
    /api/runtime.json           Generic harness validation/escalation summary
    /api/skill/<slug>.json      Sidecar passthrough
    /api/graph/<slug>.json      Dashboard-shaped neighborhood; accepts type
    /api/entities/search.json   Search wiki entities across supported types
    /api/entity/<slug>.json     Wiki entity frontmatter + Markdown body
    /api/config.json            Effective/default/user config
    /api/kpi.json               DashboardSummary passthrough

Design notes:
- No Flask / Starlette / FastAPI dependency. Request handling is threaded so
  one open SSE client cannot monopolize the local dashboard. Repo-doc
  rendering uses the package's Markdown dependencies for MkDocs-like output.
- GET views read append-only files. POST mutation endpoints require loopback
  access, a per-process token, and same-origin headers.
- SSE endpoint tails ``~/.claude/ctx-audit.jsonl`` and pushes each new line
  as a server-sent event. Clients auto-reconnect.
- Security: binds to 127.0.0.1 by default. ``--host`` override requires an
  explicit flag to emphasize the local-dev-only intent.

This is a minimal dashboard. Power users should pipe the audit log into
Grafana / Loki / whatever; ``ctx-monitor`` is the zero-config starting point.
"""
from __future__ import annotations

import argparse
import html
import ipaddress
import json
import math
import os
import re
import secrets
import sys
import threading
import time
from collections import defaultdict, deque
from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
from pathlib import Path, PurePosixPath
from typing import Any
from urllib.parse import quote, unquote

from ctx.core.wiki import wiki_queue
from ctx.core.wiki.wiki_utils import parse_frontmatter_and_body
from ctx.utils._file_lock import file_lock
from ctx.utils._fs_utils import atomic_write_text as _atomic_write_text
from ctx.utils._fs_utils import safe_atomic_write_text as _safe_atomic_write_text
from ctx.utils._safe_name import is_safe_source_name

_MONITOR_TOKEN = ""
_MONITOR_MUTATIONS_ENABLED = True
_GRAPH_CACHE_KEY: tuple[Any, ...] | None = None
_GRAPH_CACHE_VALUE: Any | None = None
_SIDECAR_INDEX_CACHE_KEY: tuple[tuple[Path, float, int], ...] | None = None
_SIDECAR_INDEX_CACHE_VALUE: dict[tuple[str, str], dict] | None = None
_WIKI_INDEX_LIMIT_PER_TYPE = 500
_GRAPH_REPORT_RE = re.compile(r"Nodes:\s*([\d,]+)\s*\|\s*Edges:\s*([\d,]+)")
_MAX_POST_BODY_BYTES = 64 * 1024


# ─── Data sources ────────────────────────────────────────────────────────────

def _host_allows_mutations(host: str) -> bool:
    normalized = (host or "").strip().strip("[]").rstrip(".").lower()
    if normalized == "localhost":
        return True
    try:
        return ipaddress.ip_address(normalized).is_loopback
    except ValueError:
        return False


def _claude_dir() -> Path:
    return Path(os.path.expanduser("~/.claude"))


def _audit_log_path() -> Path:
    # Avoid importing ctx_audit_log here so the monitor can run even if
    # ctx_audit_log is absent for some reason.
    return _claude_dir() / "ctx-audit.jsonl"


def _events_jsonl_path() -> Path:
    return _claude_dir() / "skill-events.jsonl"


def _runtime_lifecycle_path() -> Path:
    from ctx.adapters.generic.runtime_lifecycle import RuntimeLifecycleStore

    return RuntimeLifecycleStore().events_path


def _manifest_path() -> Path:
    return _claude_dir() / "skill-manifest.json"


def _sidecar_dir() -> Path:
    return _claude_dir() / "skill-quality"


def _wiki_dir() -> Path:
    return _claude_dir() / "skill-wiki"


def _user_config_path() -> Path:
    return _claude_dir() / "skill-system-config.json"


def _load_dashboard_graph() -> Any:
    """Load the wiki graph once per graph.json file version."""
    global _GRAPH_CACHE_KEY, _GRAPH_CACHE_VALUE
    graph_path = _wiki_dir() / "graphify-out" / "graph.json"
    overlay_path = graph_path.with_name("entity-overlays.jsonl")
    from ctx.core.graph.resolve_graph import load_graph as _lg  # type: ignore

    if not graph_path.exists():
        _GRAPH_CACHE_KEY = None
        _GRAPH_CACHE_VALUE = None
        return _lg(graph_path)
    stat = graph_path.stat()
    overlay_key = None
    if overlay_path.exists():
        overlay_stat = overlay_path.stat()
        overlay_key = (overlay_stat.st_mtime, overlay_stat.st_size)
    cache_key = (graph_path.resolve(), stat.st_mtime, stat.st_size, id(_lg), overlay_key)
    if _GRAPH_CACHE_KEY == cache_key and _GRAPH_CACHE_VALUE is not None:
        return _GRAPH_CACHE_VALUE
    graph = _lg(graph_path)
    _GRAPH_CACHE_KEY = cache_key
    _GRAPH_CACHE_VALUE = graph
    return graph


def _mcp_shard(slug: str) -> str:
    first = slug[0] if slug else ""
    return first if first.isalpha() else "0-9"


_DASHBOARD_ENTITY_SOURCES: tuple[tuple[str, str, bool], ...] = (
    ("skills", "skill", False),
    ("agents", "agent", False),
    ("mcp-servers", "mcp-server", True),
    ("harnesses", "harness", False),
)
_DASHBOARD_ENTITY_TYPES: tuple[str, ...] = tuple(
    entity_type for _, entity_type, _ in _DASHBOARD_ENTITY_SOURCES
)
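
# Sharding example (illustrative, made-up slugs): alphabetic slugs bucket by
# first letter, everything else lands in the shared "0-9" shard.
#
#     >>> _mcp_shard("github-mcp")
#     'g'
#     >>> _mcp_shard("7zip")
#     '0-9'
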

def _normalize_dashboard_entity_type(raw: object) -> str | None:
    if raw is None:
        return None
    value = str(raw).strip()
    normalized = {
        "skills": "skill",
        "skill": "skill",
        "agents": "agent",
        "agent": "agent",
        "mcp": "mcp-server",
        "mcp-server": "mcp-server",
        "mcp-servers": "mcp-server",
        "harness": "harness",
        "harnesses": "harness",
    }.get(value, value)
    return normalized if normalized in _DASHBOARD_ENTITY_TYPES else None


def _audit_entity_type(row: dict) -> str | None:
    raw_meta = row.get("meta")
    meta: dict[str, Any] = raw_meta if isinstance(raw_meta, dict) else {}
    for raw in (
        meta.get("entity_type"),
        row.get("entity_type"),
        row.get("subject_type"),
        row.get("type"),
    ):
        normalized = _normalize_dashboard_entity_type(raw)
        if normalized:
            return normalized
    event = str(row.get("event") or "")
    prefix, _, _ = event.partition(".")
    return _normalize_dashboard_entity_type(prefix)


def _wiki_entity_path(slug: str, entity_type: str | None = None) -> Path | None:
    """Resolve a slug to its wiki entity page.

    Wiki layout: ``entities/skills/<slug>.md``, ``entities/agents/<slug>.md``,
    ``entities/harnesses/<slug>.md``, or sharded
    ``entities/mcp-servers/<shard>/<slug>.md``. Returns the first match unless
    ``entity_type`` disambiguates duplicate slugs.
    """
    # Validate slug so a crafted request can't escape the wiki tree.
    if not _is_safe_slug(slug):
        return None
    for sub, current_type, recursive in _DASHBOARD_ENTITY_SOURCES:
        if entity_type is not None and entity_type != current_type:
            continue
        p = (
            _wiki_dir() / "entities" / sub / _mcp_shard(slug) / f"{slug}.md"
            if recursive
            else _wiki_dir() / "entities" / sub / f"{slug}.md"
        )
        if p.exists():
            return p
    return None


def _wiki_entity_target_path(slug: str, entity_type: str) -> Path:
    """Return the canonical wiki entity path for a new/updated entity."""
    if not _is_safe_slug(slug):
        raise ValueError(f"invalid slug: {slug!r}")
    normalized = _normalize_dashboard_entity_type(entity_type)
    if normalized is None:
        raise ValueError(f"unsupported entity_type: {entity_type!r}")
    for sub, current_type, recursive in _DASHBOARD_ENTITY_SOURCES:
        if normalized != current_type:
            continue
        if recursive:
            return _wiki_dir() / "entities" / sub / _mcp_shard(slug) / f"{slug}.md"
        return _wiki_dir() / "entities" / sub / f"{slug}.md"
    raise ValueError(f"unsupported entity_type: {entity_type!r}")


def _iter_wiki_entity_paths(
    entity_type: str | None = None,
) -> list[tuple[str, str, Path]]:
    normalized = _normalize_dashboard_entity_type(entity_type) if entity_type else None
    if entity_type is not None and normalized is None:
        raise ValueError(f"unsupported entity_type: {entity_type!r}")
    base = _wiki_dir() / "entities"
    if not base.is_dir():
        return []
    rows: list[tuple[str, str, Path]] = []
    for sub, current_type, recursive in _DASHBOARD_ENTITY_SOURCES:
        if normalized is not None and normalized != current_type:
            continue
        root = base / sub
        if not root.is_dir():
            continue
        paths = root.rglob("*.md") if recursive else root.glob("*.md")
        for path in paths:
            slug = path.stem
            if _is_safe_slug(slug):
                rows.append((slug, current_type, path))
    return sorted(rows, key=lambda row: (row[1], row[0].lower(), row[2].as_posix()))


def _wiki_entity_detail(slug: str, entity_type: str | None = None) -> dict[str, Any] | None:
    normalized = _normalize_dashboard_entity_type(entity_type) if entity_type else None
    if entity_type is not None and normalized is None:
        raise ValueError(f"unsupported entity_type: {entity_type!r}")
    path = _wiki_entity_path(slug, entity_type=normalized)
    if path is None:
        return None
    text = path.read_text(encoding="utf-8", errors="replace")
    frontmatter, body = _parse_frontmatter(text)
    detected_type = normalized or _normalize_dashboard_entity_type(frontmatter.get("type")) or "skill"
    return {
        "slug": slug,
        "type": detected_type,
        "path": str(path),
        "frontmatter": frontmatter,
        "body": body,
    }
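
# Illustrative detail payload for a hypothetical "git-flow" skill page,
# matching the return shape above:
#
#     {"slug": "git-flow", "type": "skill",
#      "path": "~/.claude/skill-wiki/entities/skills/git-flow.md",
#      "frontmatter": {"title": "Git Flow", "type": "skill"},
#      "body": "# Git Flow\n..."}
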

def _search_wiki_entities(
    query: str = "",
    entity_type: str | None = None,
    *,
    limit: int = 80,
) -> list[dict[str, Any]]:
    terms = [term for term in re.split(r"\s+", query.lower().strip()) if term]
    results: list[dict[str, Any]] = []
    for slug, current_type, path in _iter_wiki_entity_paths(entity_type):
        try:
            head = path.read_text(encoding="utf-8", errors="replace")[:4096]
        except OSError:
            continue
        frontmatter, body = _parse_frontmatter(head)
        tags = _frontmatter_tags(frontmatter.get("tags", ""), limit=None)
        description = _frontmatter_text(frontmatter.get("description", ""))
        title = _frontmatter_text(frontmatter.get("title") or frontmatter.get("name") or slug)
        haystack = " ".join([slug, current_type, title, description, " ".join(tags), body]).lower()
        if terms and not all(term in haystack for term in terms):
            continue
        results.append({
            "slug": slug,
            "type": current_type,
            "title": title,
            "description": description,
            "tags": tags[:12],
            "path": str(path),
            "href": _entity_wiki_href(slug, current_type),
        })
        if len(results) >= max(1, limit):
            break
    return results


def _normalize_entity_tags(raw: Any) -> list[str]:
    if isinstance(raw, list):
        parts = raw
    else:
        parts = re.split(r"[,\n]+", str(raw or ""))
    tags: list[str] = []
    seen: set[str] = set()
    for part in parts:
        tag = re.sub(r"[^a-z0-9_.+-]+", "-", str(part).lower()).strip("-_.+")
        if tag and tag not in seen:
            seen.add(tag)
            tags.append(tag)
    return tags


def _yaml_scalar(value: Any) -> str:
    if isinstance(value, bool):
        return "true" if value else "false"
    if isinstance(value, int | float):
        return str(value)
    text = str(value).replace("\r\n", "\n").replace("\r", "\n").strip()
    if not text:
        return '""'
    if re.fullmatch(r"[A-Za-z0-9][A-Za-z0-9 _./:+@-]*", text):
        return text
    return json.dumps(text, ensure_ascii=False)


def _frontmatter_to_text(frontmatter: dict[str, Any]) -> str:
    lines = ["---"]
    for key, value in frontmatter.items():
        if value is None or value == "":
            continue
        if isinstance(value, list):
            rendered = ", ".join(_yaml_scalar(item) for item in value)
            lines.append(f"{key}: [{rendered}]")
        else:
            lines.append(f"{key}: {_yaml_scalar(value)}")
    lines.append("---")
    return "\n".join(lines) + "\n"


def _entity_content_from_payload(
    payload: dict[str, Any],
    *,
    existing: dict[str, Any] | None = None,
) -> tuple[str, str, str]:
    slug = str(payload.get("slug", "")).strip()
    if not _is_safe_slug(slug):
        raise ValueError(f"invalid slug: {slug!r}")
    entity_type = str(payload.get("entity_type", "skill")).strip() or "skill"
    normalized = _normalize_dashboard_entity_type(entity_type)
    if normalized is None:
        raise ValueError(f"unsupported entity_type: {entity_type!r}")
    body = str(payload.get("body", "")).strip()
    if not body:
        raise ValueError("body is required")
    title = str(payload.get("title") or slug).strip()
    today = time.strftime("%Y-%m-%d", time.gmtime())
    frontmatter = dict(existing or {})
    frontmatter["title"] = title
    frontmatter["type"] = normalized
    frontmatter.setdefault("created", today)
    frontmatter["updated"] = today
    description = str(payload.get("description") or "").strip()
    if description or "description" in payload:
        frontmatter.pop("description", None)
        if description:
            frontmatter["description"] = description
    tags = _normalize_entity_tags(payload.get("tags"))
    if tags or "tags" in payload:
        frontmatter.pop("tags", None)
        if tags:
            frontmatter["tags"] = tags
    source_url = str(payload.get("source_url") or "").strip()
    if source_url or "source_url" in payload:
        frontmatter.pop("source_url", None)
        if source_url:
            frontmatter["source_url"] = source_url
    return slug, normalized, _frontmatter_to_text(frontmatter) + body.rstrip() + "\n"


def _queue_entity_refresh(
    *,
    entity_type: str,
    slug: str,
    entity_path: Path,
    content: str,
    action: str,
) -> None:
    wiki = _wiki_dir()
    wiki_queue.enqueue_entity_upsert(
        wiki,
        entity_type=entity_type,
        slug=slug,
        entity_path=entity_path,
        content=content,
        action=action,
        source="ctx-monitor",
    )
    wiki_queue.enqueue_maintenance_job(
        wiki,
        kind=wiki_queue.GRAPH_EXPORT_JOB,
        payload={"reason": f"entity-{action}", "entity_type": entity_type, "slug": slug},
        source="ctx-monitor",
    )


def _upsert_wiki_entity(payload: dict[str, Any]) -> tuple[bool, str]:
    try:
        requested_slug = str(payload.get("slug", "")).strip()
        requested_type = str(payload.get("entity_type", "skill")).strip() or "skill"
        existing_detail = _wiki_entity_detail(requested_slug, requested_type)
        existing_meta = (
            existing_detail.get("frontmatter") if isinstance(existing_detail, dict) else None
        )
        slug, entity_type, content = _entity_content_from_payload(
            payload,
            existing=existing_meta if isinstance(existing_meta, dict) else None,
        )
        path = _wiki_entity_target_path(slug, entity_type)
        with file_lock(path):
            _safe_atomic_write_text(path, content, encoding="utf-8")
        _queue_entity_refresh(
            entity_type=entity_type,
            slug=slug,
            entity_path=path,
            content=content,
            action="upsert",
        )
    except Exception as exc:  # noqa: BLE001
        return False, f"{type(exc).__name__}: {exc}"
    return True, f"saved {entity_type}:{slug} and queued graph refresh"


def _delete_wiki_entity(slug: str, entity_type: str) -> tuple[bool, str]:
    try:
        normalized = _normalize_dashboard_entity_type(entity_type)
        if normalized is None:
            raise ValueError(f"unsupported entity_type: {entity_type!r}")
        if not _is_safe_slug(slug):
            raise ValueError(f"invalid slug: {slug!r}")
        path = _wiki_entity_path(slug, entity_type=normalized)
        if path is None:
            return False, f"no wiki entity found for {normalized}:{slug}"
        with file_lock(path):
            path.unlink()
        _queue_entity_refresh(
            entity_type=normalized,
            slug=slug,
            entity_path=path,
            content="",
            action="delete",
        )
    except Exception as exc:  # noqa: BLE001
        return False, f"{type(exc).__name__}: {exc}"
    return True, f"deleted {normalized}:{slug} and queued graph refresh"


def _parse_frontmatter(text: str) -> tuple[dict[str, Any], str]:
    """Split frontmatter from body using the canonical wiki parser."""
    return parse_frontmatter_and_body(text)


def _frontmatter_text(value: Any) -> str:
    if isinstance(value, list):
        return ", ".join(str(v) for v in value)
    if isinstance(value, dict):
        return json.dumps(value, ensure_ascii=False, default=str)
    if value is None:
        return ""
    return str(value)


def _truncate_text(value: str, limit: int) -> tuple[str, bool]:
    if limit <= 0 or len(value) <= limit:
        return value, False
    if limit <= 3:
        return value[:limit], True
    return value[: limit - 3].rstrip() + "...", True


def _json_for_script(value: Any) -> str:
    # Escape "<" so embedded JSON can never terminate a <script> block.
    return json.dumps(value, ensure_ascii=False, default=str).replace("<", "\\u003c")


def _frontmatter_tags(value: Any, *, limit: int | None = None) -> list[str]:
    if isinstance(value, list):
        raw_items = value
    else:
        raw = _frontmatter_text(value)
        raw_items = raw.replace("[", "").replace("]", "").split(",")
    out: list[str] = []
    for item in raw_items:
        tok = str(item).strip().strip("'\"")
        if tok:
            out.append(tok)
            if limit is not None and len(out) >= limit:
                break
    return out


_WIKI_INLINE_RE = re.compile(
    r"(`[^`\n]+`|\[\[[^\]\n]+\]\]|(?<!\!)\[[^\]\n]+\]\([^)\n]+\))",
)
# Quality blocks embedded in entity pages are fenced by HTML comment markers.
_WIKI_QUALITY_BLOCK_RE = re.compile(
    r"<!--\s*quality:start\s*-->\s*(.*?)\s*<!--\s*quality:end\s*-->",
    re.IGNORECASE | re.DOTALL,
)
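
# Frontmatter serialization example (illustrative):
#
#     >>> _frontmatter_to_text({"title": "Git Flow", "tags": ["git", "vcs"]})
#     '---\ntitle: Git Flow\ntags: [git, vcs]\n---\n'
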

def _wiki_link_href(target: str) -> tuple[str, str]:
    """Return dashboard href + display label for an Obsidian-style wikilink."""
    normalized = target.strip().replace("\\", "/").removesuffix(".md")
    parts = [part for part in normalized.split("/") if part]
    entity_type = ""
    if len(parts) >= 3 and parts[0] == "entities":
        entity_type = {
            "skills": "skill",
            "agents": "agent",
            "mcp-servers": "mcp-server",
            "harnesses": "harness",
        }.get(parts[1], "")
    slug = parts[-1] if parts else normalized
    if not _is_safe_slug(slug):
        return "#", slug or target
    suffix = f"?type={quote(entity_type)}" if entity_type else ""
    return f"/wiki/{quote(slug)}{suffix}", slug


def _markdown_link_href(target: str) -> str | None:
    """Return a safe href for normal Markdown links, or None to suppress it."""
    cleaned = target.strip()
    if not cleaned:
        return None
    if cleaned.startswith(("/", "#")):
        return cleaned
    if re.match(r"^https?://", cleaned, re.IGNORECASE):
        return cleaned
    if re.match(r"^mailto:[^@\s]+@[^@\s]+$", cleaned, re.IGNORECASE):
        return cleaned
    return None


def _render_wiki_inline(text: str) -> str:
    """Render a small safe inline Markdown subset used by wiki pages."""
    out: list[str] = []
    last = 0
    for match in _WIKI_INLINE_RE.finditer(text):
        out.append(html.escape(text[last:match.start()]))
        token = match.group(0)
        if token.startswith("`"):
            out.append(f"<code>{html.escape(token[1:-1])}</code>")
        elif token.startswith("[["):
            inner = token[2:-2]
            target, _, label = inner.partition("|")
            href, fallback_label = _wiki_link_href(target)
            link_text = label.strip() or fallback_label
            out.append(
                f'<a href="{html.escape(href)}">{html.escape(link_text)}</a>',
            )
        else:
            link_match = re.fullmatch(
                r"\[([^\]\n]+)\]\(([^\s()\n]+)(?:\s+\"[^\"]*\")?\)",
                token,
            )
            if not link_match:
                out.append(html.escape(token))
            else:
                label, target = link_match.groups()
                safe_href = _markdown_link_href(target)
                if safe_href is None:
                    out.append(html.escape(label))
                else:
                    out.append(
                        f'<a href="{html.escape(safe_href)}">{html.escape(label)}</a>',
                    )
        last = match.end()
    out.append(html.escape(text[last:]))
    return "".join(out)


def _render_wiki_markdown(markdown_text: str) -> str:
    """Render a conservative Markdown subset without adding dependencies."""
    lines = markdown_text.splitlines()
    out: list[str] = []
    paragraph: list[str] = []
    list_items: list[str] = []
    code_lines: list[str] = []
    in_code = False

    def flush_paragraph() -> None:
        if paragraph:
            out.append(f"<p>{_render_wiki_inline(' '.join(paragraph))}</p>")
            paragraph.clear()

    def flush_list() -> None:
        if list_items:
            out.append("<ul>" + "".join(f"<li>{item}</li>" for item in list_items) + "</ul>")
            list_items.clear()

    def flush_code() -> None:
        if code_lines:
            out.append("<pre>" + html.escape("\n".join(code_lines)) + "</pre>")
            code_lines.clear()

    for line in lines:
        stripped = line.strip()
        if stripped.startswith("```"):
            if in_code:
                flush_code()
                in_code = False
            else:
                flush_paragraph()
                flush_list()
                in_code = True
            continue
        if in_code:
            code_lines.append(line)
            continue
        if not stripped:
            flush_paragraph()
            flush_list()
            continue
        heading = re.match(r"^(#{1,4})\s+(.+)$", stripped)
        if heading:
            flush_paragraph()
            flush_list()
            level = min(len(heading.group(1)), 4)
            out.append(f"<h{level}>{_render_wiki_inline(heading.group(2))}</h{level}>")
            continue
        bullet = re.match(r"^\s*[-*]\s+(.+)$", line)
        if bullet:
            flush_paragraph()
            list_items.append(_render_wiki_inline(bullet.group(1).strip()))
            continue
        flush_list()
        paragraph.append(stripped)

    flush_code()
    flush_paragraph()
    flush_list()
    return "".join(out) if out else '<p class="muted">No body.</p>'
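
# Renderer example (illustrative input; exact markup per the helpers above):
#
#     >>> _render_wiki_markdown("# Title\n\n- one\n- two")
#     '<h1>Title</h1><ul><li>one</li><li>two</li></ul>'
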

def _extract_embedded_quality_block(markdown_text: str) -> tuple[str, str | None]:
    matches = list(_WIKI_QUALITY_BLOCK_RE.finditer(markdown_text))
    if not matches:
        return markdown_text, None
    quality_blocks = [
        match.group(1).strip() for match in matches if match.group(1).strip()
    ]
    body = _WIKI_QUALITY_BLOCK_RE.sub("\n\n", markdown_text)
    body = re.sub(r"\n{3,}", "\n\n", body).strip()
    quality_markdown = "\n\n".join(quality_blocks).strip() or None
    return body, quality_markdown


def _slugish(value: str) -> str:
    return re.sub(r"[^a-z0-9]+", "-", value.lower()).strip("-")


def _strip_duplicate_wiki_heading(markdown_text: str, slug: str) -> str:
    """Drop the first H1 if it only repeats the page slug."""
    lines = markdown_text.splitlines()
    for idx, line in enumerate(lines):
        if not line.strip():
            continue
        match = re.match(r"^#\s+(.+?)\s*$", line.strip())
        if match and _slugish(match.group(1)) == _slugish(slug):
            del lines[idx]
            while idx < len(lines) and not lines[idx].strip():
                del lines[idx]
        break
    return "\n".join(lines)


def _entity_wiki_href(slug: str, entity_type: str | None = None) -> str:
    suffix = f"?type={quote(entity_type)}" if entity_type in _DASHBOARD_ENTITY_TYPES else ""
    return f"/wiki/{quote(slug)}{suffix}"


def _graph_type_from_node_id(node_id: str, fallback: str = "skill") -> str:
    prefix = node_id.split(":", 1)[0] if ":" in node_id else ""
    return {
        "skill": "skill",
        "agent": "agent",
        "mcp-server": "mcp-server",
        "harness": "harness",
    }.get(prefix, fallback)


def _subgraph_sidecar(slug: str, entity_type: str) -> dict[str, Any] | None:
    sidecar = _load_sidecar(slug, entity_type=entity_type)
    return sidecar if isinstance(sidecar, dict) else None


def _subgraph_quality_cell(sidecar: dict[str, Any] | None) -> str:
    if sidecar is None:
        return '<span class="muted">no sidecar</span>'
    grade = html.escape(str(sidecar.get("grade", "F")))
    score = float(sidecar.get("raw_score", sidecar.get("score", 0.0)) or 0.0)
    floor = str(sidecar.get("hard_floor") or "").strip()
    floor_html = (
        f' <span class="muted">floor {html.escape(floor)}</span>' if floor else ""
    )
    return (
        f'<span class="pill grade-{grade}">{grade}</span> '
        f"{score:.3f}{floor_html}"
    )


def _subgraph_node_title(
    label: str,
    entity_type: str,
    sidecar: dict[str, Any] | None,
) -> str:
    if sidecar is None:
        return f"{label} ({entity_type}) · no sidecar"
    grade = str(sidecar.get("grade", "F"))
    score = float(sidecar.get("raw_score", sidecar.get("score", 0.0)) or 0.0)
    floor = str(sidecar.get("hard_floor") or "").strip()
    floor_text = f" · floor {floor}" if floor else ""
    return f"{label} ({entity_type}) · grade {grade} · score {score:.3f}{floor_text}"


def _subgraph_node_fill(entity_type: str) -> str:
    return {
        "agent": "#f59e0b",
        "mcp-server": "#ef4444",
        "harness": "#22c55e",
        "skill": "#6366f1",
    }.get(entity_type, "#64748b")


def _subgraph_grade_stroke(sidecar: dict[str, Any] | None) -> str:
    grade = str((sidecar or {}).get("grade") or "")
    return {
        "A": "#059669",
        "B": "#2563eb",
        "C": "#d97706",
        "D": "#ea580c",
        "F": "#dc2626",
    }.get(grade, "#ffffff")


def _render_entity_subgraph_svg(
    node_by_id: dict[str, dict[str, Any]],
    edges: list[dict],
    center: str,
    sidecar_by_id: dict[str, dict[str, Any] | None],
) -> str:
    """Render an embedded, interactive 3D graph for wiki entity pages."""
    width = 980
    height = 380
    node_payload: list[dict[str, Any]] = []
    for node_id, node in sorted(
        node_by_id.items(),
        key=lambda item: (
            0 if item[0] == center else 1,
            str(item[1].get("label") or item[0]),
        ),
    ):
        node = node_by_id[node_id]
        node_type = _graph_type_from_node_id(
            node_id,
            str(node.get("type") or "skill"),
        )
        node_slug = _graph_slug_from_node_id(node_id)
        label = str(node.get("label") or node_slug)
        sidecar = sidecar_by_id.get(node_id)
        node_payload.append({
            "id": node_id,
            "slug": node_slug,
            "label": label,
            "type": node_type,
            "href": _entity_wiki_href(node_slug, node_type),
            "title": _subgraph_node_title(label, node_type, sidecar),
            "fill": _subgraph_node_fill(node_type),
            "stroke": _subgraph_grade_stroke(sidecar),
            "is_center": node_id == center,
        })
    edge_payload: list[dict[str, Any]] = []
    for edge in edges:
        data = edge.get("data", {})
        source = str(data.get("source", ""))
        target = str(data.get("target", ""))
        if source not in node_by_id or target not in node_by_id:
            continue
        shared = ", ".join(str(tag) for tag in data.get("shared_tags", [])[:6]) or "none"
        weight = float(data.get("weight", 0.0) or 0.0)
        edge_payload.append({
            "source": source,
            "target": target,
            "weight": weight,
            "title": (
                f"{_graph_slug_from_node_id(source)} ↔ "
                f"{_graph_slug_from_node_id(target)} · weight {weight:.3f} "
                f"· shared {shared}"
            ),
        })
    nodes_json = json.dumps(node_payload)
    edges_json = json.dumps(edge_payload)
    # Container, usage hint, the canvas plus the payload script that feeds the
    # interactive renderer, then a hover legend.
    return (
        '<div class="card">'
        '<div class="muted">'
        "drag to rotate · wheel to zoom · hover nodes or edges"
        "</div>"
        f'<canvas id="entity-subgraph" width="{width}" height="{height}"></canvas>'
        f"<script>const subgraphNodes = {nodes_json};"
        f" const subgraphEdges = {edges_json};</script>"
        '<div class="muted">'
        "Hover a node for sidecar grade/score/floor.<br>"
        "Hover an edge for weight and shared signals.<br>"
        "</div>"
        "</div>"
    )

def _render_entity_subgraph(slug: str, entity_type: str | None = None) -> str:
    """Render a compact 1-hop subgraph table for wiki entity pages."""
    graph = _graph_neighborhood(slug, hops=1, limit=32, entity_type=entity_type)
    center = graph.get("center")
    nodes = graph.get("nodes") or []
    edges = graph.get("edges") or []
    if not center:
        return (
            '<div class="card">'
            '<p class="muted">No graph node was found for this entity.</p>'
            "</div>"
        )
    node_by_id = {
        str(node.get("data", {}).get("id", "")): node.get("data", {})
        for node in nodes
    }
    sidecar_by_id = {
        node_id: _subgraph_sidecar(
            _graph_slug_from_node_id(node_id),
            _graph_type_from_node_id(node_id, str(node.get("type") or "skill")),
        )
        for node_id, node in node_by_id.items()
    }
    rows: list[str] = []
    for edge in edges:
        data = edge.get("data", {})
        source = str(data.get("source", ""))
        target = str(data.get("target", ""))
        other_id = target if source == center else source
        if other_id == center or other_id not in node_by_id:
            continue
        other = node_by_id[other_id]
        other_type = _graph_type_from_node_id(other_id, str(other.get("type", "skill")))
        other_slug = other_id.split(":", 1)[-1]
        shared = ", ".join(str(tag) for tag in data.get("shared_tags", [])[:6])
        shared_html = html.escape(shared) if shared else '<span class="muted">none</span>'
        quality_html = _subgraph_quality_cell(sidecar_by_id.get(other_id))
        rows.append(
            "<tr>"
            f'<td><a href="{html.escape(_entity_wiki_href(other_slug, other_type))}">'
            f"{html.escape(str(other.get('label') or other_slug))}</a></td>"
            f'<td><span class="pill entity-type-{html.escape(other_type)}">'
            f"{html.escape(other_type)}</span></td>"
            f"<td>{quality_html}</td>"
            f"<td>{float(data.get('weight', 0.0)):.3f}</td>"
            f"<td>{shared_html}</td>"
            "</tr>"
        )
    table = (
        "<table>"
        "<tr><th>Entity</th><th>Type</th><th>Quality sidecar</th>"
        "<th>Weight</th><th>Shared signals</th></tr>"
        + (
            "".join(rows)
            if rows
            else '<tr><td colspan="5" class="muted">No neighbors under the current limit.</td></tr>'
        )
        + "</table>"
    )
    return (
        '<div class="card">'
        "<h2>Subgraph</h2>"
        f'<p class="muted">{len(nodes)} nodes and {len(edges)} edges in the 1-hop neighborhood.</p>'
        + _render_entity_subgraph_svg(node_by_id, edges, center, sidecar_by_id)
        + table
        + "</div>"
    )
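
# Expected neighborhood payload shape (illustrative; field names mirror the
# accesses above, Cytoscape-style elements wrapped in a "data" envelope):
#
#     {"center": "skill:git-flow",
#      "nodes": [{"data": {"id": "skill:git-flow", "label": "git-flow"}},
#                {"data": {"id": "agent:reviewer", "label": "reviewer"}}],
#      "edges": [{"data": {"source": "skill:git-flow",
#                          "target": "agent:reviewer",
#                          "weight": 0.42, "shared_tags": ["git"]}}]}
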

def _render_quality_drilldown(
    sidecar: dict | None,
    embedded_quality_markdown: str | None = None,
) -> str:
    """Explain quality score signals for a wiki entity."""
    if sidecar is None:
        if embedded_quality_markdown:
            quality_markdown = embedded_quality_markdown.strip()
            if not re.search(r"^#{1,6}\s+Quality\b", quality_markdown, re.IGNORECASE | re.MULTILINE):
                quality_markdown = "## Quality\n\n" + quality_markdown
            return (
                '<div class="card wiki-body">'
                + _render_wiki_markdown(quality_markdown)
                + "</div>"
            )
        return (
            '<div class="card">'
            "<h2>Quality</h2>"
            '<p class="muted">No quality sidecar exists for this entity yet.</p>'
            "</div>"
        )
    grade = str(sidecar.get("grade", "F"))
    score = float(sidecar.get("raw_score", sidecar.get("score", 0.0)) or 0.0)
    weights_raw = sidecar.get("weights")
    signals_raw = sidecar.get("signals")
    weights: dict[str, Any] = weights_raw if isinstance(weights_raw, dict) else {}
    signals: dict[str, Any] = signals_raw if isinstance(signals_raw, dict) else {}
    signal_rows: list[str] = []
    for name, signal in sorted(signals.items()):
        signal_data = signal if isinstance(signal, dict) else {}
        signal_score = float(signal_data.get("score", 0.0) or 0.0)
        weight = float(weights.get(name, 0.0) or 0.0)
        contribution = signal_score * weight
        evidence = signal_data.get("evidence", {})
        evidence_text = json.dumps(evidence, ensure_ascii=False, sort_keys=True, default=str)
        evidence_preview, evidence_truncated = _truncate_text(evidence_text, 420)
        truncated_marker = " (truncated)" if evidence_truncated else ""
        signal_rows.append(
            "<tr>"
            f"<td>{html.escape(str(name))}</td>"
            f"<td>{signal_score:.3f}</td>"
            f"<td>{weight:.3f}</td>"
            f"<td>{contribution:.3f}</td>"
            f"<td><code>{html.escape(evidence_preview)}</code>{truncated_marker}</td>"
            "</tr>"
        )
    if not signal_rows:
        signal_rows.append(
            '<tr><td colspan="5" class="muted">No signal breakdown was recorded.</td></tr>'
        )
    hard_floor = sidecar.get("hard_floor")
    floor_html = (
        f' <span class="muted">floor {html.escape(str(hard_floor))}</span>'
        if hard_floor
        else ""
    )
    return (
        '<div class="card">'
        "<h2>Quality</h2>"
        f'<p><span class="pill grade-{html.escape(grade)}">{html.escape(grade)}</span> '
        f"score {score:.3f}"
        f"{floor_html}</p>"
        '<p class="muted">Score is the weighted sum of recorded quality signals. '
        "A hard floor can cap the final grade even when individual signals pass.</p>"
        '<table class="quality-signal-table">'
        "<tr><th>Signal</th><th>Signal score</th><th>Weight</th>"
        "<th>Contribution</th><th>Evidence</th></tr>"
        + "".join(signal_rows)
        + "</table>"
        "<details><summary>Raw sidecar JSON</summary>"
        f"<pre>{html.escape(json.dumps(sidecar, indent=2, ensure_ascii=False, default=str)[:6000])}</pre>"
        "</details>"
        "</div>"
    )
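
# Scoring example (illustrative sidecar): with weights {"usage": 0.6,
# "freshness": 0.4} and signal scores {"usage": 0.9, "freshness": 0.5},
# the rendered score is 0.9*0.6 + 0.5*0.4 = 0.74, while a hard_floor of
# "C" would still cap the displayed grade at C regardless of that sum.
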

def _save_manifest(manifest: dict) -> None:
    _atomic_write_text(_manifest_path(), json.dumps(manifest, indent=2) + "\n")


def _read_skill_manifest_only() -> dict:
    """Read the mutable skill manifest without synthetic harness rows."""
    path = _manifest_path()
    if not path.exists():
        return {"load": [], "unload": [], "warnings": []}
    try:
        manifest = json.loads(path.read_text(encoding="utf-8"))
    except (OSError, json.JSONDecodeError):
        return {"load": [], "unload": [], "warnings": []}
    if not isinstance(manifest, dict):
        return {"load": [], "unload": [], "warnings": []}
    if not isinstance(manifest.get("load"), list):
        manifest["load"] = []
    if not isinstance(manifest.get("unload"), list):
        manifest["unload"] = []
    if not isinstance(manifest.get("warnings"), list):
        manifest["warnings"] = []
    return manifest


def _remove_loaded_manifest_entry(slug: str, entity_type: str) -> list[dict]:
    """Remove loaded rows for one entity tuple and return removed rows."""
    path = _manifest_path()
    with file_lock(path):
        manifest = _read_skill_manifest_only()
        removed: list[dict] = []
        remaining: list[dict] = []
        for entry in manifest.get("load", []):
            entry_type = str(entry.get("entity_type") or "skill")
            if entry.get("skill") == slug and entry_type == entity_type:
                removed.append(entry)
            else:
                remaining.append(entry)
        if not removed:
            return []
        manifest["load"] = remaining
        unloaded = {
            (entry.get("skill"), str(entry.get("entity_type") or "skill"))
            for entry in manifest.get("unload", [])
        }
        preserved: dict[str, object] = {}
        for field in ("command", "json_config", "priority", "reason"):
            value = removed[0].get(field)
            if value not in (None, ""):
                preserved[field] = value
        if (slug, entity_type) not in unloaded:
            entry = {
                "skill": slug,
                "entity_type": entity_type,
                "source": removed[0].get("source") or "ctx-monitor",
            }
            entry.update(preserved)
            manifest.setdefault("unload", []).append(entry)
        elif preserved:
            for entry in manifest.get("unload", []):
                if (
                    entry.get("skill") == slug
                    and str(entry.get("entity_type") or "skill") == entity_type
                ):
                    for field, value in preserved.items():
                        entry.setdefault(field, value)
                    break
        _save_manifest(manifest)
        return removed


def _log_dashboard_entity_event(
    entity_type: str,
    action: str,
    slug: str,
) -> None:
    """Append a dashboard-visible audit row for a load/unload action."""
    try:
        from ctx_audit_log import log

        if entity_type == "skill":
            log(
                f"skill.{action}",
                subject_type="skill",
                subject=slug,
                actor="user",
                meta={"via": "ctx-monitor"},
                path=_audit_log_path(),
            )
        elif entity_type == "agent":
            log(
                f"agent.{action}",
                subject_type="agent",
                subject=slug,
                actor="user",
                meta={"via": "ctx-monitor"},
                path=_audit_log_path(),
            )
        elif entity_type == "mcp-server":
            log(
                "toolbox.triggered",
                subject_type="toolbox",
                subject=slug,
                actor="user",
                meta={
                    "via": "ctx-monitor",
                    "entity_type": "mcp-server",
                    "action": action,
                },
                path=_audit_log_path(),
            )
    except Exception:  # noqa: BLE001
        pass


def _read_manifest() -> dict:
    """Return current loaded entities from the skill manifest plus harness installs."""
    path = _manifest_path()
    manifest: dict[str, Any]
    if not path.exists():
        manifest = {"load": [], "unload": [], "warnings": []}
    else:
        try:
            manifest = json.loads(path.read_text(encoding="utf-8"))
        except (OSError, json.JSONDecodeError):
            manifest = {"load": [], "unload": [], "warnings": []}
    if not isinstance(manifest, dict):
        manifest = {"load": [], "unload": [], "warnings": []}
    load_rows = manifest.setdefault("load", [])
    if not isinstance(load_rows, list):
        load_rows = []
        manifest["load"] = load_rows
    manifest.setdefault("unload", [])
    manifest.setdefault("warnings", [])
    existing = {
        (str(row.get("entity_type") or "skill"), str(row.get("skill") or ""))
        for row in load_rows
        if isinstance(row, dict)
    }
    for row in _read_harness_install_rows():
        key = ("harness", str(row.get("skill") or ""))
        if key not in existing:
            load_rows.append(row)
            existing.add(key)
    return manifest


def _read_harness_install_rows() -> list[dict]:
    """Return installed harness records as manifest-compatible load rows."""
    root = _claude_dir() / "harness-installs"
    if not root.is_dir():
        return []
    rows: list[dict] = []
    for path in sorted(root.glob("*.json")):
        try:
            data = json.loads(path.read_text(encoding="utf-8"))
        except (OSError, json.JSONDecodeError):
            continue
        if not isinstance(data, dict) or data.get("status") != "installed":
            continue
        slug = str(data.get("slug") or path.stem).strip()
        if not slug or not _is_safe_slug(slug):
            continue
        rows.append({
            "skill": slug,
            "entity_type": "harness",
            "source": "ctx-harness-install",
            "command": data.get("target") or data.get("repo_url") or "",
            "installed_at": data.get("installed_at", ""),
            "status": data.get("status", "installed"),
        })
    return rows


def _queue_job_summary(job: wiki_queue.QueueJob) -> dict[str, Any]:
    return {
        "id": job.id,
        "kind": job.kind,
        "status": job.status,
        "attempts": job.attempts,
        "max_attempts": job.max_attempts,
        "worker_id": job.worker_id,
        "leased_until": job.leased_until,
        "available_at": job.available_at,
        "last_error": job.last_error,
        "created_at": job.created_at,
        "updated_at": job.updated_at,
        "source": job.payload.get("source"),
        "payload_keys": sorted(str(key) for key in job.payload),
    }


def _queue_status() -> dict[str, Any]:
    """Return durable wiki/graph queue state without creating the DB."""
    db_path = wiki_queue.queue_db_path(_wiki_dir())
    counts = {
        wiki_queue.STATUS_PENDING: 0,
        wiki_queue.STATUS_RUNNING: 0,
        wiki_queue.STATUS_SUCCEEDED: 0,
        wiki_queue.STATUS_FAILED: 0,
        wiki_queue.STATUS_CANCELLED: 0,
    }
    if not db_path.exists():
        return {
            "available": False,
            "db_path": str(db_path),
            "total": 0,
            "counts": counts,
            "recent_jobs": [],
        }
    try:
        raw_counts = wiki_queue.count_jobs_by_status(db_path)
        recent = wiki_queue.list_recent_jobs(db_path, limit=20)
    except Exception as exc:  # noqa: BLE001
        return {
            "available": False,
            "db_path": str(db_path),
            "total": 0,
            "counts": counts,
            "recent_jobs": [],
            "error": str(exc),
        }
    for status, count in raw_counts.items():
        counts[status] = count
    return {
        "available": True,
        "db_path": str(db_path),
        "total": sum(raw_counts.values()),
        "counts": counts,
        "recent_jobs": [_queue_job_summary(job) for job in recent],
    }


def _file_status(path: Path) -> dict[str, Any]:
    if not path.exists():
        return {"path": str(path), "exists": False, "size": 0, "mtime": None}
    try:
        stat = path.stat()
    except OSError as exc:
        return {
            "path": str(path),
            "exists": False,
            "size": 0,
            "mtime": None,
            "error": str(exc),
        }
    return {
        "path": str(path),
        "exists": path.is_file(),
        "size": stat.st_size,
        "mtime": stat.st_mtime,
    }


def _repo_graph_dir() -> Path:
    return Path(__file__).resolve().parents[1] / "graph"


def _first_existing_file_status(*paths: Path) -> dict[str, Any]:
    for path in paths:
        if path.exists():
            return _file_status(path)
    return _file_status(paths[0])


def _promotion_status(path: Path) -> dict[str, Any] | None:
    try:
        data = json.loads(path.read_text(encoding="utf-8"))
    except (OSError, json.JSONDecodeError):
        return None
    if not isinstance(data, dict):
        return None
    previous = _dict_or_empty(data.get("previous"))
    candidate = _dict_or_empty(data.get("candidate"))
    current = _dict_or_empty(data.get("current"))
    return {
        "path": str(path),
        "status": data.get("status"),
        "target": data.get("target"),
        "started_at": data.get("started_at"),
        "promoted_at": data.get("promoted_at"),
        "previous_sha256": previous.get("sha256"),
        "previous_size": previous.get("size"),
        "candidate_sha256": candidate.get("sha256"),
        "candidate_size": candidate.get("size"),
        "current_sha256": current.get("sha256"),
        "current_size": current.get("size"),
    }

def _dict_or_empty(value: Any) -> dict[str, Any]:
    return value if isinstance(value, dict) else {}


def _artifact_status() -> dict[str, Any]:
    """Return shipped graph/wiki artifact file state and promotion metadata."""
    wiki = _wiki_dir()
    graph_dir = wiki / "graphify-out"
    claude_graph_dir = _claude_dir() / "graph"
    repo_graph_dir = _repo_graph_dir()
    promotion_paths = sorted(
        {
            *graph_dir.glob("*.promotion.json"),
            *wiki.glob("*.promotion.json"),
            *claude_graph_dir.glob("*.promotion.json"),
        },
        key=lambda path: str(path),
    )
    promotions = [
        promotion
        for promotion in (_promotion_status(path) for path in promotion_paths)
        if promotion is not None
    ]
    return {
        "graph_json": _file_status(graph_dir / "graph.json"),
        "graph_delta_json": _file_status(graph_dir / "graph-delta.json"),
        "communities_json": _file_status(graph_dir / "communities.json"),
        "wiki_graph_tar": _first_existing_file_status(
            claude_graph_dir / "wiki-graph.tar.gz",
            repo_graph_dir / "wiki-graph.tar.gz",
        ),
        "skills_sh_catalog": _first_existing_file_status(
            wiki / "external-catalogs" / "skills-sh" / "catalog.json",
            claude_graph_dir / "skills-sh-catalog.json.gz",
            repo_graph_dir / "skills-sh-catalog.json.gz",
        ),
        "promotion_count": len(promotions),
        "promotions": promotions,
    }


def _status_payload() -> dict[str, Any]:
    return {
        "queue": _queue_status(),
        "artifacts": _artifact_status(),
    }


def _read_jsonl(path: Path, limit: int | None = None) -> list[dict]:
    if not path.exists():
        return []
    if limit is not None and limit <= 0:
        return []
    out: deque[dict] | list[dict]
    out = deque(maxlen=limit) if limit is not None else []
    with path.open(encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            try:
                event = json.loads(line)
            except json.JSONDecodeError:
                continue
            if isinstance(event, dict):
                out.append(event)
    return list(out)


def _runtime_lifecycle_events(limit: int | None = 200) -> list[dict[str, Any]]:
    events = _read_jsonl(_runtime_lifecycle_path(), limit=limit)
    return [
        event
        for event in events
        if event.get("action") in {"validation", "escalation"}
    ]


def _runtime_escalation_key(event: dict[str, Any]) -> str:
    for field in ("escalation_id", "event_id", "id"):
        value = event.get(field)
        if value:
            return str(value)
    return "\0".join(
        str(event.get(field) or "")
        for field in ("session_id", "trigger", "reason", "severity")
    )


def _runtime_lifecycle_summary(limit: int = 200) -> dict[str, Any]:
    events = _runtime_lifecycle_events(limit=None)
    validations = [
        event for event in events if event.get("action") == "validation"
    ]
    escalations = [
        event for event in events if event.get("action") == "escalation"
    ]
    open_by_key: dict[str, dict[str, Any]] = {}
    for event in escalations:
        key = _runtime_escalation_key(event)
        status = str(event.get("status") or "open").lower()
        if status == "open":
            open_by_key[key] = event
        else:
            open_by_key.pop(key, None)
    open_escalations = list(open_by_key.values())
    validation_failures = [
        event
        for event in validations
        if str(event.get("status") or "").lower() in {"failed", "error"}
    ]
    sessions = sorted({
        str(event.get("session_id") or "")
        for event in events
        if event.get("session_id")
    })
    return {
        "path": str(_runtime_lifecycle_path()),
        "events_total": len(events),
        "validations_total": len(validations),
        "validation_failures": len(validation_failures),
        "escalations_total": len(escalations),
        "open_escalations_total": len(open_escalations),
        "latest_validation": validations[-1] if validations else None,
        "recent_validations": validations[-20:],
        "open_escalations": open_escalations[-20:],
        "sessions": sessions,
    }
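
# Escalation bookkeeping example (illustrative events): an escalation is
# "open" until a later event with the same key reports any other status.
#
#     [{"action": "escalation", "escalation_id": "e1", "status": "open"},
#      {"action": "escalation", "escalation_id": "e1", "status": "resolved"}]
#
# replays to an empty open_by_key: e1 was opened and then resolved.
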

def _sidecar_entity_type(sidecar: dict, fallback: str = "skill") -> str:
    raw = str(
        sidecar.get("entity_type")
        or sidecar.get("subject_type")
        or sidecar.get("type")
        or fallback
    )
    return {
        "skills": "skill",
        "skill": "skill",
        "agents": "agent",
        "agent": "agent",
        "mcp": "mcp-server",
        "mcp-server": "mcp-server",
        "mcp-servers": "mcp-server",
        "harness": "harness",
        "harnesses": "harness",
    }.get(raw, raw)


def _sidecar_fallback_type(path: Path) -> str:
    return "mcp-server" if path.parent.name == "mcp" else "skill"


def _read_sidecar_file(path: Path) -> dict | None:
    try:
        sidecar = json.loads(path.read_text(encoding="utf-8"))
    except (OSError, json.JSONDecodeError):
        return None
    if not isinstance(sidecar, dict):
        return None
    etype = _sidecar_entity_type(sidecar, _sidecar_fallback_type(path))
    sidecar.setdefault("slug", path.stem)
    sidecar["subject_type"] = etype
    return sidecar


def _load_sidecar(slug: str, entity_type: str | None = None) -> dict | None:
    if not _is_safe_slug(slug):
        return None
    for path in (
        _sidecar_dir() / f"{slug}.json",
        _sidecar_dir() / "mcp" / f"{slug}.json",
    ):
        if not path.exists():
            continue
        sidecar = _read_sidecar_file(path)
        if sidecar is None:
            continue
        if entity_type is None or _sidecar_entity_type(sidecar) == entity_type:
            return sidecar
    if entity_type is not None:
        return _sidecar_index().get((slug, entity_type))
    return None


def _sidecar_files() -> list[Path]:
    files: list[Path] = []
    for root in (_sidecar_dir(), _sidecar_dir() / "mcp"):
        if not root.is_dir():
            continue
        files.extend(
            p
            for p in sorted(root.glob("*.json"))
            if not p.name.startswith(".") and not p.name.endswith(".lifecycle.json")
        )
    return files


def _sidecar_index_cache_key() -> tuple[tuple[Path, float, int], ...]:
    keys: list[tuple[Path, float, int]] = []
    for root in (_sidecar_dir(), _sidecar_dir() / "mcp"):
        if not root.is_dir():
            continue
        stat = root.stat()
        keys.append((root.resolve(), stat.st_mtime, stat.st_size))
    return tuple(keys)


def _sidecar_index() -> dict[tuple[str, str], dict]:
    global _SIDECAR_INDEX_CACHE_KEY, _SIDECAR_INDEX_CACHE_VALUE
    cache_key = _sidecar_index_cache_key()
    if _SIDECAR_INDEX_CACHE_KEY == cache_key and _SIDECAR_INDEX_CACHE_VALUE is not None:
        return _SIDECAR_INDEX_CACHE_VALUE
    index: dict[tuple[str, str], dict] = {}
    for path in _sidecar_files():
        sidecar = _read_sidecar_file(path)
        if sidecar is None:
            continue
        slug = str(sidecar.get("slug") or path.stem)
        entity_type = _sidecar_entity_type(sidecar)
        index.setdefault((slug, entity_type), sidecar)
    _SIDECAR_INDEX_CACHE_KEY = cache_key
    _SIDECAR_INDEX_CACHE_VALUE = index
    return index


def _all_sidecars() -> list[dict]:
    out: list[dict] = []
    for p in _sidecar_files():
        sidecar = _read_sidecar_file(p)
        if sidecar is not None:
            out.append(sidecar)
    return out


# ─── Aggregations ────────────────────────────────────────────────────────────

def _summarize_sessions() -> list[dict]:
    """Join audit-log session events with skill-events.jsonl load/unloads."""
    audit = _read_jsonl(_audit_log_path())
    events = _read_jsonl(_events_jsonl_path())
    by_session: dict[str, dict[str, Any]] = defaultdict(
        lambda: {
            "session_id": "",
            "first_seen": None,
            "last_seen": None,
            "skills_loaded": set(),
            "skills_unloaded": set(),
            "agents_loaded": set(),
            "agents_unloaded": set(),
            "mcps_loaded": set(),
            "mcps_unloaded": set(),
            "score_updates": 0,
            "lifecycle_transitions": 0,
        }
    )
    for line in audit:
        sid = line.get("session_id") or "unknown"
        row = by_session[sid]
        row["session_id"] = sid
        ts = line.get("ts")
        if ts and (row["first_seen"] is None or ts < row["first_seen"]):
            row["first_seen"] = ts
        if ts and (row["last_seen"] is None or ts > row["last_seen"]):
            row["last_seen"] = ts
        event = line.get("event", "")
        if event == "skill.loaded":
            row["skills_loaded"].add(line.get("subject", ""))
        elif event == "skill.unloaded":
            row["skills_unloaded"].add(line.get("subject", ""))
        elif event == "agent.loaded":
            row["agents_loaded"].add(line.get("subject", ""))
        elif event == "agent.unloaded":
            row["agents_unloaded"].add(line.get("subject", ""))
        elif event == "toolbox.triggered":
            raw_meta = line.get("meta")
            meta: dict[str, Any] = raw_meta if isinstance(raw_meta, dict) else {}
            if meta.get("entity_type") == "mcp-server":
                action = meta.get("action")
                if action == "loaded":
                    row["mcps_loaded"].add(line.get("subject", ""))
                elif action == "unloaded":
                    row["mcps_unloaded"].add(line.get("subject", ""))
        elif event.endswith(".score_updated"):
            row["score_updates"] += 1
        elif event in ("skill.archived", "skill.demoted", "skill.restored",
                       "skill.deleted", "agent.archived", "agent.demoted",
                       "agent.restored", "agent.deleted"):
            row["lifecycle_transitions"] += 1
    for line in events:
        sid = line.get("session_id") or "unknown"
        row = by_session[sid]
        row["session_id"] = sid
        ts = line.get("timestamp")
        if ts and (row["first_seen"] is None or ts < row["first_seen"]):
            row["first_seen"] = ts
        if ts and (row["last_seen"] is None or ts > row["last_seen"]):
            row["last_seen"] = ts
        action = line.get("event")
        entity_type = (
            _audit_entity_type(line)
            or ("agent" if line.get("agent") else None)
            or ("mcp-server" if line.get("mcp") or line.get("mcp_server") else None)
            or ("skill" if line.get("skill") else None)
        )
        if entity_type == "agent":
            subject = line.get("agent")
        elif entity_type == "mcp-server":
            subject = line.get("mcp") or line.get("mcp_server")
        else:
            subject = line.get("skill")
        if action == "load" and subject:
            if entity_type == "agent":
                row["agents_loaded"].add(subject)
            elif entity_type == "mcp-server":
                row["mcps_loaded"].add(subject)
            else:
                row["skills_loaded"].add(subject)
        elif action == "unload" and subject:
            if entity_type == "agent":
                row["agents_unloaded"].add(subject)
            elif entity_type == "mcp-server":
                row["mcps_unloaded"].add(subject)
            else:
                row["skills_unloaded"].add(subject)
    summaries: list[dict] = []
    for row in by_session.values():
        summaries.append({
            "session_id": row["session_id"],
            "first_seen": row["first_seen"],
            "last_seen": row["last_seen"],
            "skills_loaded": sorted(row["skills_loaded"]),
            "skills_unloaded": sorted(row["skills_unloaded"]),
            "agents_loaded": sorted(row["agents_loaded"]),
            "agents_unloaded": sorted(row["agents_unloaded"]),
            "mcps_loaded": sorted(row["mcps_loaded"]),
            "mcps_unloaded": sorted(row["mcps_unloaded"]),
            "score_updates": row["score_updates"],
            "lifecycle_transitions": row["lifecycle_transitions"],
        })
    summaries.sort(key=lambda r: r.get("last_seen") or "", reverse=True)
    return summaries


def _grade_distribution() -> dict[str, int]:
    dist = {"A": 0, "B": 0, "C": 0, "D": 0, "F": 0}
    for s in _all_sidecars():
        g = s.get("grade")
        if g in dist:
            dist[g] += 1
    return dist


def _session_detail(session_id: str) -> dict:
    audit = _read_jsonl(_audit_log_path())
    events = _read_jsonl(_events_jsonl_path())
    session_audit = [r for r in audit if r.get("session_id") == session_id]
    session_events = [e for e in events if e.get("session_id") == session_id]
    return {
        "session_id": session_id,
        "audit_entries": session_audit,
        "load_events": session_events,
    }
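
# Distribution example (illustrative): three sidecars graded A, A, C yield
# {"A": 2, "B": 0, "C": 1, "D": 0, "F": 0}.
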

# ─── HTML rendering ──────────────────────────────────────────────────────────

_CSS = """
:root { color-scheme: light dark; --bg: #f5f7fb; --surface: #ffffff; --surface-2: #f8fafc; --surface-3: #eef2f7; --text: #0f172a; --muted-text: #64748b; --border: #d8e1ee; --accent: #2563eb; --accent-strong: #1d4ed8; --accent-soft: #dbeafe; --ok: #059669; --warning: #d97706; --danger: #dc2626; --radius: 8px; --shadow: 0 12px 34px rgba(15, 23, 42, 0.08); }
* { box-sizing: border-box; }
body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", sans-serif; max-width: 1180px; margin: 0 auto; padding: 1.25rem 1rem 2.5rem; line-height: 1.5; background: var(--bg); color: var(--text); }
h1 { margin-top: 0; letter-spacing: 0; }
h2, h3 { letter-spacing: 0; }
a { color: var(--accent); text-decoration: none; }
a:hover { text-decoration: underline; }
table { border-collapse: collapse; width: 100%; margin: 1rem 0; }
th, td { text-align: left; padding: 0.4rem 0.8rem; border-bottom: 1px solid var(--border); font-size: 0.92rem; }
th { background: var(--surface-3); font-weight: 600; }
tr:hover { background: rgba(0,0,0,0.02); }
button, input, select, textarea { font: inherit; color: inherit; border: 1px solid var(--border); border-radius: 6px; background: var(--surface); }
button { cursor: pointer; padding: 0.38rem 0.7rem; background: var(--accent); border-color: var(--accent); color: #fff; font-weight: 650; }
button:hover { background: var(--accent-strong); border-color: var(--accent-strong); }
button.secondary { background: var(--surface); color: var(--text); border-color: var(--border); }
button.secondary:hover { background: var(--surface-3); }
input, select, textarea { padding: 0.42rem 0.55rem; }
input:focus, select:focus, textarea:focus, button:focus { outline: 2px solid var(--accent-soft); outline-offset: 2px; }
.pill { display: inline-block; padding: 0.15rem 0.55rem; border-radius: 999px; font-size: 0.8rem; font-weight: 600; background: #e5e7eb; color: #111; }
.entity-type-skill { background: #e0e7ff; color: #312e81; }
.entity-type-agent { background: #fef3c7; color: #78350f; }
.entity-type-mcp-server { background: #fee2e2; color: #7f1d1d; }
.entity-type-harness { background: #dcfce7; color: #14532d; }
.grade-A { background: #d1fae5; color: #065f46; }
.grade-B { background: #dbeafe; color: #1e3a8a; }
.grade-C { background: #fef3c7; color: #78350f; }
.grade-D { background: #fed7aa; color: #7c2d12; }
.grade-F { background: #fee2e2; color: #7f1d1d; }
code, pre { background: rgba(15,23,42,0.06); padding: 0 0.3rem; border-radius: 3px; font-family: "SF Mono", Monaco, Consolas, monospace; font-size: 0.85rem; }
pre { padding: 0.6rem 0.8rem; overflow-x: auto; }
.muted { color: var(--muted-text); font-size: 0.85rem; }
.error { color: #991b1b; background: #fee2e2; border: 1px solid #fecaca; border-radius: 6px; padding: 0.5rem 0.65rem; }
.nav { display: flex; gap: 0.35rem; margin: 0 0 1.4rem; align-items: center; flex-wrap: wrap; position: sticky; top: 0; z-index: 10; padding: 0.55rem 0.45rem; border: 1px solid var(--border); border-radius: var(--radius); background: rgba(255,255,255,0.92); box-shadow: var(--shadow); backdrop-filter: blur(10px); }
.nav a[draggable="true"] { cursor: grab; border-radius: 6px; padding: 0.28rem 0.48rem; font-weight: 620; color: #334155; }
.nav a[draggable="true"]:hover { background: var(--surface-3); text-decoration: none; }
.nav a.nav-dragging { opacity: 0.45; cursor: grabbing; }
.nav a.nav-drag-over { outline: 2px solid #93c5fd; background: rgba(147,197,253,0.18); }
.nav-reset { border: 1px solid #d1d5db; border-radius: 4px; background: transparent; color: #6b7280; cursor: pointer; padding: 0.1rem 0.35rem; font-size: 0.78rem; }
.card { border: 1px solid var(--border); border-radius: var(--radius); padding: 1rem 1.25rem; margin-bottom: 1rem; background: var(--surface); box-shadow: 0 1px 2px rgba(15,23,42,0.04); }
.stat-card { background: var(--surface); border: 1px solid var(--border); border-radius: var(--radius); }
.wiki-entity-grid { display: grid; grid-template-columns: minmax(0, 1fr) minmax(280px, 360px); gap: 1rem; align-items: start; }
.wiki-body { overflow-wrap: anywhere; }
.wiki-body h1, .wiki-body h2, .wiki-body h3 { margin-top: 0.9rem; }
.wiki-body h1:first-child, .wiki-body h2:first-child, .wiki-body h3:first-child { margin-top: 0; }
.wiki-body ul, .wiki-body ol { padding-left: 1.35rem; }
.wiki-body li { margin: 0.2rem 0; }
.wiki-body pre { white-space: pre-wrap; }
.frontmatter-table { table-layout: fixed; font-size: 0.85rem; }
.frontmatter-table td:last-child code { white-space: normal; overflow-wrap: anywhere; }
.entity-tabs { display: flex; gap: 0.4rem; margin: 0.8rem 0 1rem; flex-wrap: wrap; }
.entity-tab-button { border: 1px solid #d1d5db; border-radius: 6px; background: #fff; color: inherit; cursor: pointer; padding: 0.35rem 0.7rem; }
.entity-tab-button.active { background: #111827; color: #fff; border-color: #111827; }
.entity-tab-panel[hidden] { display: none; }
.docs-shell { display: flex; flex-direction: column; gap: 1rem; }
.docs-hero { border: 1px solid var(--border); border-radius: 12px; background: linear-gradient(135deg, #ffffff 0%, #f8fbff 62%, #eef6ff 100%); box-shadow: var(--shadow); padding: 1.25rem; overflow: hidden; }
.docs-hero-grid { display: grid; grid-template-columns: minmax(0, 1fr) auto; gap: 1.25rem; align-items: end; }
.docs-eyebrow { color: var(--accent); font-size: 0.78rem; font-weight: 750; margin-bottom: 0.35rem; }
.docs-hero h1 { margin: 0; font-size: clamp(2rem, 4vw, 3.2rem); line-height: 1.04; }
.docs-hero p { max-width: 52rem; margin: 0.65rem 0 0; color: var(--muted-text); font-size: 1rem; }
.docs-hero-meta { display: flex; gap: 0.5rem; flex-wrap: wrap; justify-content: flex-end; }
.docs-stat { border: 1px solid rgba(37,99,235,0.18); border-radius: 999px; background: rgba(37,99,235,0.08); color: #1e3a8a; padding: 0.28rem 0.65rem; font-size: 0.82rem; font-weight: 700; white-space: nowrap; }
.docs-actions { display: flex; align-items: center; gap: 0.65rem; flex-wrap: wrap; margin-top: 1rem; }
.docs-search-wrap { flex: 1 1 22rem; max-width: 38rem; }
.docs-search-wrap input { width: 100%; padding: 0.62rem 0.75rem; border-radius: var(--radius); border-color: rgba(37,99,235,0.24); background: rgba(255,255,255,0.86); }
.docs-public-link { font-weight: 700; white-space: nowrap; }
.docs-tabs { display: flex; gap: 0.4rem; flex-wrap: wrap; margin: 0; border: 1px solid var(--border); border-radius: 12px; background: var(--surface); padding: 0.45rem; box-shadow: 0 1px 2px rgba(15,23,42,0.04); }
.docs-tab-button { border: 1px solid transparent; border-radius: 999px; background: transparent; color: var(--muted-text); padding: 0.42rem 0.85rem; }
.docs-tab-button:hover { background: var(--surface-3); color: var(--text); }
.docs-tab-button.active { background: #111827; color: #fff; border-color: #111827; }
.docs-tab-panel[hidden] { display: none; }
.docs-reader { display: grid; grid-template-columns: minmax(220px, 280px) minmax(0, 1fr); gap: 1rem; align-items: start; }
.docs-page-list { position: sticky; top: 4.5rem; display: flex; flex-direction: column; gap: 0.25rem; border: 1px solid var(--border); border-radius: 12px; background: var(--surface); padding: 0.6rem; box-shadow: 0 1px 2px rgba(15,23,42,0.04); }
.docs-toc-page { border-bottom: 1px solid var(--border); padding: 0.2rem 0 0.35rem; }
.docs-toc-page:last-child { border-bottom: 0; }
.docs-page-list a { display: block; padding: 0.38rem 0.55rem; border-radius: 8px; color: var(--muted-text); line-height: 1.25; }
.docs-page-list a:hover { background: var(--surface-3); color: var(--text); text-decoration: none; }
.docs-page-link { color: var(--text) !important; font-weight: 750; }
.docs-heading-list { display: flex; flex-direction: column; gap: 0.05rem; margin: 0.1rem 0 0.2rem; }
.docs-heading-link { font-size: 0.82rem; font-weight: 600; }
.docs-heading-level-2 { margin-left: 0.45rem; }
.docs-heading-level-3 { margin-left: 1.1rem; }
.docs-heading-level-4 { margin-left: 1.75rem; font-size: 0.78rem; }
.docs-page { border: 1px solid var(--border); border-radius: 12px; background: var(--surface); padding: 1.25rem 1.45rem; margin-bottom: 1rem; box-shadow: 0 1px 2px rgba(15,23,42,0.04); }
.docs-page-source { display: flex; justify-content: space-between; gap: 0.75rem; align-items: center; margin-bottom: 0.85rem; color: var(--muted-text); }
.docs-page-source code { background: var(--surface-3); color: var(--text); }
.docs-page .admonition { border-left: 4px solid var(--accent); background: var(--surface-2); border-radius: var(--radius); padding: 0.75rem 0.9rem; margin: 1rem 0; }
.docs-page .admonition-title { font-weight: 750; margin: 0 0 0.45rem; }
.docs-page .tabbed-set { border: 1px solid var(--border); border-radius: var(--radius); padding: 0.75rem; margin: 1rem 0; }
.docs-page p, .docs-page li { line-height: 1.65; }
.docs-page h1 { font-size: 2rem; line-height: 1.12; }
.docs-page h2 { font-size: 1.45rem; margin-top: 1.45rem; }
.docs-page h3 { font-size: 1.12rem; margin-top: 1.15rem; }
.docs-page .grid.cards > ul { list-style: none; padding-left: 0; display: grid; grid-template-columns: repeat(auto-fit, minmax(260px, 1fr)); gap: 0.85rem; }
.docs-page .grid.cards > ul > li { border: 1px solid var(--border); border-radius: 12px; background: var(--surface-2); padding: 0.95rem 1rem; box-shadow: 0 1px 2px rgba(15,23,42,0.04); }
.docs-page .grid.cards > ul > li:hover { border-color: rgba(37,99,235,0.32); background: #ffffff; }
.docs-page .grid.cards hr { border: 0; border-top: 1px solid var(--border); margin: 0.55rem 0; }
.docs-page .headerlink { color: var(--muted-text); font-size: 0.78em; margin-left: 0.35rem; }
.docs-search-results { display: grid; grid-template-columns: repeat(auto-fit, minmax(220px, 1fr)); gap: 0.45rem; margin-top: 0.75rem; }
.docs-search-results[hidden] { display: none; }
.docs-search-result { display: flex; flex-direction: column; gap: 0.12rem; text-align: left; background: var(--surface); color: var(--text); border-color: var(--border); border-radius: var(--radius); padding: 0.55rem 0.65rem; }
.docs-search-result span { color: var(--muted-text); font-size: 0.78rem; font-weight: 500; }
.docs-search-empty { color: var(--muted-text); font-size: 0.86rem; padding: 0.4rem 0.1rem; }
.quality-signal-table { table-layout: fixed; }
.quality-signal-table td:last-child code { white-space: pre-wrap; overflow-wrap: anywhere; }
.wizard-layout { display: grid; grid-template-columns: minmax(0, 1.2fr) minmax(320px, 0.8fr); gap: 1rem; align-items: start; }
.wizard-step { border-left: 3px solid var(--accent); padding-left: 0.8rem; margin-bottom: 1rem; }
.wizard-grid { display: grid; grid-template-columns: repeat(2, minmax(0, 1fr)); gap: 0.75rem; }
.wizard-grid label { display: flex; flex-direction: column; gap: 0.25rem; font-size: 0.9rem; }
.wizard-grid .wide { grid-column: 1 / -1; }
.setup-header { display: grid; grid-template-columns: minmax(0, 1fr) auto; gap: 1rem; align-items: end; margin-bottom: 1rem; }
.setup-kicker { text-transform: uppercase; letter-spacing: 0; font-size: 0.74rem; font-weight: 750; color: var(--muted-text); }
.setup-flow { display: grid; grid-template-columns: repeat(4, minmax(0, 1fr)); gap: 0.55rem; margin: 0.8rem 0 1rem; }
.setup-flow-step { border: 1px solid var(--border); border-radius: var(--radius); background: var(--surface); padding: 0.7rem; min-height: 4.4rem; }
.setup-flow-step strong { display: block; font-size: 0.88rem; margin-bottom: 0.2rem; }
.command-box { background: #0f172a; color: #e2e8f0; border-radius: var(--radius); padding: 0.85rem; white-space: pre-wrap; overflow-x: auto; min-height: 8rem; }
.harness-card { border: 1px solid var(--border); border-radius: var(--radius); padding: 0.8rem; background: var(--surface); display: flex; flex-direction: column; gap: 0.35rem; }
.harness-card[data-fit-hidden="true"] { display: none; }
.harness-card.selected { outline: 2px solid var(--accent); outline-offset: 2px; }
.harness-card button { align-self: flex-start; }
.manage-results { display: flex; flex-direction: column; gap: 0.45rem; max-height: 65vh; overflow-y: auto; }
.manage-result { display: grid; grid-template-columns: minmax(0, 1fr) auto; gap: 0.25rem 0.5rem; text-align: left; border: 1px solid var(--border); border-radius: var(--radius); padding: 0.65rem 0.75rem; background: var(--surface); cursor: pointer; }
.manage-result:hover { border-color: var(--accent); background: var(--surface-2); }
.manage-result .muted { grid-column: 1 / -1; font-size: 0.82rem; }
@media (max-width: 860px) {
  .wiki-entity-grid { grid-template-columns: 1fr; }
  .wizard-layout, .wizard-grid, .setup-header, .setup-flow { grid-template-columns: 1fr; }
  .docs-hero-grid { grid-template-columns: 1fr; }
  .docs-hero-meta { justify-content: flex-start; }
  .docs-reader { grid-template-columns: 1fr; }
  .docs-page-list { position: static; }
}
@media (prefers-color-scheme: dark) {
  :root { --bg: #0b1120; --surface: #111827; --surface-2: #0f172a; --surface-3: #1f2937; --text: #e5e7eb; --muted-text: #94a3b8; --border: #334155; --accent: #60a5fa; --accent-strong: #93c5fd; --accent-soft: rgba(96,165,250,0.28); --shadow: 0 12px 34px rgba(0,0,0,0.28); }
  body { background: var(--bg); color: var(--text); }
  th { background: rgba(255,255,255,0.05); }
  tr:hover { background: rgba(255,255,255,0.03); }
  .nav { background: rgba(17,24,39,0.92); }
  .nav a[draggable="true"] { color: #e5e7eb; }
  .nav a[draggable="true"]:hover { background: #1f2937; }
  .card { border-color: var(--border); }
  .entity-tab-button { background: #0f172a; border-color: #334155; }
  .entity-tab-button.active { background: #e2e8f0; color: #0f172a; }
  .docs-hero { background: linear-gradient(135deg, #111827 0%, #0f172a 62%, #0b1120 100%); }
  .docs-stat { border-color: rgba(96,165,250,0.28); background: rgba(96,165,250,0.12); color: #bfdbfe; }
  .docs-search-wrap input { background: rgba(15,23,42,0.86); border-color: #334155; }
  .docs-tab-button.active { background: #e2e8f0; color: #0f172a; }
  .docs-page .grid.cards > ul > li:hover { background: #111827; }
  code, pre { background: rgba(255,255,255,0.06); }
  .error { color: #fecaca; background: rgba(127,29,29,0.25); border-color: rgba(248,113,113,0.35); }
}
"""


def _layout(title: str, body: str) -> str:
    """Wrap body HTML in the standard page chrome."""
    nav_items = (
        ("home", "Home", "/"),
        ("loaded", "Loaded", "/loaded"),
        ("skills", "Skills", "/skills"),
        ("wiki", "Wiki", "/wiki"),
        ("graph", "Graph", "/graph"),
        ("manage", "Manage", "/manage"),
        ("harness", "Harness Setup", "/harness"),
        ("docs", "Docs", "/docs"),
        ("config", "Config", "/config"),
        ("status", "Status", "/status"),
        ("kpi", "KPIs", "/kpi"),
        ("runtime", "Runtime", "/runtime"),
        ("sessions", "Sessions", "/sessions"),
        ("logs", "Logs", "/logs"),
        ("events", "Live", "/events"),
    )
    nav_html = "".join(
        f'<a href="{html.escape(href)}" draggable="true"'
        f' data-nav-key="{html.escape(key)}">'
        f"{html.escape(label)}</a>"
        for key, label, href in nav_items
    )
    nav_keys_json = json.dumps([key for key, _label, _href in nav_items])
    # The drag-to-reorder nav script reads the draggable keys from navKeys.
    nav_script = (
        f"<script>const navKeys = {nav_keys_json};</script>"
    )
    return (
        "<!DOCTYPE html>"
        '<html lang="en"><head><meta charset="utf-8">'
        f"<title>{html.escape(title)} — ctx monitor</title>"
        f"<style>{_CSS}</style>"
        "</head><body>"
        f'<nav class="nav">{nav_html}</nav>'
        + nav_script
        + body
        + "</body></html>"
    )


# ─── Graph neighborhood (for /graph) ─────────────────────────────────────────

def _graph_slug_from_node_id(node_id: str) -> str:
    return node_id.split(":", 1)[-1] if ":" in node_id else node_id


def _resolve_graph_center(
    G: Any,
    slug: str,
    entity_type: str | None,
) -> tuple[str | None, dict[str, str] | None, list[str]]:
    """Resolve exact and fuzzy graph focus queries to one graph node id."""
    raw_query = str(slug or "").strip()
    if not raw_query or "/" in raw_query or "\\" in raw_query or ".." in raw_query:
        return None, None, []
    normalized_query = _slugish(raw_query)
    if not normalized_query or not _is_safe_slug(normalized_query):
        return None, None, []
    entity_types = (
        (entity_type,) if entity_type is not None else _DASHBOARD_ENTITY_TYPES
    )
    for current_type in entity_types:
        for candidate_slug in (raw_query, normalized_query):
            candidate = f"{current_type}:{candidate_slug}"
            if candidate in G:
                return candidate, None, [candidate_slug]
    matches: list[tuple[tuple[int, int, int], str, str]] = []
    query_tokens = set(normalized_query.split("-"))
    for node_id in G.nodes:
        node_type = _graph_type_from_node_id(str(node_id))
        if node_type not in entity_types:
            continue
        data = G.nodes.get(node_id, {})
        node_slug = _graph_slug_from_node_id(str(node_id))
        label = str(data.get("label") or node_slug)
        haystacks = {_slugish(node_slug), _slugish(label)}
        tags = data.get("tags", [])
        if isinstance(tags, list):
            haystacks.update(_slugish(str(tag)) for tag in tags[:12])
        rank = None
        if normalized_query in haystacks:
            rank = 0
        elif any(h.startswith(normalized_query) for h in haystacks):
            rank = 1
        elif any(normalized_query in h for h in haystacks):
            rank = 2
        elif query_tokens and all(
            any(token in h for h in haystacks) for token in query_tokens
        ):
            rank = 3
        if rank is None:
            continue
        try:
            degree = int(G.degree[node_id])
        except Exception:  # noqa: BLE001
            degree = 0
        matches.append(((rank, len(node_slug), -degree), str(node_id), node_slug))
    matches.sort(key=lambda item: item[0])
    suggestions = []
    for _, _node_id, suggestion in matches[:8]:
        if suggestion not in suggestions:
            suggestions.append(suggestion)
    if not matches:
        return None, None, suggestions
    center = matches[0][1]
    resolved_slug = _graph_slug_from_node_id(center)
    return (
        center,
        {"query": raw_query, "slug": resolved_slug, "id": center},
        suggestions,
    )


def _unit_score(value: Any) -> float | None:
    try:
        score = float(value)
    except (TypeError, ValueError):
        return None
    if not math.isfinite(score):
        return None
    return max(0.0, min(1.0, score))
min(1.0, score)) def _sidecar_score_inputs(slug: str, entity_type: str) -> tuple[float | None, float | None]: sidecar = _load_sidecar(slug, entity_type=entity_type) if not isinstance(sidecar, dict): return None, None quality = _unit_score(sidecar.get("score", sidecar.get("raw_score"))) usage = None signals = sidecar.get("signals") if isinstance(signals, dict): telemetry = signals.get("telemetry") if isinstance(telemetry, dict): usage = _unit_score(telemetry.get("score")) return quality, usage def _graph_node_size( nid: str, data: dict[str, Any], *, entity_type: str, degree: int, max_degree: int, ) -> dict[str, Any]: """Return bounded visual size metadata for a graph node.""" slug = nid.split(":", 1)[-1] quality = _unit_score(data.get("quality_score")) usage = _unit_score(data.get("usage_score")) if quality is None or usage is None: sidecar_quality, sidecar_usage = _sidecar_score_inputs(slug, entity_type) quality = quality if quality is not None else sidecar_quality usage = usage if usage is not None else sidecar_usage quality_value = 0.35 if quality is None else quality usage_value = 0.0 if usage is None else usage popularity = ( math.log1p(max(0, degree)) / math.log1p(max(1, max_degree)) if max_degree > 0 else 0.0 ) signal = max( 0.0, min(1.0, 0.45 * quality_value + 0.35 * usage_value + 0.20 * popularity), ) return { "node_size": round(8.0 + signal * 16.0, 2), "size_signal": round(signal, 4), "size_reason": ( f"quality {quality_value:.3f}; usage {usage_value:.3f}; " f"popularity {popularity:.3f}" ), } def _graph_neighborhood( slug: str, hops: int = 1, limit: int = 40, entity_type: str | None = None, ) -> dict: """Return dashboard-shaped {nodes, edges} for the N-hop neighborhood. Uses ``resolve_graph.load_graph`` so the NetworkX 'links' vs 'edges' schema is handled centrally. Returns an empty shape if the graph hasn't been built or the slug isn't a node. """ if "/" in slug or "\\" in slug or ".." 
in slug: return {"nodes": [], "edges": [], "center": None} try: G = _load_dashboard_graph() except Exception: # noqa: BLE001 — graph is advisory; blank on error return {"nodes": [], "edges": [], "center": None} if G.number_of_nodes() == 0: return {"nodes": [], "edges": [], "center": None} center = None normalized_entity_type = _normalize_dashboard_entity_type(entity_type) if entity_type is not None and normalized_entity_type is None: return {"nodes": [], "edges": [], "center": None} center, resolved, suggestions = _resolve_graph_center( G, slug, normalized_entity_type, ) if center is None: return {"nodes": [], "edges": [], "center": None} nodes_out: dict[str, dict] = {} edges_out: list[dict] = [] emitted_edges: set[tuple[str, str]] = set() frontier = [center] seen: set[str] = {center} try: max_degree = max((int(degree) for _node, degree in G.degree()), default=1) except Exception: # noqa: BLE001 max_degree = 1 def _add_node(nid: str, depth: int) -> None: if nid in nodes_out: return data = dict(G.nodes.get(nid, {})) label = data.get("label", nid.split(":", 1)[-1]) tags = list(data.get("tags", [])) default_type = ( "mcp-server" if nid.startswith("mcp-server:") else "harness" if nid.startswith("harness:") else "agent" if nid.startswith("agent:") else "skill" ) ntype = data.get("type") or default_type try: degree = int(G.degree[nid]) except Exception: # noqa: BLE001 degree = 0 size_data = _graph_node_size( nid, data, entity_type=str(ntype), degree=degree, max_degree=max_degree, ) nodes_out[nid] = { "data": { "id": nid, "label": label, "type": ntype, "depth": depth, "degree": degree, "tags": tags[:6], "description": data.get("description", ""), "quality_score": data.get("quality_score"), "usage_score": data.get("usage_score"), "filter_tokens": [nid, label, nid.split(":", 1)[-1], *tags], **size_data, }, } _add_node(center, 0) for depth in range(1, hops + 1): next_frontier: list[str] = [] for nid in frontier: # Sort neighbors by edge weight so we pick the strongest # connections first under the ``limit`` cap. 
            neighbors = sorted(
                G[nid].items(),
                key=lambda kv: -kv[1].get("weight", 1),
            )
            for other, edata in neighbors:
                if len(nodes_out) >= limit:
                    break
                _add_node(other, depth)
                edge_key = tuple(sorted((nid, other)))
                if edge_key not in emitted_edges:
                    emitted_edges.add(edge_key)
                    shared_tags = edata.get("shared_tags", [])[:4]
                    for node_id in (nid, other):
                        tokens = nodes_out[node_id]["data"].setdefault(
                            "filter_tokens", []
                        )
                        tokens.extend(shared_tags)
                    edges_out.append({
                        "data": {
                            "id": f"{edge_key[0]}__{edge_key[1]}",
                            "source": nid,
                            "target": other,
                            "weight": edata.get("weight", 1),
                            "shared_tags": shared_tags,
                            "reasons": edata.get("reasons", []),
                            "semantic": edata.get("semantic"),
                            "tag_sim": edata.get("tag_sim"),
                            "slug_token_sim": edata.get("slug_token_sim"),
                            "source_overlap": edata.get("source_overlap"),
                        },
                    })
                if other not in seen:
                    seen.add(other)
                    next_frontier.append(other)
            if len(nodes_out) >= limit:
                break
        frontier = next_frontier
        if len(nodes_out) >= limit:
            break
    return {
        "nodes": list(nodes_out.values()),
        "edges": edges_out,
        "center": center,
        "resolved": resolved,
        "suggestions": suggestions,
    }


def _graph_stats() -> dict:
    """Top-line graph stats for the home page."""
    report = _wiki_dir() / "graphify-out" / "graph-report.md"
    try:
        match = _GRAPH_REPORT_RE.search(
            report.read_text(encoding="utf-8", errors="replace"),
        )
        if match:
            return {
                "nodes": int(match.group(1).replace(",", "")),
                "edges": int(match.group(2).replace(",", "")),
                "available": True,
            }
    except OSError:
        pass
    try:
        G = _load_dashboard_graph()
    except Exception:  # noqa: BLE001
        return {"nodes": 0, "edges": 0, "available": False}
    return {
        "nodes": G.number_of_nodes(),
        "edges": G.number_of_edges(),
        "available": G.number_of_nodes() > 0,
    }


def _wiki_stats() -> dict:
    """Entity counts across all dashboard-supported entity types.

    MCPs are sharded by first-char under ``entities/mcp-servers//`` so we
    recurse rather than the flat glob used for skills + agents. Home page
    consumes ``total`` for the headline number and the individual counts
    for the dashboard entity-type detail line.
    """
    base = _wiki_dir() / "entities"
    skills = len(list((base / "skills").glob("*.md"))) if (base / "skills").is_dir() else 0
    agents = len(list((base / "agents").glob("*.md"))) if (base / "agents").is_dir() else 0
    mcp_dir = base / "mcp-servers"
    mcps = len(list(mcp_dir.rglob("*.md"))) if mcp_dir.is_dir() else 0
    harnesses = len(list((base / "harnesses").glob("*.md"))) if (base / "harnesses").is_dir() else 0
    return {
        "skills": skills,
        "agents": agents,
        "mcps": mcps,
        "harnesses": harnesses,
        "total": skills + agents + mcps + harnesses,
    }


def _render_home() -> str:
    sessions = _summarize_sessions()
    grades = _grade_distribution()
    recent = sessions[:10]
    gstats = _graph_stats()
    wstats = _wiki_stats()
    runtime = _runtime_lifecycle_summary()
    if _audit_log_path().exists():
        # Count lines without leaking the file handle.
        with _audit_log_path().open(encoding="utf-8") as fh:
            audit_lines = sum(1 for _ in fh)
    else:
        audit_lines = 0
    manifest = _read_manifest()
    recent_audit = _read_jsonl(_audit_log_path(), limit=10)
    rows = []
    for s in recent:
        sid = s["session_id"]
        rows.append(
            f""
            f"{html.escape(sid[:20])}"
            f"{html.escape(s['last_seen'] or '—')}"
            f"{len(s['skills_loaded'])}"
            f"{len(s['skills_unloaded'])}"
            f"{len(s['agents_loaded'])}"
            f"{s['score_updates']}"
            f""
        )
    audit_rows = "".join(
        f"{html.escape((r.get('ts') or '')[-8:])}"
        f"{html.escape(r.get('event', ''))}"
        f"{html.escape(r.get('subject',''))}"
        f""
        for r in reversed(recent_audit)
    )
    body = (
        "

ctx monitor

" # ── Stat grid ──────────────────────────────────────────────── "
" + f"
Currently loaded
" f"
{len(manifest.get('load', []))}
" f"manage →
" + f"
Sidecars
" f"
{sum(grades.values())}
" f"browse →
" + f"
Wiki entities
" f"
{wstats['total']:,}
" f"" f"{wstats['skills']:,} skills · {wstats['agents']:,} agents · " f"{wstats['mcps']:,} MCPs · {wstats['harnesses']:,} harnesses
" + f"
Knowledge graph
" f"
{gstats['nodes']}
" f"{gstats['edges']:,} edges" f" · explore →
" + f"
Runtime checks
" f"
{runtime['validations_total']}
" f"" f"{runtime['validation_failures']} failed / " f"{runtime['open_escalations_total']} open escalations" f" / view ->
" + f"
Audit events
" f"
{audit_lines}
" f"view → · live →
" + f"
Sessions
" f"
{len(sessions)}
" f"browse →
" + "
" # ── Grade distribution ──────────────────────────────────────── "
Skill quality grades: " + "".join( f"{g}: {n} " for g, n in grades.items() ) + f" · total {sum(grades.values())}" "
" # ── Two-column: recent sessions + recent audit ──────────────── "
" f"
Recent sessions ({len(sessions)} total)" + ("" "" "" + "".join(rows) + "
SessionLast seenLoadUnloadAgentsScores
" if recent else "

No sessions recorded yet. Hooks start logging " "once you run a Claude Code session with ctx installed.

") + "
" "
Latest audit events" + ("" "" + audit_rows + "
TimeEventSubject
" if recent_audit else "

No audit events yet.

") + "
" "
" ) return _layout("Home", body) def _render_sessions_index() -> str: sessions = _summarize_sessions() rows = [] for s in sessions: sid = s["session_id"] rows.append( f"" f"{html.escape(sid[:32])}" f"{html.escape(s['first_seen'] or '—')}" f"{html.escape(s['last_seen'] or '—')}" f"{len(s['skills_loaded'])}" f"{len(s['skills_unloaded'])}" f"{len(s['agents_loaded'])}" f"{len(s['agents_unloaded'])}" f"{len(s['mcps_loaded'])}" f"{len(s['mcps_unloaded'])}" f"{s['lifecycle_transitions']}" f"" ) body = ( "

Sessions

" f"

{len(sessions)} unique sessions observed.

" "" "" "" "" "" + "".join(rows) + "
SessionFirst seenLast seenSkills↑Skills↓Agents↑Agents↓MCPs↑MCPs↓Lifecycle
" ) return _layout("Sessions", body) def _render_session_detail(session_id: str) -> str: detail = _session_detail(session_id) audit = detail["audit_entries"] events = detail["load_events"] audit_rows = "".join( f"{html.escape(r.get('ts', ''))}" f"{html.escape(r.get('event', ''))}" f"{html.escape(r.get('subject', ''))}" f"{html.escape(json.dumps(r.get('meta', {}))[:80])}" for r in audit ) event_rows = "".join( f"{html.escape(r.get('timestamp', ''))}" f"{html.escape(r.get('event', ''))}" f"{html.escape(r.get('skill') or r.get('agent') or '')}" for r in events ) body = ( f"

Session {html.escape(session_id)}

" f"
{len(audit)} audit entries · " f"{len(events)} load/unload events
" "

Audit timeline

" "" + audit_rows + "
tseventsubjectmeta
" "

Load/unload events

" "" + event_rows + "
tseventsubject
" ) return _layout(f"Session {session_id}", body) def _render_skills() -> str: sidecars = _all_sidecars() sidecars.sort(key=lambda s: (s.get("grade", "F"), -s.get("raw_score", 0.0))) # Sidebar stats for the filter UI. grade_counts = {"A": 0, "B": 0, "C": 0, "D": 0, "F": 0} type_counts = {entity_type: 0 for entity_type in _DASHBOARD_ENTITY_TYPES} for sc in sidecars: grade_counts[sc.get("grade", "F")] = grade_counts.get(sc.get("grade", "F"), 0) + 1 st = _sidecar_entity_type(sc) type_counts[st] = type_counts.get(st, 0) + 1 cards = "".join( f"
" f"
" f"{html.escape(s.get('slug', ''))}" f"{html.escape(s.get('grade', 'F'))}" f"
" f"
" f"score {s.get('raw_score', 0.0):.3f} · {html.escape(s.get('subject_type', 'skill'))}" f"{' · ' + html.escape(s.get('hard_floor','')) if s.get('hard_floor') else ''}" f"
" f"
" f"sidecar" f"wiki" f"graph" f"
" f"
" for s in sidecars ) grade_checkboxes = "".join( f"" for g in ("A", "B", "C", "D", "F") ) type_checkboxes = "".join( f"" for t in _DASHBOARD_ENTITY_TYPES ) body = ( "

Quality sidecars

" f"

{len(sidecars)} sidecars · click any card to drill in.

" "
" # ── Left filter sidebar ────────────────────────────────────── "" # ── Card grid ──────────────────────────────────────────────── "
" + cards + "
" "
" "" ) return _layout("Skills", body) def _render_skill_detail(slug: str, entity_type: str | None = None) -> str: sidecar = _load_sidecar(slug, entity_type=entity_type) if sidecar is None: return _layout(slug, f"

{html.escape(slug)}

No sidecar.

") requested_type = ( _normalize_dashboard_entity_type(entity_type) or _sidecar_entity_type(sidecar) ) audit = [r for r in _read_jsonl(_audit_log_path()) if r.get("subject") == slug and _audit_entity_type(r) == requested_type] audit_rows = "".join( f"{html.escape(r.get('ts', ''))}" f"{html.escape(r.get('event', ''))}" f"{html.escape(r.get('actor', ''))}" for r in audit[-100:] ) hard_floor = sidecar.get("hard_floor") hard_floor_html = ( f" · floor {html.escape(str(hard_floor))}" if hard_floor else "" ) body = ( f"

{html.escape(slug)}

" f"
" f"grade {html.escape(sidecar.get('grade', 'F'))} " f"score {sidecar.get('raw_score', 0.0):.3f} " f"· type {html.escape(sidecar.get('subject_type', ''))}" f"{hard_floor_html}" "
" "

Sidecar

" f"
{html.escape(json.dumps(sidecar, indent=2)[:4000])}
" f"

Audit timeline ({len(audit)} entries)

" "" + audit_rows + "
tseventactor
" ) return _layout(slug, body) def _top_degree_seeds(limit: int = 18, *, allow_load: bool = True) -> list[dict]: """Pick high-degree nodes from the graph as seed suggestions. Used by ``/graph`` landing page so the first-time visitor has something to click. Falls back to empty on any graph-load failure. """ try: G = _load_dashboard_graph() if allow_load else _GRAPH_CACHE_VALUE except Exception: # noqa: BLE001 return [] if G is None: return [] if G.number_of_nodes() == 0: return [] ranked = sorted(G.degree, key=lambda kv: -kv[1])[:limit] out: list[dict] = [] for node_id, degree in ranked: prefix, _, slug = node_id.partition(":") seed_type = ( "mcp-server" if prefix == "mcp-server" else "harness" if prefix == "harness" else "agent" if prefix == "agent" else "skill" ) out.append({ "slug": slug, "type": seed_type, "degree": int(degree), "label": G.nodes[node_id].get("label", slug), }) return out def _read_default_config_raw() -> dict[str, Any]: try: from ctx_config import _read_default_config # type: ignore raw = _read_default_config() return raw if isinstance(raw, dict) else {} except Exception: # noqa: BLE001 path = Path(__file__).with_name("config.json") if not path.exists(): return {} try: raw = json.loads(path.read_text(encoding="utf-8")) return raw if isinstance(raw, dict) else {} except Exception: # noqa: BLE001 return {} def _read_user_config_raw() -> dict[str, Any]: path = _user_config_path() if not path.exists(): return {} try: raw = json.loads(path.read_text(encoding="utf-8")) return raw if isinstance(raw, dict) else {} except Exception: # noqa: BLE001 return {} def _deep_merge_config(base: dict[str, Any], override: dict[str, Any]) -> None: for key, value in override.items(): if isinstance(base.get(key), dict) and isinstance(value, dict): _deep_merge_config(base[key], value) else: base[key] = value def _config_value(raw: dict[str, Any], path: str, default: Any = None) -> Any: current: Any = raw for part in path.split("."): if not isinstance(current, dict) or part not in current: return default current = current[part] return current def _set_config_value(raw: dict[str, Any], path: str, value: Any) -> None: current = raw parts = path.split(".") for part in parts[:-1]: child = current.get(part) if not isinstance(child, dict): child = {} current[part] = child current = child current[parts[-1]] = value def _delete_config_value(raw: dict[str, Any], path: str) -> None: current = raw parts = path.split(".") parents: list[tuple[dict[str, Any], str]] = [] for part in parts[:-1]: child = current.get(part) if not isinstance(child, dict): return parents.append((current, part)) current = child current.pop(parts[-1], None) for parent, key in reversed(parents): child = parent.get(key) if isinstance(child, dict) and not child: parent.pop(key, None) def _config_field_specs() -> tuple[dict[str, Any], ...]: return ( {"group": "Knowledge", "path": "knowledge.mode", "type": "choice", "choices": ("shipped", "local", "enriched"), "required": True, "label": "Knowledge source mode", "help": "shipped uses ctx's packaged graph/wiki, local stays private, enriched starts from shipped knowledge and adds your own.", "example": "enriched"}, {"group": "Recommendation", "path": "resolver.recommendation_top_k", "type": "int", "min": 1, "max": 5, "required": True, "label": "Max mixed recommendations", "help": "Hard cap for the combined skills/agents/MCP recommendation bundle.", "example": 5}, {"group": "Recommendation", "path": "resolver.recommendation_min_normalized_score", "type": "float", "min": 0.0, "max": 1.0, 
"step": 0.01, "required": True, "label": "Minimum recommendation score", "help": "Drops weak skill/agent/MCP matches instead of recommending at all cost.", "example": 0.30}, {"group": "Recommendation", "path": "resolver.max_skills", "type": "int", "min": 1, "max": 50, "label": "Resolver hard skill ceiling", "help": "Maximum load candidates considered by a resolver call.", "example": 15}, {"group": "Harness", "path": "harness.recommendation_min_fit_score", "type": "float", "min": 0.0, "max": 1.0, "step": 0.01, "required": True, "label": "Minimum harness fit score", "help": "Custom/API/local model users only see harnesses at or above this fit floor.", "example": 0.85}, {"group": "Harness", "path": "harness.recommendation_min_normalized_score", "type": "float", "min": 0.0, "max": 1.0, "step": 0.01, "label": "Harness normalized score floor", "help": "Compatibility display floor for older configs.", "example": 0.85}, {"group": "Micro-skills", "path": "skill_transformer.line_threshold", "type": "int", "min": 1, "max": 2000, "required": True, "label": "Micro-skill line threshold", "help": "Any SKILL.md above this many lines triggers the micro-skills conversion gate.", "example": 180}, {"group": "Micro-skills", "path": "skill_transformer.max_stage_lines", "type": "int", "min": 1, "max": 300, "label": "Max staged reference lines", "help": "Target maximum lines for each generated reference stage.", "example": 40}, {"group": "Micro-skills", "path": "skill_transformer.stage_count", "type": "int", "min": 1, "max": 20, "label": "Stage count", "help": "Target number of staged references for long skills.", "example": 5}, {"group": "Graph", "path": "graph.min_edge_weight", "type": "float", "min": 0.0, "max": 1.0, "step": 0.01, "required": True, "label": "Minimum final edge weight", "help": "Edges below this blended score are dropped from graph.json during rebuild.", "example": 0.03}, {"group": "Graph", "path": "graph.edge_weights.semantic", "type": "float", "min": 0.0, "max": 1.0, "step": 0.01, "required": True, "label": "Semantic edge weight", "help": "Semantic portion of the blended edge score. Semantic/tags/slug tokens should sum to 1.", "example": 0.70}, {"group": "Graph", "path": "graph.edge_weights.tags", "type": "float", "min": 0.0, "max": 1.0, "step": 0.01, "required": True, "label": "Tag edge weight", "help": "Tag-overlap portion of the blended edge score.", "example": 0.15}, {"group": "Graph", "path": "graph.edge_weights.slug_tokens", "type": "float", "min": 0.0, "max": 1.0, "step": 0.01, "required": True, "label": "Slug-token edge weight", "help": "Slug-token overlap portion of the blended edge score.", "example": 0.15}, {"group": "Graph", "path": "graph.semantic.top_k", "type": "int", "min": 1, "max": 200, "label": "Semantic neighbors per entity", "help": "Maximum nearest semantic neighbors retained per entity during graph build.", "example": 20}, {"group": "Graph", "path": "graph.semantic.build_floor", "type": "float", "min": 0.0, "max": 1.0, "step": 0.01, "label": "Semantic build floor", "help": "Low inclusion bar used when graph embeddings are rebuilt.", "example": 0.50}, {"group": "Graph", "path": "graph.semantic.min_cosine", "type": "float", "min": 0.0, "max": 1.0, "step": 0.01, "required": True, "label": "Semantic display floor", "help": "Read-time semantic filter. 
_CONFIG_REMOVE = object()


def _coerce_config_value(spec: dict[str, Any], raw_value: Any) -> Any:
    if raw_value is None or (isinstance(raw_value, str) and raw_value.strip() == ""):
        return _CONFIG_REMOVE
    kind = spec.get("type", "str")
    if kind == "bool":
        if isinstance(raw_value, bool):
            return raw_value
        text = str(raw_value).strip().lower()
        if text in {"true", "1", "yes", "on"}:
            return True
        if text in {"false", "0", "no", "off"}:
            return False
        raise ValueError(f"{spec['path']} must be true or false")
    if kind == "int":
        if isinstance(raw_value, bool):
            raise ValueError(f"{spec['path']} must be an integer")
        value: int | float = int(raw_value)
    elif kind == "float":
        if isinstance(raw_value, bool):
            raise ValueError(f"{spec['path']} must be a number")
        value = float(raw_value)
    elif kind == "choice":
        choice_value = str(raw_value).strip()
        if choice_value not in spec.get("choices", ()):
            raise ValueError(f"{spec['path']} must be one of {spec.get('choices')}")
        return choice_value
    else:
        text_value = str(raw_value).strip()
        return text_value if text_value else _CONFIG_REMOVE
    if "min" in spec and value < spec["min"]:
        raise ValueError(f"{spec['path']} must be >= {spec['min']}")
    if "max" in spec and value > spec["max"]:
        raise ValueError(f"{spec['path']} must be <= {spec['max']}")
    return value


def _effective_config_payload() -> dict[str, Any]:
    defaults = _read_default_config_raw()
    user = _read_user_config_raw()
    effective = json.loads(json.dumps(defaults))
    _deep_merge_config(effective, user)
    return {
        "defaults": defaults,
        "user": user,
        "effective": effective,
        "path": str(_user_config_path()),
    }


def _save_config_updates(updates: dict[str, Any]) -> dict[str, Any]: specs
= {spec["path"]: spec for spec in _config_field_specs()} unknown = sorted(set(updates) - set(specs)) if unknown: return {"ok": False, "detail": f"unknown config keys: {', '.join(unknown)}"} user_config = _read_user_config_raw() try: for path, raw_value in updates.items(): value = _coerce_config_value(specs[path], raw_value) if value is _CONFIG_REMOVE: _delete_config_value(user_config, path) else: _set_config_value(user_config, path, value) except (TypeError, ValueError) as exc: return {"ok": False, "detail": str(exc)} config_path = _user_config_path() with file_lock(config_path): _atomic_write_text( config_path, json.dumps(user_config, indent=2, sort_keys=True) + "\n", ) return {"ok": True, "detail": f"saved {len(updates)} config keys"} def _render_config() -> str: payload = _effective_config_payload() effective = payload["effective"] user = payload["user"] rows_by_group: dict[str, list[str]] = defaultdict(list) for spec in _config_field_specs(): path = spec["path"] value = _config_value(effective, path, "") default = _config_value(payload["defaults"], path, "") user_value = _config_value(user, path, _CONFIG_REMOVE) is_override = user_value is not _CONFIG_REMOVE required = bool(spec.get("required")) req_html = " Required" if required else "" help_text = html.escape(str(spec.get("help", ""))) default_html = html.escape(json.dumps(default) if not isinstance(default, str) else default) example_value = spec.get("example") example_html = html.escape(json.dumps(example_value) if not isinstance(example_value, str) else str(example_value)) common_attrs = ( f"name='{html.escape(path)}' data-config-path='{html.escape(path)}' " f"data-original-value='{html.escape(str(value))}' " f"data-default='{default_html}' {'required' if required else ''}" ) if spec.get("type") == "choice": options = "".join( f"" for choice in spec.get("choices", ()) ) control = f"" elif spec.get("type") == "bool": control = ( f"" ) elif spec.get("type") in {"int", "float"}: step = spec.get("step", 1 if spec.get("type") == "int" else 0.01) control = ( f"" ) else: control = ( f"" ) override_html = ( "override" if is_override else "default" ) clear_html = ( f"" if is_override else "" ) rows_by_group[str(spec["group"])].append( "
" f"" f"
{control}
" f"{clear_html}" f"

{help_text}
" f"Default: {default_html} · Example: {example_html} · " f"{override_html}

" "
" ) group_html = "".join( "
" f"

{html.escape(group)}

" + "".join(rows) + "
" for group, rows in rows_by_group.items() ) token = _MONITOR_TOKEN or "" body = ( "

Config

" "

Edit ctx runtime defaults from the dashboard. Saving writes only the fields you changed. To drop an existing override and fall back to the shipped default, use remove user override. Important fields are marked Required.

" f"

User config: {html.escape(payload['path'])}

" "
" + group_html + "
" " " " " "" "
" "" ) return _layout("Config", body) def _render_graph(focus: str | None = None, focus_type: str | None = None) -> str: """Interactive graph view backed by a dependency-free SVG renderer.""" focus_slug = focus or "" focus_js = json.dumps(focus_slug) focus_type_js = json.dumps(focus_type or "") gstats = _graph_stats() seeds = ( _top_degree_seeds(allow_load=False) if not focus_slug and gstats.get("available") else [] ) seed_html = "" if seeds: chips = "".join( f"" f"{html.escape(s['slug'])} " f"· deg {s['degree']}" f"" for s in seeds ) seed_html = ( "
Popular seed slugs " "" "(click to explore 1-hop neighborhood)" f"
{chips}
" ) stats_html = ( f"{gstats.get('nodes', 0):,} nodes · " f"{gstats.get('edges', 0):,} edges" ) body = ( "

Knowledge graph

" f"

Enter an entity slug to explore its 1-hop " f"neighborhood. Edges blend semantic + tag + slug-token " f"signals (weight = final_weight). {stats_html}

" + seed_html # Two-column layout — filter sidebar on the left (mirrors /wiki), # graph list on the right. Client-side JS hides nodes by # type + tag without hitting the server so a user can carve out # a subgraph without rebuilding anything. + "
" # Left sidebar "" # Right: graph list panel "
" "
" "" ) return _layout("Graph", body) def _render_wiki_entity(slug: str, entity_type: str | None = None) -> str: """Render one wiki entity page (frontmatter + body).""" path = _wiki_entity_path(slug, entity_type=entity_type) if path is None: return _layout( slug, f"

{html.escape(slug)}

" f"

No wiki page found for {html.escape(slug)}. " f"Try the skills index.

", ) try: raw = path.read_text(encoding="utf-8", errors="replace") except OSError as exc: return _layout( slug, f"

{html.escape(slug)}

read error: {html.escape(str(exc))}

", ) meta, md_body = _parse_frontmatter(raw) sidecar = _load_sidecar(slug, entity_type=entity_type) type_suffix = ( f"&type={html.escape(entity_type)}" if entity_type in _DASHBOARD_ENTITY_TYPES else "" ) fm_row_parts = [] for k, v in sorted(meta.items()): value, truncated = _truncate_text(_frontmatter_text(v), 120) marker = " (truncated)" if truncated else "" fm_row_parts.append( f"{html.escape(k)}" f"{html.escape(value)}{marker}" ) fm_rows = "".join(fm_row_parts) quality_summary_html = "" if sidecar is not None: quality_summary_html = ( "
" f"Quality " f"{html.escape(sidecar.get('grade', 'F'))} " f"score {sidecar.get('raw_score', 0.0):.3f}" f"{' · floor ' + html.escape(sidecar.get('hard_floor','')) if sidecar.get('hard_floor') else ''}" f"
" ) md_body_without_quality, embedded_quality_markdown = _extract_embedded_quality_block(md_body) display_body = _strip_duplicate_wiki_heading(md_body_without_quality, slug) body_preview, body_truncated = _truncate_text(display_body, 12000) body_html = _render_wiki_markdown(body_preview) body_truncated_html = ( "

Body preview truncated at 12,000 characters.

" if body_truncated else "" ) overview_html = ( "
" f"
{body_html}" f"{body_truncated_html}
" f"
Frontmatter" "" "" + (fm_rows or "") + "
FieldValue
none
" "
" ) subgraph_html = _render_entity_subgraph(slug, entity_type=entity_type) quality_html = _render_quality_drilldown(sidecar, embedded_quality_markdown) tab_script = """ """ body = ( f"

{html.escape(slug)}

" + quality_summary_html + "
" "" "" "" "
" f"
{overview_html}
" f"" f"" + tab_script ) return _layout(slug, body) def _wiki_index_entries( limit_per_type: int | None = _WIKI_INDEX_LIMIT_PER_TYPE, ) -> list[dict]: """List every wiki entity page under ~/.claude/skill-wiki/entities/. Returns ``{slug, type, tags, description}`` rows. The full Skills.sh corpus is too large to render as one HTML page, so the dashboard samples a bounded number of pages per entity type. """ base = _wiki_dir() / "entities" if not base.is_dir(): return [] # MCPs are sharded (one dir per first-char) so we glob recursively; # all other dashboard entity types are flat. sources = _DASHBOARD_ENTITY_SOURCES out: list[dict] = [] for sub, entity_type, recursive in sources: d = base / sub if not d.is_dir(): continue paths = sorted( d.rglob("*.md") if recursive else d.glob("*.md"), key=lambda path: (path.stem.lower(), path.relative_to(d).as_posix().lower()), ) seen_for_type = 0 for path in paths: if limit_per_type is not None and seen_for_type >= limit_per_type: break slug = path.stem if not _is_safe_slug(slug): continue try: # Read only the first ~2 KB — enough for frontmatter. head = path.read_text(encoding="utf-8", errors="replace")[:2048] except OSError: continue meta, _ = _parse_frontmatter(head) all_tags = _frontmatter_tags(meta.get("tags", ""), limit=None) description, _truncated = _truncate_text( _frontmatter_text(meta.get("description", "")), 200, ) out.append({ "slug": slug, "type": entity_type, "tags": all_tags[:6], "search_tags": all_tags, "description": description, }) seen_for_type += 1 return out def _render_wiki_index() -> str: """Card grid of every wiki entity — search + type filter + sidecar grades.""" entries = _wiki_index_entries() wstats = _wiki_stats() total_available = int(wstats.get("total") or len(entries)) # Join with grade pills where a sidecar exists. grade_by_key: dict[tuple[str, str], str] = {} for sc in _all_sidecars(): slug = sc.get("slug") if slug: grade_by_key[(str(slug), _sidecar_entity_type(sc))] = sc.get("grade", "") type_counts = { "skill": int(wstats.get("skills") or 0), "agent": int(wstats.get("agents") or 0), "mcp-server": int(wstats.get("mcps") or 0), "harness": int(wstats.get("harnesses") or 0), } cards = "".join( "" "
" f"{html.escape(e['slug'])}" + (f"" f"{html.escape(grade_by_key[(e['slug'], e['type'])])}" if grade_by_key.get((e['slug'], e['type'])) else f"{html.escape(e['type'])}") + "
" f"
" f"{html.escape(e['description'] or '(no description)')}" "
" + (f"
" f"{' · '.join(html.escape(t) for t in e['tags'][:5])}
" if e["tags"] else "") + "
" for e in entries ) type_checkboxes = "".join( f"" for t in _DASHBOARD_ENTITY_TYPES ) body = ( "

Wiki

" f"

{len(entries):,} shown of {total_available:,} entity pages under " f"~/.claude/skill-wiki/entities/ · " "search by slug / description / tag within the visible sample, " "or click a card to read the page.

" "
" # Left sidebar "" # Card grid "
" + (cards or "

No wiki entities found. " "Extract graph/wiki-graph.tar.gz into " "~/.claude/skill-wiki/ to populate.

") + "
" "
" "" ) return _layout("Wiki", body) def _docs_roots() -> list[Path]: roots: list[Path] = [] for root in (Path.cwd(), Path(__file__).resolve().parent.parent): if root not in roots and (root / "docs").is_dir(): roots.append(root) return roots def _doc_title(text: str, fallback: str) -> str: for line in text.splitlines(): match = re.match(r"^#\s+(.+?)\s*$", line) if match: return match.group(1).strip() return fallback def _doc_summary(text: str) -> str: in_frontmatter = text.startswith("---\n") for block in re.split(r"\n\s*\n", text): chunk = block.strip() if not chunk: continue if in_frontmatter: if chunk == "---" or chunk.endswith("\n---"): in_frontmatter = False continue if chunk.startswith("#") or chunk.startswith("```") or chunk.startswith("