"""Nursing Knowledge Base — LM Wiki for Nursing Education.
Inspired by Karpathy's LLM Wiki pattern, adapted for the Nursing Citizen Development Organisation.
Students and educators add raw sources; Claude builds and maintains a structured nursing wiki.
"""
import streamlit as st
import anthropic
import json
import zipfile
import io
import datetime
import re
import sys
import os
try:
from pypdf import PdfReader
_PDF_AVAILABLE = True
except ImportError:
_PDF_AVAILABLE = False
import requests as _requests
sys.path.insert(0, os.path.dirname(__file__))
from wiki.starter import get_starter_wiki
from core.compiler import compile_source, rebuild_index
from core.qa import answer_question, file_answer_to_wiki
from core.linter import lint_wiki, generate_missing_article
# ─── Page config ───────────────────────────────────────────────────────────────
# Must run before any other Streamlit call in the script.
st.set_page_config(
    page_title="Nursing Knowledge Base | CQAI",
    page_icon="📖",
    layout="wide",
    initial_sidebar_state="expanded",
)
# ─── Styles ────────────────────────────────────────────────────────────────────
# NOTE(review): the CSS payload appears to have been lost from this literal —
# as written this injects an empty block. TODO: restore the stylesheet.
st.markdown("""
""", unsafe_allow_html=True)
# ─── Session state ──────────────────────────────────────────────────────────────
# Streamlit reruns this whole script on every interaction, so each per-session
# default is guarded to initialise exactly once.
if "wiki" not in st.session_state:
    st.session_state.wiki = get_starter_wiki()
if "selected_article" not in st.session_state:
    st.session_state.selected_article = None
if "qa_history" not in st.session_state:
    st.session_state.qa_history = []
if "lint_report" not in st.session_state:
    st.session_state.lint_report = None
if "compile_status" not in st.session_state:
    st.session_state.compile_status = ""
# Convenience alias: mutating `wiki` mutates the session-state object in place.
wiki = st.session_state.wiki
def get_client() -> anthropic.Anthropic | None:
    """Build an Anthropic client from the session-held API key.

    Returns:
        An ``anthropic.Anthropic`` instance, or ``None`` when no key has been
        entered yet (AI features stay disabled in that case).
    """
    key = st.session_state.get("api_key", "").strip()
    return anthropic.Anthropic(api_key=key) if key else None
def log(entry: str):
    """Append a dated entry to the wiki's append-only operation log."""
    stamp = datetime.date.today().isoformat()
    wiki["log"].append(f"## [{stamp}] {entry}")
def add_or_update_article(article: dict):
    """Insert or replace a wiki article keyed by its slug.

    Normalises the stored record (optional fields fall back to sensible
    defaults, ``last_updated`` defaults to today) and refreshes the
    article-count metadata afterwards.
    """
    today = datetime.date.today().isoformat()
    record = {
        "title": article["title"],
        "category": article["category"],
        "tags": article.get("tags", []),
        "last_updated": article.get("last_updated", today),
        "sources": article.get("sources", []),
        "content": article["content"],
    }
    wiki["articles"][article["slug"]] = record
    wiki["metadata"]["article_count"] = len(wiki["articles"])
def fetch_pdf_from_url(url: str, timeout: int = 60) -> bytes:
    """Fetch a PDF from a URL server-side (bypasses HF proxy upload limits).

    Args:
        url: Direct link to a PDF document.
        timeout: Request timeout in seconds.

    Returns:
        The raw PDF bytes.

    Raises:
        requests.HTTPError: on a non-2xx response.
        ValueError: when the response body is not a PDF (e.g. an HTML error
            page) — fails fast with a clear message instead of a confusing
            pypdf parse error downstream.
    """
    headers = {"User-Agent": "NursingKnowledgeBase/1.0 (nursing education tool)"}
    # The whole body is read into memory anyway; the original `stream=True`
    # was a no-op because `.content` consumes the stream in full.
    resp = _requests.get(url, headers=headers, timeout=timeout)
    resp.raise_for_status()
    body = resp.content
    # Per the PDF spec the "%PDF-" header must appear near the start of the file.
    if b"%PDF-" not in body[:1024]:
        raise ValueError("URL did not return a PDF document")
    return body
def extract_pdf_text(file_bytes: bytes) -> tuple[str, int]:
    """Extract all text from a PDF. Returns (text, page_count).

    Pages yielding no text (e.g. scanned images) are skipped in the output
    but still included in ``page_count``.

    Raises:
        RuntimeError: if pypdf is not installed. The module-level import is
            guarded, and this function is reachable via the "PDF from URL"
            path even when the upload path is disabled — without this check
            a missing pypdf would surface as a NameError on ``PdfReader``.
    """
    if not _PDF_AVAILABLE:
        raise RuntimeError("pypdf is not installed — cannot extract PDF text")
    reader = PdfReader(io.BytesIO(file_bytes))
    pages = []
    for i, page in enumerate(reader.pages):
        text = page.extract_text() or ""
        if text.strip():
            pages.append(f"--- Page {i + 1} ---\n{text}")
    return "\n\n".join(pages), len(reader.pages)
def export_wiki_zip() -> bytes:
    """Serialize the whole wiki into a ZIP archive.

    Layout: a markdown tree (index, log, one file per article grouped by
    category, raw sources) plus a full JSON backup for lossless re-import.
    """
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w", zipfile.ZIP_DEFLATED) as zf:
        # Index and operation log.
        zf.writestr("wiki/index.md", wiki.get("index_summary", ""))
        zf.writestr("wiki/log.md", "\n\n".join(wiki.get("log", [])))
        # One markdown file per article, foldered by category.
        for slug, art in wiki["articles"].items():
            folder = art.get("category", "general")
            zf.writestr(f"wiki/{folder}/{slug}.md", art["content"])
        # Raw source material.
        for src_id, src in wiki.get("sources", {}).items():
            zf.writestr(f"raw/{src_id}.md", f"# {src['title']}\n\n{src['content']}")
        # Lossless JSON backup of the entire wiki structure.
        zf.writestr("wiki_backup.json", json.dumps(wiki, indent=2))
    buf.seek(0)
    return buf.getvalue()
def category_color(cat: str) -> str:
    """Map a wiki category slug to its display hex colour.

    Unknown categories fall back to NHS blue (#005EB8).
    """
    palette = {
        "standards": "#003087",
        "clinical": "#007F3B",
        "pharmacology": "#8B0000",
        "evidence": "#6B21A8",
        "frameworks": "#0369A1",
        "safety": "#C05621",
        "law": "#1F2937",
        "mental_health": "#065F46",
        "research": "#4338CA",
        "ethics": "#92400E",
    }
    return palette.get(cat, "#005EB8")
# ─── Sidebar ────────────────────────────────────────────────────────────────────
with st.sidebar:
    # NOTE(review): HTML markup appears stripped from this literal; only the
    # heading text remains. TODO: restore the original markup.
    st.markdown('📖 Nursing Wiki', unsafe_allow_html=True)
    st.caption("Nursing Citizen Development Organisation")
    st.divider()
    # API Key — bring-your-own-key; held in session state only, never persisted.
    with st.expander("🔑 Claude API Key (BYOK)", expanded=not st.session_state.get("api_key")):
        api_key = st.text_input(
            "Anthropic API Key",
            type="password",
            value=st.session_state.get("api_key", ""),
            help="Required for Compile, Q&A, and Lint features. Never stored — session only.",
            key="api_key_input",
        )
        # Rerun immediately on key change so the rest of the UI unlocks.
        if api_key != st.session_state.get("api_key", ""):
            st.session_state.api_key = api_key
            st.rerun()
        if st.session_state.get("api_key"):
            st.success("API key set")
        else:
            st.info("Enter key to enable AI features")
    st.divider()
    # Wiki stats — counts per category, recomputed on every rerun.
    st.markdown("**Wiki Statistics**")
    articles = wiki["articles"]
    sources = wiki.get("sources", {})
    categories = {}
    for art in articles.values():
        cat = art.get("category", "other")
        categories[cat] = categories.get(cat, 0) + 1
    col1, col2 = st.columns(2)
    with col1:
        # NOTE(review): stat-card markup appears lost — renders nothing as written.
        st.markdown(f'', unsafe_allow_html=True)
    with col2:
        st.markdown(f'', unsafe_allow_html=True)
    st.markdown("**Categories**")
    # Most-populated categories first.
    for cat, count in sorted(categories.items(), key=lambda x: -x[1]):
        color = category_color(cat)
        st.markdown(f'● {cat.replace("_", " ").title()}: **{count}**', unsafe_allow_html=True)
    st.divider()
    # Import/Export — JSON round-trips the full wiki; ZIP is a readable export.
    st.markdown("**Import / Export**")
    uploaded = st.file_uploader("Import wiki (JSON)", type="json", key="wiki_import")
    if uploaded:
        try:
            imported = json.load(uploaded)
            # Only files that look like a wiki backup are accepted; anything
            # else is silently ignored — TODO(review): consider a warning.
            if "articles" in imported:
                st.session_state.wiki = imported
                st.success(f"Imported {len(imported['articles'])} articles")
                st.rerun()
        except Exception as e:
            st.error(f"Import failed: {e}")
    zip_bytes = export_wiki_zip()
    st.download_button(
        "📥 Export Wiki (ZIP)",
        data=zip_bytes,
        file_name=f"nursing_wiki_{datetime.date.today()}.zip",
        mime="application/zip",
        use_container_width=True,
    )
    json_bytes = json.dumps(wiki, indent=2).encode()
    st.download_button(
        "💾 Save Wiki (JSON)",
        data=json_bytes,
        file_name=f"nursing_wiki_{datetime.date.today()}.json",
        mime="application/json",
        use_container_width=True,
    )
# ─── Main tabs ──────────────────────────────────────────────────────────────────
# Order matters: the tab variables unpack positionally against the label list.
tab_browse, tab_sources, tab_compile, tab_qa, tab_lint, tab_log = st.tabs([
    "📚 Browse Wiki",
    "➕ Add Sources",
    "🔨 Compile",
    "💬 Ask",
    "🔍 Health Check",
    "📋 Log",
])
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# TAB 1: BROWSE — two-pane reader: filterable article list + article viewer.
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
with tab_browse:
    # Educational-use disclaimer banner.
    # NOTE(review): surrounding HTML markup appears stripped from this literal.
    st.markdown('📖 This wiki supports nursing education and CPD. It does not replace clinical judgment, current NMC guidance, or local trust policies.', unsafe_allow_html=True)
    col_list, col_reader = st.columns([1, 2])
    with col_list:
        st.markdown("### Articles")
        # Search
        search = st.text_input("🔍 Search", placeholder="e.g. ABCDE, medications, safeguarding")
        # Category filter
        all_cats = sorted(set(a.get("category", "other") for a in wiki["articles"].values()))
        cat_filter = st.selectbox("Category", ["All"] + [c.replace("_", " ").title() for c in all_cats])
        # Filter articles: every search term must match title+tags (AND semantics).
        filtered = {}
        for slug, art in wiki["articles"].items():
            cat_match = cat_filter == "All" or art.get("category", "").replace("_", " ").title() == cat_filter
            if search:
                text = (art["title"] + " " + " ".join(art.get("tags", []))).lower()
                search_match = all(term in text for term in search.lower().split())
            else:
                search_match = True
            if cat_match and search_match:
                filtered[slug] = art
        # Sort by category then title
        sorted_articles = sorted(filtered.items(), key=lambda x: (x[1].get("category", ""), x[1]["title"]))
        # Group by category: emit a coloured header whenever the category changes.
        current_cat = None
        for slug, art in sorted_articles:
            cat = art.get("category", "other")
            if cat != current_cat:
                current_cat = cat
                color = category_color(cat)
                # NOTE(review): header markup (presumably using `color`) looks lost.
                st.markdown(f'{cat.replace("_"," ")}', unsafe_allow_html=True)
            if st.button(
                art["title"],
                key=f"art_{slug}",
                use_container_width=True,
                # Highlight the currently-open article.
                type="secondary" if st.session_state.selected_article != slug else "primary",
            ):
                st.session_state.selected_article = slug
                st.rerun()
        st.caption(f"{len(filtered)} of {len(wiki['articles'])} articles")
    with col_reader:
        if st.session_state.selected_article and st.session_state.selected_article in wiki["articles"]:
            art = wiki["articles"][st.session_state.selected_article]
            slug = st.session_state.selected_article
            # Header
            col_title, col_meta = st.columns([3, 1])
            with col_title:
                st.markdown(f"## {art['title']}")
            with col_meta:
                color = category_color(art.get("category", ""))
                st.markdown(f'{art.get("category", "").replace("_", " ")}', unsafe_allow_html=True)
                st.caption(f"Updated: {art.get('last_updated', 'n/a')}")
            # Tags
            tags_html = " ".join([f'{t}' for t in art.get("tags", [])])
            st.markdown(tags_html, unsafe_allow_html=True)
            st.divider()
            # Content — render [[wiki-style backlinks]] as bold text
            content = art["content"]
            content = re.sub(r'\[\[([^\]]+)\]\]', r'**\1**', content)
            st.markdown(content)
            st.divider()
            # Download article
            st.download_button(
                "📄 Download article (.md)",
                data=art["content"].encode(),
                file_name=f"{slug}.md",
                mime="text/markdown",
            )
            # Edit option (for power users)
            with st.expander("✏️ Edit this article"):
                new_content = st.text_area("Content (markdown)", value=art["content"], height=400, key=f"edit_{slug}")
                if st.button("Save changes", key=f"save_{slug}"):
                    wiki["articles"][slug]["content"] = new_content
                    wiki["articles"][slug]["last_updated"] = datetime.date.today().isoformat()
                    log(f"edit | {art['title']} — manually edited")
                    st.success("Saved")
                    st.rerun()
        else:
            # No article selected: show the onboarding text.
            st.markdown("### Welcome to the Nursing Knowledge Base")
            st.markdown("""
This wiki is a living knowledge base for nursing education, powered by Claude AI.
**Getting started:**
1. **Browse** articles using the list on the left — click any article to read it
2. **Add Sources** — paste guidelines, articles, or clinical notes
3. **Compile** — Claude integrates your sources into the wiki
4. **Ask** — ask nursing questions; Claude answers from the wiki
5. **Health Check** — audit the wiki for gaps, contradictions, and suggestions
**Pre-loaded content** covers:
- NMC Code & Proficiency Standards 2018
- ABCDE Assessment and NEWS2
- Drug calculations and the Nine Rights
- PICO and Evidence-Based Practice
- Person-Centred Care and the Six Cs
- Mental Capacity Act 2005
- Safeguarding Adults and Children
- Infection Prevention and Control
- Duty of Candour
*Select an article on the left to begin reading.*
""")
    # Footer disclaimer. NOTE(review): original indentation was lost — assumed
    # tab-level; confirm it was not meant to sit inside the welcome branch.
    st.markdown(f'This tool supports but does not replace clinical judgment. Always refer to current NMC guidelines, your local trust policy, and senior clinical colleagues.', unsafe_allow_html=True)
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# TAB 2: ADD SOURCES — ingest raw material (URL-fetched PDF, uploaded PDF, or
# pasted text) into wiki["sources"] for later compilation.
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
with tab_sources:
    st.markdown("### Add Raw Sources")
    st.markdown("""
Add source material to the wiki. Claude will integrate it when you run **Compile**.
Suitable sources include NICE clinical guidelines, NMC documents, NHS trust protocols,
research papers, textbook chapters, or clinical audit findings — as **PDF or pasted text**.
Large PDFs (100+ pages) are supported; text is extracted from every page automatically.
""")
    col_add, col_list_src = st.columns([1, 1])
    with col_add:
        st.markdown("#### Add New Source")
        src_title = st.text_input("Source title", placeholder="e.g. NICE NG51 — Sepsis (2016)")
        src_type = st.selectbox("Type", ["Clinical Guideline", "Research Paper", "NMC Document", "NHS Protocol", "Textbook", "Other"])
        input_method = st.radio(
            "Input method",
            ["PDF from URL", "Upload PDF", "Paste text"],
            horizontal=True,
            help="Use 'PDF from URL' for large files — the server fetches it directly.",
        )
        src_content = ""
        pdf_meta = None
        if input_method == "PDF from URL":
            st.caption("Paste a direct link to any PDF — NICE guidelines, NMC documents, research papers, etc. The server fetches it, so there is no size limit.")
            pdf_url = st.text_input(
                "PDF URL",
                placeholder="https://www.nice.org.uk/guidance/ng51/resources/sepsis-pdf-...",
                key="pdf_url",
            )
            if pdf_url and st.button("Fetch & Extract", key="fetch_pdf"):
                with st.spinner("Fetching PDF from URL..."):
                    try:
                        raw_bytes = fetch_pdf_from_url(pdf_url)
                        extracted, page_count = extract_pdf_text(raw_bytes)
                        src_content = extracted
                        pdf_meta = {"pages": page_count, "size_kb": len(raw_bytes) // 1024}
                        # Stash in session state so the extracted text survives
                        # the rerun triggered by the next widget interaction.
                        st.session_state["fetched_pdf_content"] = extracted
                        st.session_state["fetched_pdf_meta"] = pdf_meta
                        st.success(f"Fetched {page_count} pages / {len(extracted):,} characters")
                        with st.expander("Preview extracted text"):
                            st.text(extracted[:1500] + ("..." if len(extracted) > 1500 else ""))
                    except Exception as e:
                        st.error(f"Fetch failed: {e}")
            # Persist fetched content across reruns
            if not src_content and st.session_state.get("fetched_pdf_content"):
                src_content = st.session_state["fetched_pdf_content"]
                pdf_meta = st.session_state.get("fetched_pdf_meta")
        elif input_method == "Upload PDF":
            if not _PDF_AVAILABLE:
                st.error("pypdf not installed — PDF upload unavailable.")
            else:
                st.caption("For large PDFs (>50 MB) use 'PDF from URL' instead — HF Spaces limits browser uploads.")
                uploaded_pdf = st.file_uploader(
                    "Upload PDF",
                    type=["pdf"],
                    key="pdf_upload",
                )
                if uploaded_pdf is not None:
                    with st.spinner(f"Extracting text from {uploaded_pdf.name}..."):
                        raw_bytes = uploaded_pdf.read()
                        try:
                            extracted, page_count = extract_pdf_text(raw_bytes)
                            src_content = extracted
                            pdf_meta = {"pages": page_count, "size_kb": len(raw_bytes) // 1024}
                            st.success(f"Extracted {page_count} pages / {len(extracted):,} characters")
                            with st.expander("Preview extracted text"):
                                st.text(extracted[:1500] + ("..." if len(extracted) > 1500 else ""))
                        except Exception as e:
                            st.error(f"PDF extraction failed: {e}")
                    # Derive a default title from the file name if none was typed.
                    if not src_title and uploaded_pdf:
                        src_title = uploaded_pdf.name.replace(".pdf", "").replace("_", " ")
        else:
            # Paste text
            src_content = st.text_area(
                "Paste text here",
                height=300,
                placeholder="Paste the full text of the guideline, paper, or document here...",
            )
        if st.button("➕ Add Source", type="primary", disabled=not (src_title and src_content)):
            # NOTE(review): ids derive from the current count, so deleting a
            # source and adding a new one can reuse an existing id and
            # overwrite it — TODO confirm whether this is intended.
            src_id = f"src_{len(wiki.get('sources', {})) + 1:04d}"
            if "sources" not in wiki:
                wiki["sources"] = {}
            entry = {
                "title": src_title,
                "type": src_type,
                "content": src_content,
                "added": datetime.date.today().isoformat(),
                "processed": False,  # flipped to True once Compile ingests it
            }
            if pdf_meta:
                entry["pdf_pages"] = pdf_meta["pages"]
                entry["pdf_size_kb"] = pdf_meta["size_kb"]
            wiki["sources"][src_id] = entry
            log(f"ingest | Added source: {src_title} ({len(src_content):,} chars)")
            # Clear the URL-fetch stash so it isn't re-added next time.
            st.session_state.pop("fetched_pdf_content", None)
            st.session_state.pop("fetched_pdf_meta", None)
            st.success(f"Source added: **{src_title}**")
            st.rerun()
    with col_list_src:
        st.markdown("#### Sources")
        sources = wiki.get("sources", {})
        if not sources:
            st.info("No sources added yet. Add your first source on the left.")
        else:
            for src_id, src in sources.items():
                status = "✅ Compiled" if src.get("processed") else "⏳ Pending compile"
                with st.expander(f"{src['title']} — {status}"):
                    st.caption(f"Type: {src['type']} | Added: {src['added']}")
                    st.text(src["content"][:400] + ("..." if len(src["content"]) > 400 else ""))
                    if st.button("🗑️ Remove", key=f"del_{src_id}"):
                        # st.rerun() aborts the loop right after the delete, so
                        # no mutation-during-iteration issue arises in practice.
                        del wiki["sources"][src_id]
                        log(f"delete | Removed source: {src['title']}")
                        st.rerun()
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# TAB 3: COMPILE — run pending sources through Claude and fold the results
# into the wiki, then rebuild the index.
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
with tab_compile:
    st.markdown("### Compile Wiki")
    st.markdown("""
Claude reads your raw sources and integrates them into the wiki — updating existing articles,
creating new ones, adding cross-references, and keeping the index current.
This is the core Karpathy pattern: **you add sources, Claude maintains the knowledge base.**
""")
    client = get_client()
    if not client:
        st.warning("Enter your Anthropic API key in the sidebar to use Compile.")
    else:
        # Only sources not yet marked processed are compiled.
        pending = {sid: s for sid, s in wiki.get("sources", {}).items() if not s.get("processed")}
        if not pending:
            st.info("No pending sources. Add sources in the **Add Sources** tab, then compile.")
        else:
            st.markdown(f"**{len(pending)} source(s) ready to compile:**")
            for src_id, src in pending.items():
                st.markdown(f"- {src['title']} ({src['type']})")
            model = st.selectbox("Model", ["claude-sonnet-4-6", "claude-opus-4-6"],
                                 help="Sonnet is faster and cheaper; Opus produces richer articles.")
            if st.button("🔨 Compile Now", type="primary"):
                progress = st.progress(0)
                status = st.empty()
                results_container = st.container()
                for i, (src_id, src) in enumerate(pending.items()):
                    char_count = len(src["content"])
                    # Surface when a long document will be processed in chunks.
                    chunk_note = f" — {char_count:,} chars, will chunk" if char_count > 7000 else ""
                    status.markdown(f"⚙️ Compiling: **{src['title']}** ({i+1}/{len(pending)}){chunk_note}...")
                    try:
                        result = compile_source(
                            client=client,
                            source_title=src["title"],
                            source_content=src["content"],
                            existing_index=wiki.get("index_summary", ""),
                            existing_articles=wiki["articles"],
                            model=model,
                        )
                        updated_count = len(result.get("articles_updated", []))
                        created_count = len(result.get("articles_created", []))
                        for art in result.get("articles_updated", []):
                            add_or_update_article(art)
                        for art in result.get("articles_created", []):
                            add_or_update_article(art)
                        wiki["sources"][src_id]["processed"] = True
                        log(f"compile | {src['title']} — updated {updated_count} articles, created {created_count} new articles")
                        with results_container:
                            st.success(f"✅ **{src['title']}**: {updated_count} updated, {created_count} created")
                            if result.get("summary"):
                                st.caption(result["summary"])
                    except Exception as e:
                        # A failed source stays unprocessed so it can be retried.
                        with results_container:
                            st.error(f"❌ Failed to compile {src['title']}: {e}")
                    progress.progress((i + 1) / len(pending))
                # Rebuild index
                status.markdown("📑 Rebuilding wiki index...")
                try:
                    new_index = rebuild_index(client, wiki["articles"], model=model)
                    wiki["index_summary"] = new_index
                    log(f"index | Wiki index rebuilt — {len(wiki['articles'])} articles")
                except Exception as e:
                    st.warning(f"Index rebuild failed: {e}")
                status.markdown("✅ Compilation complete!")
                progress.progress(1.0)
                # NOTE(review): this rerun immediately wipes the per-source
                # results shown above — TODO confirm this is intended.
                st.rerun()
    # Manual rebuild index
    st.divider()
    st.markdown("**Rebuild Index**")
    st.caption("Regenerate the wiki index from all current articles (useful after manual edits).")
    if st.button("📑 Rebuild Index"):
        if client:
            with st.spinner("Rebuilding..."):
                try:
                    new_index = rebuild_index(client, wiki["articles"])
                    wiki["index_summary"] = new_index
                    log("index | Manual index rebuild")
                    st.success("Index rebuilt")
                    st.text_area("Updated Index", value=new_index, height=300)
                except Exception as e:
                    st.error(f"Failed: {e}")
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# TAB 4: Q&A — answer questions from wiki content; optionally file good
# answers back into the wiki as new articles.
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
with tab_qa:
    st.markdown("### Ask the Wiki")
    # NOTE(review): surrounding HTML markup appears stripped from this literal.
    st.markdown('Answers are generated from the wiki content and are for educational use only. This tool does not replace clinical judgment or current NMC/NICE guidelines.', unsafe_allow_html=True)
    client = get_client()
    if not client:
        st.warning("Enter your Anthropic API key in the sidebar to use Q&A.")
    else:
        # Example questions — clicking one pre-fills the question box.
        st.markdown("**Example questions:**")
        examples = [
            "What are the five statutory principles of the Mental Capacity Act?",
            "How do I calculate an IV drip rate for 1000 mL over 6 hours?",
            "What does the NMC Code say about delegation?",
            "What is the NEWS2 threshold for emergency escalation?",
            "How do I apply the PICO framework to a clinical question about wound care?",
        ]
        cols = st.columns(3)
        for i, ex in enumerate(examples):
            if cols[i % 3].button(ex, key=f"ex_{i}", use_container_width=True):
                st.session_state["qa_question"] = ex
        # Question input
        question = st.text_area(
            "Your question",
            value=st.session_state.get("qa_question", ""),
            height=100,
            placeholder="Ask any nursing question...",
            key="qa_input",
        )
        model = st.selectbox("Model", ["claude-sonnet-4-6", "claude-opus-4-6"], key="qa_model")
        col_ask, col_file = st.columns([3, 1])
        ask_clicked = col_ask.button("💬 Ask", type="primary", disabled=not question)
        file_last = col_file.checkbox("File answer to wiki", value=False,
                                      help="Save valuable Q&A answers as new wiki articles")
        if ask_clicked and question:
            with st.spinner("Searching wiki and composing answer..."):
                try:
                    answer = answer_question(client, question, wiki["articles"], model=model)
                    # Add to history
                    st.session_state.qa_history.append({
                        "question": question,
                        "answer": answer,
                        "timestamp": datetime.datetime.now().isoformat(),
                    })
                    log(f"query | {question[:80]}")
                    # Optionally file the answer to the wiki as a new article.
                    if file_last:
                        with st.spinner("Filing answer to wiki..."):
                            new_art = file_answer_to_wiki(client, question, answer, model="claude-haiku-4-5-20251001")
                            if new_art:
                                add_or_update_article(new_art)
                                log(f"file | Created article from Q&A: {new_art['title']}")
                                st.success(f"Filed as new article: **{new_art['title']}**")
                    st.session_state["qa_question"] = ""
                except Exception as e:
                    st.error(f"Error: {e}")
        # Display Q&A history (newest first)
        if st.session_state.qa_history:
            st.divider()
            st.markdown("### Recent Questions")
            for qa in reversed(st.session_state.qa_history[-10:]):
                with st.expander(f"❓ {qa['question'][:80]}{'...' if len(qa['question'])>80 else ''}", expanded=False):
                    st.markdown(qa["answer"])
                    st.download_button(
                        "📄 Save answer",
                        data=f"# {qa['question']}\n\n{qa['answer']}".encode(),
                        file_name="answer.md",
                        mime="text/markdown",
                        key=f"dl_{qa['timestamp']}",
                    )
            if st.button("🗑️ Clear history"):
                st.session_state.qa_history = []
                st.rerun()
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# TAB 5: HEALTH CHECK (LINT) — Claude audits the wiki and suggests fixes and
# new articles; the last report persists in session state across reruns.
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
with tab_lint:
    st.markdown("### Wiki Health Check")
    st.markdown("""
Claude audits the wiki for:
- **Contradictions** between articles
- **Stale content** that may need updating
- **Orphan articles** with few cross-references
- **Missing links** between related articles
- **Clinical safety gaps** — important missing content
- **Suggested new articles** to expand the wiki
""")
    client = get_client()
    if not client:
        st.warning("Enter your Anthropic API key in the sidebar to run a health check.")
    else:
        model = st.selectbox("Model", ["claude-sonnet-4-6", "claude-opus-4-6"], key="lint_model")
        if st.button("🔍 Run Health Check", type="primary"):
            with st.spinner("Auditing wiki... this may take a moment"):
                try:
                    report = lint_wiki(client, wiki["articles"], wiki.get("index_summary", ""), model=model)
                    st.session_state.lint_report = report
                    log(f"lint | Health check completed — {report.get('total_issues', 0)} issues found")
                except Exception as e:
                    st.error(f"Health check failed: {e}")
        # Render the most recent report, if any.
        report = st.session_state.lint_report
        if report:
            st.divider()
            # Overall status
            health = report.get("overall_health", "Unknown")
            health_color = {"Good": "#007F3B", "Fair": "#FFB81C", "Needs attention": "#D93025"}.get(health, "#666")
            # NOTE(review): the HTML that presumably consumed `health_color`
            # appears stripped from this literal.
            st.markdown(f'Overall Health: {health}', unsafe_allow_html=True)
            st.markdown(report.get("summary", ""))
            col_issues, col_suggestions = st.columns([1, 1])
            with col_issues:
                st.markdown(f"### Issues ({report.get('total_issues', 0)})")
                for issue in report.get("issues", []):
                    sev = issue.get("severity", "low")
                    # NOTE(review): `css` is unused as written — the HTML that
                    # used it (class "issue-<sev>") looks lost.
                    css = f"issue-{sev}"
                    icon = {"high": "🔴", "medium": "🟡", "low": "🟢"}.get(sev, "●")
                    st.markdown(
                        f'{icon} {issue.get("type", "").replace("_", " ").title()} '
                        f'— {issue.get("article", "wiki-wide")}'
                        f'{issue.get("description", "")}'
                        f'Fix: {issue.get("recommendation", "")}',
                        unsafe_allow_html=True,
                    )
                if report.get("strengths"):
                    st.markdown("### Strengths")
                    for s in report["strengths"]:
                        st.markdown(f"✅ {s}")
            with col_suggestions:
                st.markdown("### Suggested New Articles")
                suggested = report.get("suggested_new_articles", [])
                if not suggested:
                    st.info("No new articles suggested.")
                else:
                    for suggestion in suggested:
                        with st.expander(f"📝 {suggestion['title']} ({suggestion['category']})"):
                            st.caption(suggestion.get("rationale", ""))
                            if suggestion.get("key_topics"):
                                st.markdown("**Key topics**: " + ", ".join(suggestion["key_topics"]))
                            # One-click generation of the suggested article.
                            if st.button(f"Generate article: {suggestion['title'][:30]}...",
                                         key=f"gen_{suggestion['title'][:20]}"):
                                with st.spinner(f"Generating: {suggestion['title']}..."):
                                    try:
                                        new_art = generate_missing_article(
                                            client,
                                            suggestion["title"],
                                            suggestion["category"],
                                            suggestion.get("key_topics", []),
                                            wiki.get("index_summary", ""),
                                            model=model,
                                        )
                                        add_or_update_article(new_art)
                                        log(f"generate | Created article: {suggestion['title']} (from lint suggestion)")
                                        st.success(f"Created: **{new_art['title']}**")
                                        st.rerun()
                                    except Exception as e:
                                        st.error(f"Failed: {e}")
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# TAB 6: LOG — read-only view of the append-only operation log.
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
with tab_log:
    st.markdown("### Operation Log")
    st.caption("Append-only chronological record of all wiki operations.")
    log_entries = wiki.get("log", [])
    if not log_entries:
        st.info("No log entries yet.")
    else:
        # Show newest first
        for entry in reversed(log_entries):
            st.markdown(entry)
        st.download_button(
            "📄 Download Log",
            data="\n\n".join(log_entries).encode(),
            file_name=f"wiki_log_{datetime.date.today()}.md",
            mime="text/markdown",
        )