"""Interpretability page: plain-English verdict explanations with source
citations, plus a Q&A box over a single bidder's evaluation."""

import streamlit as st

from core.config import BIDDER_NAMES, DATA_DIR
from core.fallback import load_criteria
from core.llm_client import LLM, LLMUnavailable
from core.pdf_utils import render_page_to_image
from core.schemas import Criterion
from ui.components import confidence_bar, verdict_pill

# (background tint, accent colour, label) per verdict.
_VERDICT_CFG = {
    "eligible": ("rgba(34,197,94,0.12)", "#22C55E", "✅ PASSED"),
    "not_eligible": ("rgba(239,68,68,0.12)", "#EF4444", "❌ FAILED"),
    "needs_review": ("rgba(245,158,11,0.12)", "#F59E0B", "⚠️ NEEDS REVIEW"),
}

# Plain-English renderings of machine-readable eligibility rules.
_RULE_PLAIN = {
    "numeric_threshold": lambda r: (
        f"must be {r['operator']} {r['value']:,} {r.get('unit') or ''}".strip()
    ),
    "count_threshold": lambda r: f"must have completed at least {int(r['value'])}",
    "certification_present": lambda _: "valid certificate must be present",
    "document_present": lambda _: "supporting document must be present",
}


def _get_criteria() -> list[Criterion]:
    data = st.session_state.get("criteria")
    return [Criterion(**c) for c in data] if data else load_criteria()


def _explain(v: dict, crit: Criterion | None) -> str:
    """Turn a verdict dict into a one-paragraph plain-English explanation."""
    verdict = v.get("verdict", "")
    extracted = v.get("extracted_value", "") or ""
    reason = v.get("reason", "") or ""
    if not crit:
        return reason
    rule = crit.rule
    rule_desc = _RULE_PLAIN.get(rule.type, lambda _: "")(rule.model_dump())
    if verdict == "eligible":
        return (f"Found **{extracted}**. " if extracted else "") + reason
    elif verdict == "not_eligible":
        return (
            (f"Found **{extracted}** — does not meet requirement ({rule_desc}). "
             if extracted else f"Requirement: {rule_desc}. ")
            + reason
        )
    else:
        return (f"Extracted value: **{extracted}**. " if extracted else "") + reason


def _qa_context(bid: str, verdicts: list[dict], criteria: list[Criterion]) -> str:
    """Build the plain-text evaluation summary handed to the Q&A model."""
    cm = {c.id: c for c in criteria}
    lines = [f"BIDDER: {BIDDER_NAMES.get(bid, bid)}", ""]
    for v in verdicts:
        c = cm.get(v["criterion_id"])
        rule = _RULE_PLAIN.get(c.rule.type, lambda _: "")(c.rule.model_dump()) if c else ""
        lines += [
            f"{v['criterion_id']} — {c.title if c else '?'} "
            f"[{'Mandatory' if c and c.mandatory else 'Optional'}]: {v['verdict'].upper()}",
            f"  Requirement: {rule}",
            f"  Extracted: {v.get('extracted_value') or 'not found'}",
            f"  Confidence: {v.get('combined_confidence', 0):.0%}",
            f"  Reason: {v.get('reason', '')}",
        ]
        if v.get("source"):
            s = v["source"]
            lines.append(
                f"  Evidence: {s.get('doc_name')} page {s.get('page')} "
                f"[{s.get('source_type')}]"
            )
            if s.get("snippet"):
                lines.append(f'  Snippet: "{s["snippet"][:200]}"')
        lines.append("")
    return "\n".join(lines)


def _answer(question: str, context: str) -> str:
    """Answer via the LLM, falling back to keyword matching when it is down."""
    system = (
        "You are a procurement compliance assistant. Answer questions about an AI-generated "
        "tender evaluation in plain professional English. Always cite specific document names "
        "and page numbers. Be concise (2-4 sentences). Never invent information not in the "
        'context. Return JSON: {"answer": ""}'
    )
    try:
        result = LLM().chat_json(system, f"{context}\n\nQUESTION: {question}")
        return result.get("answer", "")
    except LLMUnavailable:
        return _rule_answer(question, context)


def _rule_answer(q: str, context: str) -> str:
    """Deterministic fallback: quote relevant context lines by keyword."""
    q = q.lower()
    lines = context.splitlines()
    if any(w in q for w in ["reject", "fail", "not eligible", "disqualif"]):
        fails = [ln.strip() for ln in lines if "NOT_ELIGIBLE" in ln or "NEEDS_REVIEW" in ln]
        return ("Failing criteria: " + "; ".join(fails[:3]) + ".") if fails else "No failing criteria found."
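    # The keyword branches below are a deliberately simple offline fallback:
    # they quote matching lines from the plain-text context rather than
    # generating new prose, so nothing can be invented while the LLM is down.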
    if any(w in q for w in ["pass", "eligible", "meet"]):
        passes = [ln.strip() for ln in lines if "ELIGIBLE" in ln and "NOT_ELIGIBLE" not in ln]
        return ("Passing criteria: " + "; ".join(passes[:3]) + ".") if passes else "No passing criteria."
    if any(w in q for w in ["turnover", "financial", "c1", "revenue"]):
        rel = [ln.strip() for ln in lines
               if "C1" in ln or "turnover" in ln.lower() or "Extracted" in ln]
        return " ".join(rel[:4]) if rel else "Turnover information not found."
    return "Live LLM is unavailable. The evaluation summary above contains the full details."

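# Verdict dicts consumed below are assumed to look like this sketch (field
# names inferred from the accesses in _explain and _qa_context; the
# authoritative schema lives with the evaluation pipeline):
#
#     {
#         "criterion_id": "C1",
#         "verdict": "eligible",              # or "not_eligible" / "needs_review"
#         "extracted_value": "EUR 4,200,000",
#         "combined_confidence": 0.91,
#         "reason": "Stated turnover exceeds the threshold.",
#         "source": {"doc_name": "financials.pdf", "page": 3,
#                    "source_type": "text_pdf", "snippet": "..."},
#     }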

def render() -> None:
    st.markdown(
        '<div style="margin-bottom:1rem">'
        '<h2 style="margin:0">Interpretability</h2>'
        '<p style="opacity:0.75;margin:0.25rem 0 0">'
        'Plain-English explanations with source citations. '
        'Ask any question about the evaluation.</p>'
        '</div>',
        unsafe_allow_html=True,
    )

    vdata = st.session_state.get("verdicts", {})
    if not vdata:
        st.info("No results yet. Load the pre-computed demo from Overview, or run evaluation.")
        return

    criteria = _get_criteria()
    crit_map = {c.id: c for c in criteria}
    bid = st.selectbox(
        "Select bidder",
        options=list(vdata.keys()),
        format_func=lambda x: BIDDER_NAMES.get(x, x),
    )
    verdicts = vdata.get(bid, [])
    if not verdicts:
        st.warning("No verdicts for this bidder.")
        return

    company = BIDDER_NAMES.get(bid, bid)
    mand = [v for v in verdicts
            if crit_map.get(v["criterion_id"]) and crit_map[v["criterion_id"]].mandatory]
    failed = [v for v in mand if v["verdict"] == "not_eligible"]
    review = [v for v in mand if v["verdict"] == "needs_review"]
    passed = [v for v in mand if v["verdict"] == "eligible"]

    if failed:
        ov, fg, icon = "not_eligible", "#EF4444", "❌"
        summary = f"Failed {len(failed)} mandatory criterion/criteria. Must meet all to qualify."
    elif review:
        ov, fg, icon = "needs_review", "#F59E0B", "⚠️"
        summary = (f"Passed {len(passed)} mandatory criteria, but {len(review)} "
                   f"require officer sign-off.")
    else:
        ov, fg, icon = "eligible", "#22C55E", "✅"
        summary = f"All {len(passed)} mandatory criteria satisfied."

    bg, _, label = _VERDICT_CFG.get(ov, ("rgba(128,128,128,0.1)", "#888", ov))
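    # Overall verdict banner: icon, company name, and a one-line summary,
    # tinted with the _VERDICT_CFG colours for the computed overall verdict.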
    st.markdown(
        f'<div style="display:flex;align-items:center;gap:0.8rem;background:{bg};'
        f'border-left:4px solid {fg};border-radius:8px;padding:0.9rem 1.1rem;'
        f'margin:0.5rem 0 1rem">'
        f'<div style="font-size:1.6rem">{icon}</div>'
        f'<div>'
        f'<div style="font-weight:700;color:{fg}">{company} — {label}</div>'
        f'<div style="opacity:0.85">{summary}</div>'
        f'</div>'
        f'</div>',
        unsafe_allow_html=True,
    )
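    # One bordered card per criterion: verdict pill and confidence on the
    # left, plain-English explanation and source evidence on the right.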
    st.markdown(
        '<div style="font-weight:700;font-size:1.05rem;margin:0.75rem 0 0.25rem">'
        'Criterion-by-Criterion Breakdown</div>',
        unsafe_allow_html=True,
    )

    for v in verdicts:
        crit = crit_map.get(v["criterion_id"])
        verdict = v.get("verdict", "needs_review")
        cbg, cfg_, clabel = _VERDICT_CFG.get(
            verdict, ("rgba(128,128,128,0.1)", "var(--text-color)", verdict)
        )
        mand_txt = "Mandatory" if (crit and crit.mandatory) else "Optional"
        title = crit.title if crit else v["criterion_id"]

        with st.container(border=True):
            left, right = st.columns([1, 3])
            with left:
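                # Verdict pill, a Mandatory/Optional tag, and the combined
                # confidence score rendered as a bar.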
                st.markdown(
                    f'<div style="text-align:center">'
                    f'<div style="background:{cbg};color:{cfg_};border-radius:999px;'
                    f'display:inline-block;padding:0.25rem 0.7rem;font-weight:700">'
                    f'{clabel}</div>'
                    f'<div style="opacity:0.7;font-size:0.8rem;margin-top:0.35rem">'
                    f'{mand_txt}</div>'
                    f'</div>',
                    unsafe_allow_html=True,
                )
                confidence_bar(v.get("combined_confidence", 0.0), "Certainty")
            with right:
' f'{v["criterion_id"]}: {title}
', unsafe_allow_html=True, ) explanation = _explain(v, crit) if explanation: st.markdown( f'

                if explanation:
                    st.markdown(
                        f'<div style="opacity:0.9;line-height:1.5">{explanation}</div>',
                        unsafe_allow_html=True,
                    )

                src = v.get("source") or {}
                if src:
                    doc, page = src.get("doc_name", ""), src.get("page", "")
                    tier_labels = {
                        "text_pdf": "typed PDF",
                        "tesseract": "Tesseract OCR",
                        "vision_llm": "Vision LLM",
                    }
                    tier = tier_labels.get(src.get("source_type", ""), "")
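                    # Evidence strip: source document, page number, and the
                    # extraction tier that produced the snippet.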
                    st.markdown(
                        f'<div style="display:flex;align-items:center;gap:0.4rem;'
                        f'font-size:0.85rem;opacity:0.8;margin-top:0.4rem">'
                        f'<span>📄</span>'
                        f'<span>{doc}</span>'
                        f'<span>page {page}</span>'
                        f'<span>·</span>'
                        f'<span>{tier}</span>'
                        f'</div>',
                        unsafe_allow_html=True,
                    )

                    doc_path = DATA_DIR / "bidders" / bid / doc
                    if doc_path.exists() and doc_path.suffix.lower() == ".pdf":
                        with st.expander(f"View source: {doc}, page {page}", expanded=False):
                            try:
                                img = render_page_to_image(doc_path, int(page))
                                st.image(img, caption=f"{doc} — Page {page}",
                                         use_column_width=True)
                            except Exception:
                                st.caption("Page preview unavailable.")
                    elif doc_path.exists() and doc_path.suffix.lower() in {".png", ".jpg", ".jpeg"}:
                        with st.expander(f"View: {doc}", expanded=False):
                            st.image(str(doc_path), use_column_width=True)

    st.divider()
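    # Free-form Q&A over this bidder's evaluation. The LLM path cites
    # documents and pages; _rule_answer is the deterministic offline fallback.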
    st.markdown(
        '<div style="margin-top:0.5rem">'
        '<div style="font-weight:700;font-size:1.05rem">Ask About This Evaluation</div>'
        '<div style="opacity:0.75;font-size:0.9rem">'
        'Answers cite specific documents and pages.</div>'
        '</div>',
        unsafe_allow_html=True,
    )

    with st.expander("Example questions", expanded=False):
        for e in [
            "Why was this bidder rejected?",
            "What turnover figure was found, and from which document?",
            "Does this bidder have a valid ISO 9001:2015 certificate?",
            "Why is the turnover verdict in review?",
        ]:
            st.markdown(f"- _{e}_")

    question = st.text_input(
        "Question",
        placeholder="Ask anything about this bidder's evaluation…",
        key=f"qa_{bid}",
        label_visibility="collapsed",
    )
    if st.button("Get Answer", type="primary", key=f"qa_btn_{bid}"):
        if not question.strip():
            st.warning("Please enter a question.")
        else:
            context = _qa_context(bid, verdicts, criteria)
            with st.spinner("Looking up the answer…"):
                answer = _answer(question, context)
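            # Show the answer as a card, with the raw context exposed below it
            # so reviewers can audit exactly what the model was given.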
            st.markdown(
                f'<div style="background:rgba(59,130,246,0.08);border-radius:8px;'
                f'padding:0.9rem 1.1rem;margin-top:0.5rem">'
                f'<div style="font-weight:700;margin-bottom:0.3rem">Answer</div>'
                f'<div style="line-height:1.55">{answer}</div>'
                f'</div>',
                unsafe_allow_html=True,
            )
            with st.expander("Full context used", expanded=False):
                st.code(context, language="text")
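# Typical wiring (an assumption; the page router lives outside this module):
#
#     from ui.pages import interpretability
#     interpretability.render()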