"""RECON — Gradio front-end for a multi-agent, temporally-aware ML literature
research pipeline (live Semantic Scholar retrieval, staleness detection,
contradiction flagging, per-session memory).

NOTE(review): this file was recovered from a mangled copy in which the HTML
markup embedded in the string literals had been stripped.  All HTML below is a
reconstruction from the surviving text, colors and layout hints — it should be
visually compared against the original app before shipping.
"""

import logging
import os
import re
import tempfile
import uuid

import gradio as gr
from dotenv import load_dotenv

import preload  # noqa: F401 — imported for its side effects (warm-up/preloading)
from src.graph import run_recon
from src.memory import init_db, load_session

load_dotenv()
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)

init_db()

# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------

# critic verdict -> (emoji, accent color, human-readable label)
VERDICT_META = {
    "PASS": ("✅", "#22c55e", "Pass"),
    "FORCED_PASS": ("⚠️", "#f59e0b", "Forced Pass"),
    "STALE": ("🕰️", "#f59e0b", "Stale"),
    "CONTRADICTED": ("⚡", "#ef4444", "Contradicted"),
    "INSUFFICIENT": ("📉", "#ef4444", "Insufficient"),
}

# claim confidence level -> (emoji, color)
CONF_META = {
    "high": ("🟢", "#22c55e"),
    "medium": ("🟡", "#f59e0b"),
    "low": ("🔴", "#ef4444"),
}

# Pattern for inline citations like "[Smith et al., 2023a]".
_CITATION_RE = re.compile(
    r"(\[[A-Za-z][^,\[\]]{1,40},?\s*(?:et al\.?)?,?\s*\d{4}[a-z]?\])"
)


def _highlight_citations(text: str) -> str:
    """Wrap [Author et al., Year] citations in styled spans."""
    return _CITATION_RE.sub(
        r'<span style="color:#3b82f6;font-weight:600;">\1</span>',
        text,
    )


# temporal reliability signal -> badge color
SIGNAL_COLORS = {
    "FOUNDATIONAL": "#22c55e",
    "CURRENT": "#3b82f6",
    "DECLINING": "#f59e0b",
    "SUPERSEDED": "#ef4444",
}


def _paper_cards_html(papers, reliability_scores: dict | None = None) -> str:
    """Render retrieved papers as styled HTML cards.

    Args:
        papers: ranked paper objects exposing ``title``, ``authors``, ``year``,
            ``citation_count``, ``source``, ``abstract``, ``hybrid_score`` and
            ``paper_id`` (project type — shape inferred from usage here).
        reliability_scores: optional map of ``paper_id`` to reliability info,
            either a dict with a ``"dominant_signal"`` key or an object with a
            ``dominant_signal`` attribute.

    Returns:
        HTML string; a muted placeholder when no papers were retrieved.
    """
    reliability_scores = reliability_scores or {}
    if not papers:
        return "<div style='color:#6b7280;padding:8px;'>No papers retrieved.</div>"

    cards = []
    for p in papers[:8]:  # cap the panel at the top 8 results
        # Traffic-light color for the hybrid retrieval score.
        score_color = (
            "#22c55e" if p.hybrid_score >= 0.6
            else "#f59e0b" if p.hybrid_score >= 0.4
            else "#ef4444"
        )
        authors = (
            ", ".join(p.authors[:2]) + (" et al." if len(p.authors) > 2 else "")
            if p.authors else "Unknown"
        )
        abstract_preview = (
            (p.abstract[:180] + "...")
            if p.abstract and len(p.abstract) > 180
            else (p.abstract or "")
        )

        rs = reliability_scores.get(p.paper_id)
        if rs:
            dominant = (
                rs.get("dominant_signal", "DECLINING")
                if isinstance(rs, dict) else rs.dominant_signal
            )
            sig_color = SIGNAL_COLORS.get(dominant, "#6b7280")
            signal_badge = (
                f'<span style="color:{sig_color};border:1px solid {sig_color};'
                f'border-radius:10px;padding:1px 8px;font-size:0.72em;'
                f'margin-left:6px;">{dominant}</span>'
            )
        else:
            signal_badge = ""

        cards.append(f"""
<div style="border:1px solid #e5e7eb;border-radius:10px;padding:10px 12px;margin-bottom:8px;">
  <div style="display:flex;justify-content:space-between;align-items:baseline;">
    <strong>{p.title}</strong>
    <span style="white-space:nowrap;">
      <span style="color:{score_color};font-weight:700;">{p.hybrid_score:.3f}</span>
      {signal_badge}
    </span>
  </div>
  <div style="color:#6b7280;font-size:0.85em;">
    {authors} &middot; {p.year} &middot; {p.citation_count:,} citations &middot; {p.source}
  </div>
  <div style="font-size:0.85em;margin-top:4px;">{abstract_preview}</div>
</div>""")
    return "".join(cards)


def _verdict_badge_html(verdict: str, notes: str, retry: int, papers: int,
                        latency: float, decay: str, rewritten: list) -> str:
    """Render the critic debug panel: verdict badge, notes, run stats and any
    rewritten queries produced by retry loops."""
    emoji, color, label = VERDICT_META.get(verdict, ("❓", "#6b7280", verdict))

    rw_html = ""
    if rewritten:
        items = "".join(f"<li>{q}</li>" for q in rewritten)
        rw_html = (
            f"<details style='margin-top:6px;'>"
            f"<summary style='cursor:pointer;color:#6b7280;'>Rewritten queries</summary>"
            f"<ul style='margin:4px 0 0 16px;font-size:0.85em;'>{items}</ul>"
            f"</details>"
        )

    return f"""
<div style="border-left:4px solid {color};border-radius:6px;padding:10px 12px;background:#f9fafb;">
  <div>
    <strong style="color:{color};">{emoji} {label}</strong>
    <span style="float:right;color:#6b7280;font-size:0.85em;">{latency:.0f}ms</span>
  </div>
  <div style="font-size:0.9em;margin:4px 0;">{notes}</div>
  <div style="color:#6b7280;font-size:0.82em;">
    📄 {papers} papers &nbsp; 🔁 {retry} retries &nbsp; 📏 {decay} decay
  </div>
  {rw_html}
</div>"""


def _claims_html(claims) -> str:
    """Render extracted claims as an HTML table of confidence / claim text /
    source title / source year.  Flagged claims get a warning marker."""
    if not claims:
        return "<div style='color:#6b7280;padding:8px;'>No claims extracted.</div>"

    rows = ""
    for c in claims:
        emoji, color = CONF_META.get(c.confidence, ("⚪", "#6b7280"))
        flag = " ⚠️" if c.flagged else ""
        rows += f"""
<tr style="border-top:1px solid #e5e7eb;">
  <td style="color:{color};white-space:nowrap;padding:4px 8px;">{emoji} {c.confidence.upper()}</td>
  <td style="padding:4px 8px;">{c.text}{flag}</td>
  <td style="color:#6b7280;padding:4px 8px;">{c.source_title[:35]}...</td>
  <td style="padding:4px 8px;">{c.source_year}</td>
</tr>"""

    return f"""
<table style="width:100%;border-collapse:collapse;font-size:0.88em;">
  <thead>
    <tr style="text-align:left;color:#6b7280;">
      <th style="padding:4px 8px;">Confidence</th>
      <th style="padding:4px 8px;">Claim</th>
      <th style="padding:4px 8px;">Source</th>
      <th style="padding:4px 8px;">Year</th>
    </tr>
  </thead>
  <tbody>{rows}</tbody>
</table>"""


def _session_html(session_ctx, session_id: str) -> str:
    """Render the session memory panel: prior queries for this session plus up
    to three flagged contradictions."""
    turns = len(session_ctx.prior_queries)
    if turns == 0:
        return (
            f"<div style='color:#6b7280;padding:8px;'>"
            f"Session {session_id[:8]}... &mdash; no turns yet.</div>"
        )

    items = "".join(
        f"<li>{q[:70]}</li>" for q in session_ctx.prior_queries
    )

    contradictions = ""
    if session_ctx.flagged_contradictions:
        c_items = "".join(
            f"<li>{c[:80]}</li>"
            for c in session_ctx.flagged_contradictions[:3]
        )
        contradictions = f"""
<div style="margin-top:8px;">
  <strong style="color:#ef4444;">⚡ Contradictions flagged</strong>
  <ul style="margin:4px 0 0 16px;font-size:0.85em;">{c_items}</ul>
</div>"""

    return f"""
<div style="font-size:0.9em;">
  <div>
    <code>{session_id[:8]}...</code>
    <span style="color:#6b7280;">{turns} turn{"s" if turns != 1 else ""}</span>
  </div>
  <div style="margin-top:6px;color:#6b7280;">Queries</div>
  <ul style="margin:4px 0 0 16px;">{items}</ul>
  {contradictions}
</div>"""


# ---------------------------------------------------------------------------
# Core pipeline runner
# ---------------------------------------------------------------------------

def run_query(query, session_id, decay_config, history):
    """Generator driving one research turn.

    Yields tuples of (chat history, session_id, critic HTML, claims HTML,
    papers HTML, session HTML, export markdown, file output) so the UI can
    show an in-progress state before the final result.
    """
    if not query.strip():
        yield history, session_id, "", "", "", "", "", None
        return
    if not session_id.strip():
        session_id = str(uuid.uuid4())

    history = history + [{"role": "user", "content": query}]
    # Interim "running" frame while the pipeline works.
    yield history, session_id, \
        _verdict_badge_html("", "🔍 Running pipeline...", 0, 0, 0, decay_config, []), \
        "", "", "", "", None

    try:
        result = run_recon(query=query, session_id=session_id, decay_config=decay_config)
    except Exception as e:
        # Boundary handler: surface the failure in chat + critic panel.
        logger.error(f"Pipeline error: {e}")
        history = history + [{"role": "assistant", "content": f"❌ Error: {e}"}]
        yield history, session_id, \
            f"<div style='color:#ef4444;padding:8px;'>❌ {e}</div>", \
            "", "", "", "", None
        return

    position = result.get("synthesized_position", "No position generated.")
    highlighted = _highlight_citations(position)
    history = history + [{"role": "assistant", "content": highlighted}]

    verdict = result.get("critic_verdict", "N/A")
    critic_notes = result.get("critic_notes", "")
    retry_count = result.get("retry_count", 0)
    latency = result.get("latency_ms", 0)
    papers_used = len(result.get("retrieved_papers") or [])
    rewritten = result.get("rewritten_questions") or []

    verdict_html = _verdict_badge_html(
        verdict, critic_notes, retry_count, papers_used, latency,
        decay_config, rewritten,
    )
    claims_html = _claims_html(result.get("claim_confidences") or [])
    papers_html = _paper_cards_html(
        result.get("retrieved_papers") or [],
        result.get("paper_reliability_scores") or {},
    )
    session_ctx = load_session(session_id)
    session_html = _session_html(session_ctx, session_id)
    export_md = result.get("export_md", "")

    yield history, session_id, verdict_html, claims_html, papers_html, \
        session_html, export_md, None


def export_md_file(export_md_content, session_id):
    """Write the session's markdown export to a temp file; return its path
    (for gr.File) or None when there is nothing to export / write fails."""
    if not export_md_content.strip():
        return None
    try:
        path = os.path.join(tempfile.gettempdir(), f"recon_{session_id[:8]}.md")
        with open(path, "w", encoding="utf-8") as f:
            f.write(export_md_content)
        return path
    except Exception as e:
        # Best-effort: log and degrade to "no file" rather than crash the UI.
        logger.error(f"Export failed: {e}")
        return None


def new_session():
    """Reset all panels and start a fresh session id."""
    new_id = str(uuid.uuid4())
    return new_id, [], "", "", "", "", "", None


# ---------------------------------------------------------------------------
# UI
# ---------------------------------------------------------------------------

CSS = """
.gradio-container { font-family: 'Inter', system-ui, sans-serif !important; }
.chatbot-wrap .message-wrap { font-size: 0.92em; line-height: 1.7; }
footer { display: none !important; }
"""

# FIX(review): CSS was defined but never applied — pass it to gr.Blocks.
with gr.Blocks(title="RECON", css=CSS) as demo:
    gr.HTML("""
<div style="display:flex;align-items:baseline;gap:10px;padding:8px 0;">
  <span style="font-size:1.6em;">🔍</span>
  <span style="font-size:1.5em;font-weight:800;letter-spacing:1px;">RECON</span>
  <span style="border:1px solid #3b82f6;color:#3b82f6;border-radius:10px;
               padding:1px 8px;font-size:0.7em;">MULTI-AGENT</span>
  <span style="color:#6b7280;font-size:0.85em;">
    Temporally-aware ML literature research &middot; Live Semantic Scholar
    &middot; Staleness detection &middot; Contradiction flagging
  </span>
</div>""")

    session_id_state = gr.State(str(uuid.uuid4()))
    export_md_state = gr.State("")

    with gr.Row(equal_height=False):
        # ── Left column ──────────────────────────────────────────────────
        with gr.Column(scale=3):
            # NOTE(review): history is built as {"role", "content"} dicts;
            # on newer Gradio this requires Chatbot(type="messages") — confirm
            # against the pinned gradio version.
            chatbot = gr.Chatbot(
                label="Research Position",
                height=480,
                render_markdown=True,
                elem_classes=["chatbot-wrap"],
            )
            with gr.Row():
                query_input = gr.Textbox(
                    placeholder="e.g. What is the current state of KV cache compression in LLMs?",
                    label="Research Query",
                    lines=2,
                    scale=4,
                )
                submit_btn = gr.Button("🔍 Research", variant="primary", scale=1, min_width=120)
            with gr.Row():
                decay_dropdown = gr.Dropdown(
                    choices=["linear", "log", "none"],
                    value="linear",
                    label="Recency decay",
                    scale=1,
                )
                new_session_btn = gr.Button("🔄 New Session", scale=1)
                session_display = gr.Textbox(
                    label="Session ID",
                    interactive=False,
                    scale=2,
                )
            with gr.Accordion("📄 Retrieved Papers", open=False):
                papers_output = gr.HTML(
                    value="<div style='color:#6b7280;padding:8px;'>Run a query to see retrieved papers.</div>"
                )
            with gr.Accordion("📊 Claim Confidence Table", open=True):
                claims_output = gr.HTML(
                    value="<div style='color:#6b7280;padding:8px;'>Run a query to see claim confidence scores.</div>"
                )

        # ── Right column ─────────────────────────────────────────────────
        with gr.Column(scale=2):
            gr.HTML("<div style='font-weight:700;'>Critic Debug Panel</div>")
            critic_output = gr.HTML(
                value="<div style='color:#6b7280;padding:8px;'>Critic verdict will appear here.</div>"
            )
            gr.HTML("<hr style='margin:8px 0;border-color:#e5e7eb;'>")
            gr.HTML("<div style='font-weight:700;'>Session Memory</div>")
            session_output = gr.HTML(
                value="<div style='color:#6b7280;padding:8px;'>Session history will appear here.</div>"
            )
            gr.HTML("<hr style='margin:8px 0;border-color:#e5e7eb;'>")
            export_btn = gr.Button("📥 Export Session (.md)", variant="secondary")
            export_file = gr.File(label="Download")

    # ── Events ───────────────────────────────────────────────────────────
    def on_submit(query, session_id, decay_config, history):
        # Re-yield pipeline frames, mirroring the session id into the
        # read-only display textbox as the extra last output.
        for r in run_query(query, session_id, decay_config, history):
            chat, sid, critic, claims, papers, session, export_md, _ = r
            yield chat, sid, critic, claims, papers, session, export_md, sid

    submit_btn.click(
        fn=on_submit,
        inputs=[query_input, session_id_state, decay_dropdown, chatbot],
        outputs=[chatbot, session_id_state, critic_output, claims_output,
                 papers_output, session_output, export_md_state, session_display],
    )
    query_input.submit(
        fn=on_submit,
        inputs=[query_input, session_id_state, decay_dropdown, chatbot],
        outputs=[chatbot, session_id_state, critic_output, claims_output,
                 papers_output, session_output, export_md_state, session_display],
    )
    new_session_btn.click(
        fn=new_session,
        outputs=[session_id_state, chatbot, critic_output, claims_output,
                 papers_output, session_output, export_md_state, export_file],
    )
    export_btn.click(
        fn=export_md_file,
        inputs=[export_md_state, session_id_state],
        outputs=[export_file],
    )


if __name__ == "__main__":
    demo.launch()