Spaces:
Running
Running
| """ | |
| app/frontend/gradio_app.py β Full page warm gray UI | |
| """ | |
| import os | |
| import gradio as gr | |
| from datetime import datetime, timezone, timedelta | |
| from langchain_core.messages import HumanMessage | |
IST = timezone(timedelta(hours=5, minutes=30))


def _now_ist() -> str:
    """Return the current wall-clock time in India Standard Time,
    formatted for console log banners (e.g. ``05 Mar 2025 07:42:10 PM IST``)."""
    now = datetime.now(IST)
    return now.strftime("%d %b %Y %I:%M:%S %p IST")
# Mark that we are running under the Gradio frontend, and default the
# human-in-the-loop gate to enabled unless the environment overrides it.
# NOTE: these env vars are set BEFORE the app.* imports below on purpose —
# the imported modules presumably read them at import time (TODO confirm).
os.environ["GRADIO_MODE"] = "true"
os.environ["HITL_ENABLED"] = os.getenv("HITL_ENABLED", "true")
from app.graph.builder import build_graph
from app.state import AgentState
from app.nodes.hitl import HITLPauseException
# Compile the graph once at import time. A single hard-coded thread id
# means all UI interactions share one conversation checkpoint.
_graph = build_graph()
_thread_config = {"configurable": {"thread_id": "gradio-session-001"}}
# Frontend holds NO conversation history.
# All message history is managed inside the graph via output_node.
# LangGraph MemorySaver persists state across invocations automatically.
# Graph state captured when execution pauses for human review;
# None whenever no approval is pending. Module-global, so only one
# pending review can exist at a time.
_pending_hitl_state: AgentState | None = None
def run_graph(query: str) -> AgentState:
    """Execute one user turn through the compiled graph.

    The message history persisted by the MemorySaver checkpoint is loaded
    first and seeded into the fresh state, so the LLM sees the full clean
    conversation across turns even though the frontend keeps no history.
    """
    try:
        snapshot = _graph.get_state(_thread_config)
        history = snapshot.values.get("messages", []) if snapshot and snapshot.values else []
    except Exception:
        history = []  # first turn — no checkpoint exists yet

    fresh: AgentState = {
        # Prior safe history; safety_node adds the current HumanMessage.
        "messages": history,
        "query": query,
        "route": "",
        "rag_context": "",
        "tool_calls": [],
        "tool_results": [],
        "response": "",
        "retry_count": 0,
        "hitl_approved": False,
        "evaluation_score": 0.0,
        "guardrail_passed": True,
        "is_harmful": False,
        "memory_summary": "",
        "node_log": [],
    }
    return _graph.invoke(fresh, config=_thread_config)
def resume_graph_after_hitl(state: AgentState, approved: bool) -> AgentState:
    """Run the tail of the pipeline after a human verdict on a HITL pause.

    On approval: evaluation -> (optional LLM retry) -> guardrails -> output.
    On rejection: short-circuit with a canned refusal response.
    """
    # Imported lazily to mirror how the graph wires its node modules.
    from app.nodes.evaluation import evaluation_node, eval_route
    from app.nodes.guardrails import guardrails_node
    from app.nodes.output import output_node

    if not approved:
        return {**state, "response": "π« Response rejected by human reviewer."}

    working = evaluation_node({**state, "hitl_approved": True})
    if eval_route(working) == "retry":
        from app.nodes.llm_node import llm_node
        working = llm_node(working)
    working = guardrails_node(working)
    return output_node(working)
def format_trace(node_log: list) -> str:
    """Render the graph's node log as a markdown list, one icon-prefixed
    entry per node, separated by blank lines."""
    if not node_log:
        return "*Waiting for a query...*"

    def _icon(entry: str) -> str:
        # First matching category wins; unknown entries get the neutral icon.
        success = ("β ", "auto-pass", "approved", "output", "passed")
        failure = ("BLOCKED", "rejected", "FAILED", "ERROR")
        pending = ("retry", "β³", "βΈ")
        if any(marker in entry for marker in success):
            return "β "
        if any(marker in entry for marker in failure):
            return "β"
        if any(marker in entry for marker in pending):
            return "π"
        return "βΈ"

    return "\n\n".join(f"{_icon(entry)} `{entry}`" for entry in node_log)
def user_msg(t):
    """Wrap raw text as a user-role message dict for gr.Chatbot (messages mode)."""
    return dict(role="user", content=t)
def bot_msg(t):
    """Wrap raw text as an assistant-role message dict for gr.Chatbot (messages mode)."""
    return dict(role="assistant", content=t)
def handle_submit(user_message, chat_history):
    """Handle one chat turn end to end.

    Returns updates for: (chatbot, input box, trace panel, meta line,
    HITL panel visibility, HITL panel content). A HITLPauseException from
    the graph flips the UI into review mode instead of showing an answer.
    """
    global _pending_hitl_state

    if not user_message.strip():
        # Ignore empty submissions: reset trace/meta, keep HITL hidden.
        return chat_history, "", "*Waiting for a query...*", "", gr.update(visible=False), gr.update(value="")

    chat_history = chat_history + [user_msg(user_message)]
    rule = "β" * 60
    print(f"\n{rule}")
    print(f"[QUERY] {user_message}")
    print(f"[TIME] {_now_ist()}")
    print(f"{rule}")

    try:
        final = run_graph(user_message)
        # History is managed entirely by output_node inside the graph.
        chat_history = chat_history + [bot_msg(final.get("response", ""))]
        route = final.get("route", "")
        score = final.get("evaluation_score", 0.0)
        g_ok = final.get("guardrail_passed", True)
        meta = f"**Route:** {route.upper() or 'β'} Β· **Eval:** {score:.2f} Β· **Guardrail:** {'β Passed' if g_ok else 'π« Blocked'}"
        return (chat_history, "", format_trace(final.get("node_log", [])),
                meta, gr.update(visible=False), gr.update(value=""))
    except HITLPauseException as pause:
        # Graph paused for human review: stash its state and show the panel.
        _pending_hitl_state = pause.state
        trace = pause.state.get("node_log", []) + ["βΈ hitl β awaiting approval"]
        chat_history = chat_history + [bot_msg("β³ *Awaiting human approval...*")]
        meta = f"**Route:** {pause.state.get('route','').upper() or 'β'} Β· **Status:** βΈ Pending HITL"
        return (chat_history, "", format_trace(trace),
                meta, gr.update(visible=True),
                gr.update(value=f"**Pending response:**\n\n{pause.pending_response}"))
    except Exception as err:
        chat_history = chat_history + [bot_msg(f"β Error: {err}")]
        return (chat_history, "", f"β `{err}`", "", gr.update(visible=False), gr.update(value=""))
def handle_approve(chat_history):
    """Reviewer approved: resume the paused graph and replace the
    placeholder assistant bubble with the final response."""
    global _pending_hitl_state
    if not _pending_hitl_state:
        return chat_history, "*No trace.*", "", gr.update(visible=False)

    final = resume_graph_after_hitl(_pending_hitl_state, True)
    _pending_hitl_state = None

    if chat_history and chat_history[-1]["role"] == "assistant":
        chat_history = chat_history[:-1] + [bot_msg(final.get("response", ""))]

    score = final.get("evaluation_score", 0.0)
    guard_ok = final.get("guardrail_passed", True)
    meta = f"**Route:** {final.get('route','').upper() or 'β'} Β· **Eval:** {score:.2f} Β· **Guardrail:** {'β Passed' if guard_ok else 'π« Blocked'}"
    trace = format_trace(final.get("node_log", []) + ["β hitl approved β output"])
    return chat_history, trace, meta, gr.update(visible=False)
def handle_reject(chat_history):
    """Reviewer rejected: drop the pending state and swap the placeholder
    assistant bubble for a rejection notice."""
    global _pending_hitl_state
    _pending_hitl_state = None
    if chat_history and chat_history[-1]["role"] == "assistant":
        chat_history = [*chat_history[:-1], bot_msg("π« Rejected by reviewer.")]
    return chat_history, "β `hitl rejected β END`", "", gr.update(visible=False)
def handle_clear():
    """Wipe the conversation UI and discard any pending HITL state."""
    global _pending_hitl_state
    _pending_hitl_state = None
    cleared = ([], "", "*Waiting for a query...*", "", gr.update(visible=False))
    return cleared
| from app.frontend.css import CSS | |
def build_ui():
    """Assemble the Gradio Blocks page and return the app.

    Layout: a wide chat column (chat transcript, hidden HITL review panel,
    input row, example prompts) plus a narrow sidebar (live execution
    trace, static graph-topology diagram). Event handlers are wired at
    the bottom; component creation order defines the visual order.
    """
    with gr.Blocks(title="LangGraph Agent", css=CSS, theme=gr.themes.Soft()) as demo:
        # -- Header ------------------------------------------------------
        gr.Markdown("## π€ LangGraph Agent")
        with gr.Row(equal_height=True):
            # -- Main chat column ---------------------------------------
            with gr.Column(scale=4):
                # Chat transcript box
                with gr.Group(elem_classes="section-box"):
                    chatbot = gr.Chatbot(
                        type="messages",
                        show_label=False,
                        height=500,
                        container=False,
                        placeholder="Send a message to get started.",
                        elem_classes="chatbot-block",
                    )
                # HITL review panel — hidden until handle_submit catches a
                # HITLPauseException and toggles it visible.
                with gr.Group(visible=False, elem_classes="hitl-box") as hitl_panel:
                    hitl_content = gr.Markdown()
                    gr.Markdown("π **Human review required** β approve or reject before the response is sent.")
                    with gr.Row():
                        approve_btn = gr.Button("β Approve", variant="primary")
                        reject_btn = gr.Button("β Reject", variant="stop")
                # Input row: message textbox + send + clear buttons
                with gr.Group(elem_classes="section-box"):
                    with gr.Row():
                        user_input = gr.Textbox(
                            placeholder="Message LangGraph Agent...",
                            show_label=False, scale=7, lines=1, container=False,
                        )
                        send_btn = gr.Button("Send", variant="primary", scale=1)
                        clear_btn = gr.Button("π", variant="secondary", scale=0, min_width=44)
                # Per-turn route/eval/guardrail summary, updated by handlers.
                meta_display = gr.Markdown("")
                # Canned example prompts (clicking one fills the textbox).
                with gr.Group(elem_classes="section-box"):
                    gr.Examples(
                        examples=[
                            ["What is RAG?"], ["What is LangGraph?"],
                            ["Calculate 25 * 48"], ["Weather in Mumbai?"],
                            ["Tell me a joke"], ["Explain HITL"],
                        ],
                        inputs=user_input,
                        label="Examples",
                    )
            # -- Right sidebar ------------------------------------------
            with gr.Column(scale=1):
                # Node-by-node execution trace, rendered by format_trace.
                with gr.Group(elem_classes="section-box"):
                    gr.Markdown("**β‘ Execution Trace**")
                    trace_display = gr.Markdown("*Waiting for a query...*")
                # Static picture of the graph wiring (documentation only).
                with gr.Group(elem_classes="section-box"):
                    gr.Markdown("""**πΊ Graph Topology**
```
START β router
ββ rag β llm
ββ tool/general β llm
ββ tool_executor
ββ memory β hitl
ββ evaluation
β ββ retry β llm
β ββ guardrails β output
ββ END
```""")
        # -- Event wiring -----------------------------------------------
        # Send button and Enter in the textbox share the same handler/outputs.
        submit_outs = [chatbot, user_input, trace_display, meta_display, hitl_panel, hitl_content]
        send_btn.click(fn=handle_submit, inputs=[user_input, chatbot], outputs=submit_outs)
        user_input.submit(fn=handle_submit, inputs=[user_input, chatbot], outputs=submit_outs)
        hitl_outs = [chatbot, trace_display, meta_display, hitl_panel]
        approve_btn.click(fn=handle_approve, inputs=[chatbot], outputs=hitl_outs)
        reject_btn.click(fn=handle_reject, inputs=[chatbot], outputs=hitl_outs)
        clear_btn.click(fn=handle_clear, outputs=[chatbot, user_input, trace_display, meta_display, hitl_panel])
    return demo
if __name__ == "__main__":
    # Bind on all interfaces (container-friendly) on the default Gradio port.
    app = build_ui()
    app.launch(server_name="0.0.0.0", server_port=7860, show_error=True)