Spaces:
Running
Running
"""
app/nodes/output.py → Final output node

Single source of truth for message history.
Harmful turns: scrub HumanMessage, store nothing → clean history guaranteed.
Safe turns: append AIMessage → LLM gets full context next turn.
This means memory and LLM history are always free of harmful Q&A.
"""
from langchain_core.messages import AIMessage, HumanMessage
from app.state import AgentState
def output_node(state: AgentState) -> AgentState:
    """Emit the final response and keep the message history clean.

    On a safe turn the AIMessage is appended so the LLM sees full context
    next turn. On a harmful (or guardrail-failed) turn the offending
    HumanMessage is scrubbed instead, so neither memory nor the LLM's
    history ever retains a harmful Q&A.
    """
    node_log = [*state.get("node_log", []), "output"]
    reply = state["response"]
    history = list(state["messages"])

    harmful = state.get("is_harmful", False)
    passed_guardrail = state.get("guardrail_passed", True)

    if harmful or not passed_guardrail:
        # Drop the current HumanMessage so the harmful query does not
        # linger in history for later turns.
        query = state["query"]
        history = [
            msg for msg in history
            if not (isinstance(msg, HumanMessage) and msg.content == query)
        ]
        print(f"\nπ€ {reply}\n")
        print("[OUTPUT] β οΈ Harmful turn scrubbed from history.")
    else:
        # safety_node already placed the HumanMessage in history;
        # only the assistant's reply needs to be recorded here.
        history.append(AIMessage(content=reply))
        print(f"\nπ€ {reply}\n")

    return {**state, "messages": history, "node_log": node_log}