# NOTE(review): stray "Spaces: Running" page-header lines removed — copy/paste artifact, not code.
| """ | |
| app/nodes/memory.py — CHECKPOINT 5: Memory | |
| Only summarizes safe, on-topic conversation turns. | |
| Harmful turns are never in messages (scrubbed by output_node). | |
| Memory summary is topic-neutral — no harmful context bleeds through. | |
| """ | |
from langchain_core.messages import HumanMessage, AIMessage

from app.state import AgentState
from app.utils.llm import llm

SUMMARY_THRESHOLD = 6  # min messages before summarizing
def memory_node(state: AgentState) -> AgentState:
    """Summarise recent safe conversation turns into ``memory_summary``.

    Skips summarisation when the current turn was flagged harmful, and only
    runs once at least ``SUMMARY_THRESHOLD`` clean (non-tool-call) messages
    have accumulated. On any LLM failure the state passes through unchanged
    (best-effort: memory must never break the graph).

    Args:
        state: Current agent state; reads ``messages``, ``is_harmful``,
            ``node_log``. May write ``memory_summary``.

    Returns:
        A new state dict with ``node_log`` extended and, when summarisation
        succeeded, ``memory_summary`` updated.
    """
    log = state.get("node_log", []) + ["memory"]

    # Harmful turns never reach memory — bail out before touching messages.
    if state.get("is_harmful"):
        return {**state, "node_log": log}

    # Keep only human turns and plain assistant replies (drop tool-call messages).
    clean = [
        m for m in state["messages"]
        if isinstance(m, HumanMessage)
        or (isinstance(m, AIMessage) and not getattr(m, "tool_calls", []))
    ]
    if len(clean) < SUMMARY_THRESHOLD:
        return {**state, "node_log": log}

    # Cap each message at 400 chars to keep the summarisation prompt small.
    recent_text = "\n".join(
        f"{'User' if isinstance(m, HumanMessage) else 'Assistant'}: {m.content[:400]}"
        for m in clean[-SUMMARY_THRESHOLD:]
    )
    prompt = (
        "Summarise this conversation in 2-3 sentences.\n"
        "Include ONLY factual topics discussed (concepts, tools, questions answered).\n"
        "Do NOT include any violent, harmful, or sensitive content in the summary.\n"
        "If the conversation contains harmful topics, summarise only the safe parts.\n\n"
        + recent_text
    )
    try:
        # Only the LLM call can realistically fail; keep the try body minimal.
        summary = llm.invoke([HumanMessage(content=prompt)]).content
    except Exception as e:  # broad by design: any provider error degrades gracefully
        print(f"[MEMORY] Summarisation failed: {e}")
        return {**state, "node_log": log}

    print("[MEMORY] Summary updated.")
    print(f"[MEMORY] Summary : {summary}")
    return {**state, "memory_summary": summary, "node_log": log}