| """Simple agent implementation using LangGraph.""" |
|
|
| import sys |
| from pathlib import Path |
| from typing import Annotated, TypedDict |
|
|
| from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage |
| from langchain_openai import ChatOpenAI |
| from langgraph.graph import StateGraph, START, END |
| from langgraph.graph.message import add_messages |
|
|
| |
| sys.path.insert(0, str(Path(__file__).parent.parent)) |
|
|
| from config.settings import get_settings |
| from utils.markdown_loader import load_agent_definition, get_system_prompt, load_process_specifications |
| from tools.summarize import summarize_text |
|
|
|
|
| class AgentState(TypedDict): |
| """State schema for the agent graph.""" |
|
|
| messages: Annotated[list[BaseMessage], add_messages] |
| agent_name: str |
|
|
|
|
| def create_agent_graph(agent_definition_path: Path | None = None): |
| """ |
| Create a LangGraph agent from a markdown definition. |
| |
| Args: |
| agent_definition_path: Path to the agent definition markdown file. |
| If None, uses default path. |
| |
| Returns: |
| Compiled LangGraph graph |
| """ |
| settings = get_settings() |
| |
| |
| if agent_definition_path is None: |
| agent_definition_path = settings.docs_dir / "agent_definition.md" |
| |
| agent_def = load_agent_definition(agent_definition_path) |
| |
| |
| process_description, process_constraints = load_process_specifications(settings.docs_dir) |
| |
| |
| system_prompt = get_system_prompt( |
| agent_def, |
| process_description=process_description, |
| process_constraints=process_constraints |
| ) |
| |
| |
| llm = ChatOpenAI( |
| model=settings.openai_model, |
| temperature=settings.temperature, |
| max_tokens=settings.max_tokens, |
| api_key=settings.openai_api_key, |
| ) |
| |
| |
| tools = [summarize_text] |
| llm_with_tools = llm.bind_tools(tools) |
| |
| |
| def agent_node(state: AgentState) -> AgentState: |
| """Process messages through the agent.""" |
| messages = state["messages"] |
| |
| |
| if not messages or not isinstance(messages[0], SystemMessage): |
| messages = [SystemMessage(content=system_prompt)] + messages |
| |
| |
| response = llm_with_tools.invoke(messages) |
| |
| return { |
| "messages": [response], |
| "agent_name": agent_def.get("name", "Assistant Agent"), |
| } |
| |
| |
| def tool_node(state: AgentState) -> AgentState: |
| """Execute tools if requested by the agent.""" |
| from langgraph.prebuilt import ToolNode |
| tool_executor = ToolNode(tools) |
| return tool_executor.invoke(state) |
| |
| |
| def should_continue(state: AgentState) -> str: |
| """Determine if we should continue to tools or end.""" |
| messages = state["messages"] |
| last_message = messages[-1] |
| |
| |
| if hasattr(last_message, "tool_calls") and last_message.tool_calls: |
| return "tools" |
| |
| return "end" |
| |
| |
| workflow = StateGraph(AgentState) |
| |
| |
| workflow.add_node("agent", agent_node) |
| workflow.add_node("tools", tool_node) |
| |
| |
| workflow.add_edge(START, "agent") |
| workflow.add_conditional_edges( |
| "agent", |
| should_continue, |
| { |
| "tools": "tools", |
| "end": END, |
| } |
| ) |
| workflow.add_edge("tools", "agent") |
| |
| |
| graph = workflow.compile() |
| |
| return graph |
|
|
|
|
| def run_agent(user_input: str, agent_definition_path: Path | None = None) -> str: |
| """ |
| Run the agent with a single user input. |
| |
| Args: |
| user_input: User's message |
| agent_definition_path: Optional path to agent definition |
| |
| Returns: |
| Agent's response as a string |
| """ |
| graph = create_agent_graph(agent_definition_path) |
| |
| |
| initial_state = { |
| "messages": [HumanMessage(content=user_input)], |
| "agent_name": "Assistant Agent", |
| } |
| |
| |
| result = graph.invoke(initial_state) |
| |
| |
| last_message = result["messages"][-1] |
| return last_message.content |
|
|
| |