bpm-agent / agents /process_aware_agent.py
limonad's picture
Rename agents/process_aware_agent_old_py to agents/process_aware_agent.py
99fc9f9 verified
"""Simple agent implementation using LangGraph."""
import sys
from pathlib import Path
from typing import Annotated, TypedDict
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent))
from config.settings import get_settings
from utils.markdown_loader import load_agent_definition, get_system_prompt, load_process_specifications
from tools.summarize import summarize_text
class AgentState(TypedDict):
"""State schema for the agent graph."""
messages: Annotated[list[BaseMessage], add_messages]
agent_name: str
def create_agent_graph(agent_definition_path: Path | None = None):
"""
Create a LangGraph agent from a markdown definition.
Args:
agent_definition_path: Path to the agent definition markdown file.
If None, uses default path.
Returns:
Compiled LangGraph graph
"""
settings = get_settings()
# Load agent definition
if agent_definition_path is None:
agent_definition_path = settings.docs_dir / "agent_definition.md"
agent_def = load_agent_definition(agent_definition_path)
# Load process specifications
process_description, process_constraints = load_process_specifications(settings.docs_dir)
# Generate system prompt with process context
system_prompt = get_system_prompt(
agent_def,
process_description=process_description,
process_constraints=process_constraints
)
# Initialize LLM with tools
llm = ChatOpenAI(
model=settings.openai_model,
temperature=settings.temperature,
max_tokens=settings.max_tokens,
api_key=settings.openai_api_key,
)
# Bind tools to the LLM
tools = [summarize_text]
llm_with_tools = llm.bind_tools(tools)
# Define agent node
def agent_node(state: AgentState) -> AgentState:
"""Process messages through the agent."""
messages = state["messages"]
# Prepend system message if not already present
if not messages or not isinstance(messages[0], SystemMessage):
messages = [SystemMessage(content=system_prompt)] + messages
# Get response from LLM with tools
response = llm_with_tools.invoke(messages)
return {
"messages": [response],
"agent_name": agent_def.get("name", "Assistant Agent"),
}
# Define tool execution node
def tool_node(state: AgentState) -> AgentState:
"""Execute tools if requested by the agent."""
from langgraph.prebuilt import ToolNode
tool_executor = ToolNode(tools)
return tool_executor.invoke(state)
# Routing function to decide if we should use tools
def should_continue(state: AgentState) -> str:
"""Determine if we should continue to tools or end."""
messages = state["messages"]
last_message = messages[-1]
# If there are tool calls, route to tools
if hasattr(last_message, "tool_calls") and last_message.tool_calls:
return "tools"
# Otherwise, end
return "end"
# Build the graph
workflow = StateGraph(AgentState)
# Add nodes
workflow.add_node("agent", agent_node)
workflow.add_node("tools", tool_node)
# Add edges
workflow.add_edge(START, "agent")
workflow.add_conditional_edges(
"agent",
should_continue,
{
"tools": "tools",
"end": END,
}
)
workflow.add_edge("tools", "agent")
# Compile the graph
graph = workflow.compile()
return graph
def run_agent(user_input: str, agent_definition_path: Path | None = None) -> str:
"""
Run the agent with a single user input.
Args:
user_input: User's message
agent_definition_path: Optional path to agent definition
Returns:
Agent's response as a string
"""
graph = create_agent_graph(agent_definition_path)
# Create initial state
initial_state = {
"messages": [HumanMessage(content=user_input)],
"agent_name": "Assistant Agent",
}
# Run the graph
result = graph.invoke(initial_state)
# Extract the last message (agent's response)
last_message = result["messages"][-1]
return last_message.content
# Made with Bob