from langchain_core.prompts import ChatPromptTemplate
from langgraph.graph import StateGraph, END, MessagesState
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import ToolNode
from langchain_core.messages import HumanMessage
from dotenv import load_dotenv
from tools import transfer_call, leave_message, go_to
from prompts import system_prompt, system_prompt_original
# Pull OPENAI_API_KEY (and any other settings) from a local .env file.
load_dotenv()

# Chat model backing the agent; low temperature for more predictable tool use.
llm = ChatOpenAI(model="gpt-4.1", temperature=0.3)

# Tools the agent may call, wrapped in a LangGraph executor node.
tools = [transfer_call, leave_message, go_to]
tool_node = ToolNode(tools)

# State machine over the shared message history.
graph = StateGraph(MessagesState)

# System instructions followed by the running conversation.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt_original),
        ("placeholder", "{messages}"),
    ]
)

# The agent: prompt piped into the tool-enabled model.
agent = prompt | llm.bind_tools(tools)
# 5. Define the nodes
def agent_node(state: MessagesState):
    """Invoke the agent on the current history and append its reply to state."""
    ai_message = agent.invoke(state)
    return {"messages": [ai_message]}
# 6. Define the edges
def should_continue(state: MessagesState):
    """Route to tool execution when the newest message carries tool calls, else end."""
    newest = state["messages"][-1]
    return "continue" if newest.tool_calls else "end"
# Wire the graph: the agent node loops through the tool node until it stops
# requesting tool calls, at which point the run ends.
graph.add_node("agent", agent_node)
graph.add_node("action", tool_node)
graph.set_entry_point("agent")
graph.add_conditional_edges(
    "agent", should_continue, {"continue": "action", "end": END}
)
# Tool results always flow back to the agent for the next decision.
graph.add_edge("action", "agent")

# Compile into a runnable application.
app = graph.compile()
# This is a simplified conversation loop for demonstration.
def get_agent_response(conversation: list):
response = app.invoke({"messages": conversation})
return response["messages"][len(conversation) :]