Spaces:
Configuration error
Configuration error
Commit
·
6b5a8ab
1
Parent(s):
0693e1a
build ReActAgent class
Browse files- react_agent.py +38 -0
react_agent.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from dotenv import load_dotenv
|
| 3 |
+
from langchain_core.messages import HumanMessage
|
| 4 |
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 5 |
+
from langgraph.prebuilt import create_react_agent
|
| 6 |
+
from custom_tools import custom_tools
|
| 7 |
+
|
| 8 |
+
class ReActAgent:
    """LangGraph ReAct agent wrapping a Gemini chat model with custom tools.

    Construct once, then call the instance with a question string to get
    the agent's final text reply.
    """

    def __init__(self, system_prompt: str = ""):
        """Build the underlying LLM and ReAct agent.

        Args:
            system_prompt: Optional system prompt forwarded to
                ``create_react_agent``. Defaults to ``""`` (no system
                prompt), matching the previous hard-coded behavior, so
                existing ``ReActAgent()`` callers are unaffected.

        Raises:
            RuntimeError: If the ``GOOGLE`` environment variable (or .env
                entry) is missing or empty.
        """
        load_dotenv()
        api_key = os.getenv("GOOGLE")
        if not api_key:
            # Fail fast with a clear message. The original code assigned
            # os.getenv("GOOGLE") (possibly None) into os.environ, which
            # raises an opaque TypeError when the variable is unset.
            raise RuntimeError(
                "GOOGLE environment variable is not set (expected in environment or .env)."
            )
        os.environ["GOOGLE_API_KEY"] = api_key

        # Initialize the Gemini chat model backing the agent.
        llm = ChatGoogleGenerativeAI(
            model="gemini-2.5-flash",
            temperature=0,
            max_retries=5,
        )

        # Build the ReAct agent. The previously dead `sys_prompt` local is
        # replaced by the real `system_prompt` parameter, which is now
        # actually passed through.
        self.agent = create_react_agent(
            model=llm,
            tools=custom_tools,
            prompt=system_prompt,
        )
        print("ReActAgent initialized.")

    def __call__(self, question: str) -> str:
        """Run the agent on *question* and return its final text answer."""
        # Wrap the question in a HumanMessage to match the agent's
        # expected {"messages": [...]} input schema.
        input_msg = HumanMessage(content=question)
        out = self.agent.invoke({"messages": [input_msg]})
        # The last message in the returned state is the agent's reply.
        reply = out["messages"][-1].content
        # Strip a leading "Final Answer:" header if the model emitted one.
        if "Final Answer:" in reply:
            reply = reply.split("Final Answer:")[-1].strip()
        return reply