byArMan456 commited on
Commit
314150e
·
verified ·
1 Parent(s): 5032624

Create agent.py

Browse files
Files changed (1) hide show
  1. agent.py +136 -0
agent.py ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """LangGraph‑powered autonomous agent able to use the *BetterThanMe* toolset.
2
+ Target: GAIA level 1 competency without external API keys.
3
+ Provides `build_graph()` for the evaluation harness.
4
+ """
5
+ from __future__ import annotations
6
+
7
+ import os
8
+ from typing import Any, List, TypedDict
9
+
10
+ from langchain.schema import AIMessage, HumanMessage, SystemMessage
11
+ from langchain_openai import ChatOpenAI
12
+ from langgraph.graph import StateGraph, END
13
+
14
+ from tools import TOOLS
15
+
16
+ # ---------------------------------------------------------------------------
17
+ # Agent state ----------------------------------------------------------------
18
+ # ---------------------------------------------------------------------------
19
+ class AgentState(TypedDict, total=False):
20
+ messages: List[Any]
21
+ tool_calls: list
22
+ needs_tool: bool
23
+
24
+
25
+ # ---------------------------------------------------------------------------
26
+ # LLM setup ------------------------------------------------------------------
27
+ # ---------------------------------------------------------------------------
28
+ MODEL = os.getenv("OPENAI_MODEL", "gpt-3.5-turbo")
29
+ llm = ChatOpenAI(model_name=MODEL, temperature=0)
30
+
31
+ SYSTEM_PROMPT = """
32
+ You are **BetterThanMe**, a tool‑using assistant.
33
+ **Rules**
34
+ 1. Think step‑by‑step *internally* but **never** reveal your reasoning.
35
+ 2. Use JSON function‑call format to invoke tools when external data is required.
36
+ 3. When you are completely confident in the answer, respond with **exactly** one line:
37
+ Final Answer: <answer>
38
+ – where <answer> is a single concise value (number, word, date, URL, etc.).
39
+ – Do **not** add explanations, extra words, or additional lines.
40
+ The automated grader will strip the first 14 characters ('Final Answer: ') to obtain the answer, so formatting must be perfect.
41
+ """
42
+
43
+ # ---------------------------------------------------------------------------
44
+ # LangGraph nodes ------------------------------------------------------------
45
+ # ---------------------------------------------------------------------------
46
+
47
+ def agent_node(state: AgentState) -> AgentState: # type: ignore[override]
48
+ messages = state.get("messages", [])
49
+ if not messages or not isinstance(messages[0], SystemMessage):
50
+ messages = [SystemMessage(content=SYSTEM_PROMPT)] + messages
51
+
52
+ response = llm.invoke(messages, tools=TOOLS)
53
+ messages.append(response)
54
+
55
+ tool_calls = getattr(response, "tool_calls", None)
56
+ needs_tool = bool(tool_calls)
57
+
58
+ return {
59
+ "messages": messages,
60
+ "tool_calls": tool_calls,
61
+ "needs_tool": needs_tool,
62
+ }
63
+
64
+
65
+ def tool_executor_node(state: AgentState) -> AgentState: # type: ignore[override]
66
+ messages = state["messages"]
67
+ tool_calls = state.get("tool_calls", []) or []
68
+
69
+ for call in tool_calls:
70
+ name = call["name"]
71
+ args = call.get("arguments", {})
72
+
73
+ tool = next((t for t in TOOLS if t.name == name), None)
74
+ if tool is None:
75
+ result = f"Tool '{name}' not found."
76
+ else:
77
+ try:
78
+ result = tool.run(**args)
79
+ except Exception as exc: # pylint: disable=broad-except
80
+ result = f"Error running tool: {exc}"
81
+
82
+ messages.append(AIMessage(content=result, name=name))
83
+
84
+ return {
85
+ "messages": messages,
86
+ "needs_tool": False,
87
+ }
88
+
89
+
90
+ # ---------------------------------------------------------------------------
91
+ # Build & compile graph ------------------------------------------------------
92
+ # ---------------------------------------------------------------------------
93
+
94
+ def _compile_graph():
95
+ g = StateGraph(AgentState)
96
+
97
+ g.add_node("agent", agent_node)
98
+ g.add_node("executor", tool_executor_node)
99
+
100
+ g.add_conditional_edges(
101
+ "agent",
102
+ lambda s: s["needs_tool"],
103
+ {True: "executor", False: END},
104
+ )
105
+ g.add_edge("executor", "agent")
106
+ g.set_entry_point("agent")
107
+ return g.compile()
108
+
109
+ _GRAPH = _compile_graph()
110
+
111
+
112
+ def build_graph():
113
+ """Return the compiled LangGraph, conforming to evaluation harness."""
114
+ return _GRAPH
115
+
116
+
117
+ # ---------------------------------------------------------------------------
118
+ # Convenience single‑turn wrapper (not used by evaluation harness)------------
119
+ # ---------------------------------------------------------------------------
120
+
121
+ def chat_agent(user_input: str, history: List[List[str]] | None = None) -> str:
122
+ """Single‑turn helper for interactive Gradio chat UI."""
123
+ history = history or []
124
+
125
+ messages: List[Any] = []
126
+ for user, ai in history:
127
+ messages.append(HumanMessage(content=user))
128
+ messages.append(AIMessage(content=ai))
129
+ messages.append(HumanMessage(content=user_input))
130
+
131
+ final_state: AgentState = _GRAPH.invoke({"messages": messages})
132
+
133
+ for msg in reversed(final_state["messages"]):
134
+ if isinstance(msg, AIMessage):
135
+ return msg.content
136
+ return "(no response)"