yoda1976 commited on
Commit
9319e6f
·
1 Parent(s): 78e2723

add the code for agent

Browse files
Files changed (6) hide show
  1. .env +2 -0
  2. __init__.py +0 -0
  3. app.py +21 -3
  4. poetry.lock +0 -0
  5. pyproject.toml +10 -2
  6. tools.py +66 -7
.env ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ SERPAPI_KEY = "<REDACTED: a live key was committed here — revoke it, load it from a secret store, and add .env to .gitignore>"
2
+ OPENAI_API_KEY = "<REDACTED: a live key was committed here — revoke it, load it from a secret store, and add .env to .gitignore>"
__init__.py ADDED
File without changes
app.py CHANGED
@@ -3,7 +3,10 @@ import gradio as gr
3
  import requests
4
  import inspect
5
  import pandas as pd
6
-
 
 
 
7
  # (Keep Constants as is)
8
  # --- Constants ---
9
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
@@ -12,12 +15,27 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
12
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
13
  class BasicAgent:
14
  def __init__(self):
 
 
 
 
 
 
 
 
 
15
  print("BasicAgent initialized.")
 
16
  def __call__(self, question: str) -> str:
 
17
  print(f"Agent received question (first 50 chars): {question[:50]}...")
18
- fixed_answer = "This is a default answer."
 
 
 
19
  print(f"Agent returning fixed answer: {fixed_answer}")
20
- return fixed_answer
 
21
 
22
  def run_and_submit_all( profile: gr.OAuthProfile | None):
23
  """
 
3
  import requests
4
  import inspect
5
  import pandas as pd
6
+ from langgraph.prebuilt import ToolNode, tools_condition
7
+ from langchain_core.messages import HumanMessage
8
+ from tools import assistant, AgentState, tools
9
+ from langgraph.graph import StateGraph, START
10
  # (Keep Constants as is)
11
  # --- Constants ---
12
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
15
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
16
class BasicAgent:
    """ReAct-style agent: an LLM assistant node wired to a tool-execution node.

    The graph loops assistant -> tools -> assistant until the model stops
    emitting tool calls, then the final message content is returned.
    """

    def __init__(self):
        # Wire the graph: START -> assistant; assistant conditionally routes to
        # the tool node (or ends); tools always hand control back to assistant.
        builder = StateGraph(AgentState)
        builder.add_node("assistant", assistant)
        builder.add_node("tools", ToolNode(tools))
        builder.add_edge(START, "assistant")
        builder.add_conditional_edges("assistant", tools_condition)
        builder.add_edge("tools", "assistant")

        self.agent = builder.compile()

        print("BasicAgent initialized.")

    def __call__(self, question: str) -> str:
        """Run the compiled graph on *question* and return the final answer text.

        Args:
            question: The user question to answer.

        Returns:
            The model's final answer with the "FINAL ANSWER:" marker stripped.
        """
        print(f"Agent received question (first 50 chars): {question[:50]}...")

        human_message = [HumanMessage(content=question)]
        result = self.agent.invoke({"messages": human_message})

        print(f"Agent returning fixed answer: {result}")
        answer = result["messages"][-1].content
        # BUG FIX: the original returned answer[14:], a hard-coded slice assuming
        # the reply starts exactly with "FINAL ANSWER: ". That silently corrupts
        # the answer when the model adds leading whitespace/newlines or omits the
        # marker. Locate the marker explicitly and strip it instead.
        marker = "FINAL ANSWER:"
        idx = answer.rfind(marker)
        return answer[idx + len(marker):].strip() if idx != -1 else answer.strip()
39
 
40
  def run_and_submit_all( profile: gr.OAuthProfile | None):
41
  """
poetry.lock ADDED
The diff for this file is too large to render. See raw diff
 
pyproject.toml CHANGED
@@ -3,7 +3,7 @@ name = "poetry-gaia"
3
  version = "0.1.0"
4
  description = "GAIA Benchmarking"
5
  authors = [
6
- {name = "sharath", email = ""}
7
  ]
8
  readme = "README.md"
9
  requires-python = ">=3.9"
@@ -12,7 +12,15 @@ dependencies = [
12
  "requests",
13
  "pandas",
14
  "numpy",
15
- "langgraph"
 
 
 
 
 
 
 
 
16
  ]
17
 
18
  [build-system]
 
3
  version = "0.1.0"
4
  description = "GAIA Benchmarking"
5
  authors = [
6
+ {name = "sharath"}
7
  ]
8
  readme = "README.md"
9
  requires-python = ">=3.9"
 
12
  "requests",
13
  "pandas",
14
  "numpy",
15
+ "langgraph",
16
+ "openai",
17
+ "tqdm",
18
+ "pydantic",
19
+ "langchain",
20
+ "langchain-openai",
21
+ "serpapi",
22
+ "python-dotenv",
23
+ "google-search-results"
24
  ]
25
 
26
  [build-system]
tools.py CHANGED
@@ -1,25 +1,84 @@
1
- from typing import List, TypedDict, Annotated, Optional
2
  from langchain_openai import ChatOpenAI
3
  from langchain_core.messages import SystemMessage, HumanMessage, AnyMessage
4
  from langgraph.graph.message import add_messages
5
- from langgraph.prebuilt import ToolNode, tools_condition
 
 
6
 
 
7
  class AgentState(TypedDict):
8
  """Agent state to be passed to the tool."""
9
  messages: Annotated[List[AnyMessage], add_messages]
10
 
11
- def add(a: int, b: int) -> int:
12
  """Add two numbers."""
13
  return a + b
14
- def subtract(a: int, b: int) -> int:
15
  """Subtract two numbers."""
16
  return a - b
17
- def multiply(a: int, b: int) -> int:
18
  """Multiply two numbers."""
19
  return a * b
20
- def divide(a: int, b: int) -> Optional[float]:
21
  """Divide two numbers."""
22
  if b == 0:
23
  return None
24
  return a / b
25
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, TypedDict, Annotated, Optional, Dict, Union
2
  from langchain_openai import ChatOpenAI
3
  from langchain_core.messages import SystemMessage, HumanMessage, AnyMessage
4
  from langgraph.graph.message import add_messages
5
+ from serpapi import GoogleSearch
6
+ from dotenv import load_dotenv
7
+ import os
8
 
9
+ load_dotenv()
10
class AgentState(TypedDict):
    """Agent state to be passed to the tool."""
    # Conversation history. The add_messages reducer merges updates by
    # appending, so each graph node returns only its newly produced messages
    # (presumably the intent here — confirm against langgraph's reducer docs).
    messages: Annotated[List[AnyMessage], add_messages]
13
 
14
def add(a: Union[float , int], b: Union[float , int]) -> Union[float , int]:
    """Add two numbers."""
    # Docstring doubles as the LLM tool description, so it is kept verbatim.
    total = a + b
    return total
17
def subtract(a: Union[float , int], b: Union[float , int]) -> Union[float , int]:
    """Subtract two numbers."""
    # Docstring doubles as the LLM tool description, so it is kept verbatim.
    difference = a - b
    return difference
20
def multiply(a: Union[float , int], b: Union[float , int]) -> Union[float , int]:
    """Multiply two numbers."""
    # Docstring doubles as the LLM tool description, so it is kept verbatim.
    product = a * b
    return product
23
def divide(a: Union[float , int], b: Union[float , int]) -> Union[float , int , None]:
    """Divide two numbers."""
    # Deliberate contract: division by zero yields None rather than raising,
    # so the LLM tool call fails soft. Docstring kept verbatim (tool schema).
    return None if b == 0 else a / b
28
+
29
def web_search(query: str) -> str:
    """Perform a web search using SerpAPI.

    Args:
        query: The search query string.

    Returns:
        Up to 5 results formatted as "Title/Link/Snippet" blocks separated by
        "---", or "No results found." when nothing usable comes back.
    """
    # BUG FIX: this commit's .env defines SERPAPI_KEY, but the code only read
    # SERPAPI_API_KEY, so the key was always None and every search failed.
    # Accept either variable name, preferring the conventional one.
    api_key = os.getenv("SERPAPI_API_KEY") or os.getenv("SERPAPI_KEY")
    params = {
        "engine": "google",
        "q": query,
        "api_key": api_key,
        "num": 5,
    }
    search = GoogleSearch(params)
    # BUG FIX: .get(...) with a default — SerpAPI error payloads (bad key,
    # quota exhausted) have no "organic_results" and previously raised KeyError.
    results = search.get_dict().get("organic_results", [])
    context = "\n---\n".join(
        "Title: " + result['title'] + "\nLink: " + result['link']
        + "\nSnippet: " + result.get('snippet', 'No snippet available')
        for result in results
        if 'title' in result and 'link' in result
    )
    return context if context else "No results found."
45
+
46
# Model and tool bindings shared by the assistant node.
llm = ChatOpenAI(model = "gpt-4o")
# BUG FIX: multiply is advertised in the assistant's tool description but was
# missing from this list, so the model could emit calls to a non-existent tool.
tools = [add, subtract, multiply, divide, web_search]
# parallel_tool_calls=False: the graph executes one tool call per turn.
llm_with_tools = llm.bind_tools(tools, parallel_tool_calls = False)
49
+
50
def assistant(state: AgentState) -> Dict:
    """Assistant node: invoke the tool-bound LLM on the conversation so far.

    Args:
        state: Graph state; state["messages"] holds the conversation history.

    Returns:
        A partial state update whose "messages" list contains the model's
        reply (the add_messages reducer appends it to the history).
    """

    system_message = """
    You are a helpful assistant tasked with answering questions using a set of tools.
    Now, I will ask you a question. Report your thoughts, and finish your answer with the following template:
    FINAL ANSWER: [YOUR FINAL ANSWER].
    YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
    Your answer should only start with "FINAL ANSWER: ", then follows with the answer.
    """
    # BUG FIX: the search tool was advertised as "websearch" but the bound tool
    # is named "web_search"; the mismatch invites calls to a tool that does not
    # exist. The description now matches the real tool name.
    tools_description = """

    You have the following tools available to perform actions

    web_search(query: str) -> str:
        Args:
            query: Search query
        Returns:
            A string containing 5 relevant search results

    add(a: Union[float , int], b: Union[float , int]) -> Union[float , int]:
        Add two numbers

    subtract(a: Union[float , int], b: Union[float , int]) -> Union[float , int]:
        Subtract two numbers

    multiply(a: Union[float , int], b: Union[float , int]) -> Union[float , int]:
        Multiply two numbers

    divide(a: Union[float , int], b: Union[float , int]) -> Union[float , int , None]:
        Divide two numbers
    """

    sys_msg = SystemMessage(content=system_message + tools_description)

    return {"messages": [llm_with_tools.invoke([sys_msg] + state["messages"])]}