serverdaun committed on
Commit
67a532b
·
1 Parent(s): 386e327

add test version of the agent

Browse files
Files changed (5) hide show
  1. agent.py +58 -0
  2. app.py +16 -5
  3. config.py +7 -1
  4. requirements.txt +13 -1
  5. system_prompt.yaml +11 -0
agent.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import TypedDict, Annotated
3
+ from dotenv import load_dotenv
4
+ from langgraph.graph.message import add_messages
5
+ from langchain_core.messages import AnyMessage, HumanMessage, AIMessage
6
+ from langgraph.prebuilt import ToolNode, tools_condition
7
+ from langgraph.graph import START, StateGraph, MessagesState
8
+ from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
9
+ from tools import wiki_search, tavily_search, arxiv_search, add, subtract, multiply, divide, power, sqrt, modulus
10
+
11
+
12
+ load_dotenv()
13
+ HF_TOKEN = os.getenv("HF_TOKEN")
14
+
15
+ TOOLS = [
16
+ wiki_search,
17
+ tavily_search,
18
+ arxiv_search,
19
+ add,
20
+ subtract,
21
+ multiply,
22
+ divide,
23
+ power,
24
+ sqrt,
25
+ modulus
26
+ ]
27
+
28
+ def build_agent():
29
+ # Define llm from Hugging Face
30
+ llm = HuggingFaceEndpoint(
31
+ repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
32
+ huggingfacehub_api_token=HF_TOKEN
33
+ )
34
+
35
+ # Define chat interface and the tools
36
+ chat = ChatHuggingFace(llm=llm, verbose=True)
37
+ chat_w_tools = chat.bind_tools(TOOLS)
38
+
39
+ # Node
40
+ def assistant(state: MessagesState):
41
+ """Assistant node"""
42
+ return {"messages": [chat_w_tools.invoke(state["messages"])]}
43
+
44
+
45
+ builder = StateGraph(MessagesState)
46
+
47
+ builder.add_node("assistant", assistant)
48
+ builder.add_node("tools", ToolNode(TOOLS))
49
+
50
+ builder.add_edge(START, "assistant")
51
+ builder.add_conditional_edges(
52
+ "assistant",
53
+ tools_condition,
54
+ )
55
+ builder.add_edge("tools", "assistant")
56
+
57
+ # Compile graph
58
+ return builder.compile()
app.py CHANGED
@@ -3,6 +3,9 @@ import gradio as gr
3
  import requests
4
  import inspect
5
  import pandas as pd
 
 
 
6
 
7
  # (Keep Constants as is)
8
  # --- Constants ---
@@ -12,12 +15,19 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
12
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
13
  class BasicAgent:
14
  def __init__(self):
 
15
  print("BasicAgent initialized.")
16
  def __call__(self, question: str) -> str:
17
- print(f"Agent received question (first 50 chars): {question[:50]}...")
18
- fixed_answer = "This is a default answer."
19
- print(f"Agent returning fixed answer: {fixed_answer}")
20
- return fixed_answer
 
 
 
 
 
 
21
 
22
  def run_and_submit_all( profile: gr.OAuthProfile | None):
23
  """
@@ -25,7 +35,8 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
25
  and displays the results.
26
  """
27
  # --- Determine HF Space Runtime URL and Repo URL ---
28
- space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
 
29
 
30
  if profile:
31
  username= f"{profile.username}"
 
3
  import requests
4
  import inspect
5
  import pandas as pd
6
+ from agent import build_agent
7
+ from config import SYSTEM_PROMPT, SPACE_ID
8
+ from langchain_core.messages import SystemMessage, HumanMessage
9
 
10
  # (Keep Constants as is)
11
  # --- Constants ---
 
15
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
16
  class BasicAgent:
17
  def __init__(self):
18
+ self.agent = build_agent()
19
  print("BasicAgent initialized.")
20
  def __call__(self, question: str) -> str:
21
+ messages = [
22
+ SystemMessage(content=SYSTEM_PROMPT),
23
+ HumanMessage(content=question)
24
+ ]
25
+ messages = self.graph.invoke({"messages": messages})
26
+
27
+ answer = messages['messages'][-1].content
28
+ final_answer = answer.split("FINAL ANSWER: ")[-1].strip()
29
+
30
+ return final_answer
31
 
32
  def run_and_submit_all( profile: gr.OAuthProfile | None):
33
  """
 
35
  and displays the results.
36
  """
37
  # --- Determine HF Space Runtime URL and Repo URL ---
38
+ # space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
39
+ space_id = SPACE_ID
40
 
41
  if profile:
42
  username= f"{profile.username}"
config.py CHANGED
@@ -1,7 +1,13 @@
1
  import os
 
2
  from dotenv import load_dotenv
3
 
4
 
5
  load_dotenv()
6
 
7
- TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
 
 
 
 
 
 
1
  import os
2
+ import yaml
3
  from dotenv import load_dotenv
4
 
5
 
6
  load_dotenv()
7
 
8
+ TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
9
+ SPACE_ID = os.getenv("SPACE_ID")
10
+
11
+ with open("system_prompt.yaml", "r") as f:
12
+ SYSTEM_PROMPT = yaml.safe_load(f)
13
+ SYSTEM_PROMPT = SYSTEM_PROMPT["system_prompt"]
requirements.txt CHANGED
@@ -1,2 +1,14 @@
1
  gradio
2
- requests
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  gradio
2
+ requests
3
+ pandas
4
+ langchain
5
+ python-dotenv
6
+ langgraph
7
+ langchain_openai
8
+ langchain_community
9
+ langchain_tavily
10
+ wikipedia
11
+ pymupdf
12
+ arxiv
13
+ flake8
14
+ langchain_huggingface
system_prompt.yaml ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ system_prompt: |
2
+ You are a general AI assistant. I will ask you a question. Report your thoughts, and
3
+ finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER].
4
+ YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated
5
+ list of numbers and/or strings.
6
+ If you are asked for a number, don't use comma to write your number neither use units such as $ or
7
+ percent sign unless specified otherwise.
8
+ If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the
9
+ digits in plain text unless specified otherwise.
10
+ If you are asked for a comma separated list, apply the above rules depending on whether the element
11
+ to be put in the list is a number or a string