mause123 commited on
Commit
b5610f8
·
1 Parent(s): 445cb8c

Update to RobotPai - LangGraph AI Agent with tools

Browse files
Files changed (4) hide show
  1. agent.py +206 -0
  2. app.py +205 -60
  3. requirements.txt +18 -1
  4. system_prompt.txt +5 -0
agent.py ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """LangGraph Agent"""
2
+ import os
3
+ from dotenv import load_dotenv
4
+ from langgraph.graph import START, StateGraph, MessagesState
5
+ from langgraph.prebuilt import tools_condition
6
+ from langgraph.prebuilt import ToolNode
7
+ from langchain_google_genai import ChatGoogleGenerativeAI
8
+ from langchain_groq import ChatGroq
9
+ from langchain_core.messages import SystemMessage, HumanMessage
10
+ from langchain_core.tools import tool
11
+
12
# Optional imports - will be used if available.
# The search tools below degrade gracefully (return an explanatory message)
# when langchain-community is not installed, so the availability is recorded
# in module-level flags instead of letting the import error propagate.
try:
    from langchain_community.tools.tavily_search import TavilySearchResults
    TAVILY_AVAILABLE = True
except ImportError:
    TAVILY_AVAILABLE = False

try:
    from langchain_community.document_loaders import WikipediaLoader
    WIKIPEDIA_AVAILABLE = True
except ImportError:
    WIKIPEDIA_AVAILABLE = False

# Load environment variables from a local .env file before any model client
# is constructed (presumably GOOGLE_API_KEY / GROQ_API_KEY / TAVILY_API_KEY —
# TODO confirm which keys the deployment actually relies on).
load_dotenv()
26
+
27
@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers.

    Args:
        a: first int
        b: second int

    Returns:
        The product of a and b.
    """
    product = a * b
    return product
36
+
37
@tool
def add(a: int, b: int) -> int:
    """Add two numbers.

    Args:
        a: first int
        b: second int

    Returns:
        The sum of a and b.
    """
    total = a + b
    return total
46
+
47
@tool
def subtract(a: int, b: int) -> int:
    """Subtract two numbers.

    Args:
        a: first int
        b: second int

    Returns:
        The difference a minus b.
    """
    difference = a - b
    return difference
56
+
57
@tool
def divide(a: int, b: int) -> float:
    """Divide two numbers.

    Args:
        a: first int
        b: second int

    Returns:
        The quotient a / b as a float (true division).

    Raises:
        ValueError: If b is zero.
    """
    # Fix: the annotation previously said `-> int`, but `/` is true division
    # and always yields a float.
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    return a / b
68
+
69
@tool
def modulus(a: int, b: int) -> int:
    """Get the modulus of two numbers.

    Args:
        a: first int
        b: second int

    Returns:
        The remainder of a divided by b.
    """
    remainder = a % b
    return remainder
78
+
79
@tool
def wiki_search(query: str) -> str:
    """Search Wikipedia for a query and return maximum 2 results.

    Args:
        query: The search query.

    Returns:
        The matching documents formatted as <Document .../> blocks, or an
        explanatory message when langchain-community is not installed.
    """
    # Fix: previously returned a dict despite the `-> str` annotation;
    # return the formatted string directly so the tool output matches its
    # declared contract.
    if not WIKIPEDIA_AVAILABLE:
        return "Wikipedia search is not available. Please install langchain-community to enable this feature."

    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
    return "\n\n---\n\n".join(
        f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
        for doc in search_docs
    )
95
+
96
@tool
def web_search(query: str) -> str:
    """Search Tavily for a query and return maximum 3 results.

    Args:
        query: The search query.

    Returns:
        The matching results formatted as <Document .../> blocks, or an
        explanatory message when langchain-community is not installed.
    """
    if not TAVILY_AVAILABLE:
        return "Tavily search is not available. Please install langchain-community to enable this feature."

    # Fix 1: `invoke(query=query)` omitted the required positional input
    # argument of Runnable.invoke and raised a TypeError; pass the query
    # positionally.
    # Fix 2: TavilySearchResults returns a list of plain dicts (with "url"
    # and "content" keys), not Document objects, so the previous
    # `doc.metadata[...]` access raised AttributeError.
    # Fix 3: return a string to match the `-> str` annotation instead of a dict.
    search_results = TavilySearchResults(max_results=3).invoke(query)
    return "\n\n---\n\n".join(
        f'<Document source="{result.get("url", "")}"/>\n{result.get("content", "")}\n</Document>'
        for result in search_results
    )
112
+
113
@tool
def arvix_search(query: str) -> str:
    """Search Arxiv for a query and return maximum 3 result.

    Args:
        query: The search query.

    Returns:
        The matching papers formatted as <Document .../> blocks (content
        truncated to 1000 chars), or an explanatory message when
        langchain-community is not installed.
    """
    # NOTE: the "arvix" misspelling is kept because the name is referenced
    # in the module-level `tools` list and exposed to the LLM.
    # Fix 1: the availability guard previously checked the unrelated
    # WIKIPEDIA_AVAILABLE flag; the local try/except on the import is the
    # authoritative check, so the redundant flag test is dropped.
    # Fix 2: return a string to match the `-> str` annotation instead of a dict.
    try:
        from langchain_community.document_loaders import ArxivLoader
    except ImportError:
        return "Arxiv search is not available. Please install langchain-community to enable this feature."

    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
    return "\n\n---\n\n".join(
        f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
        for doc in search_docs
    )
133
+
134
# load the system prompt from the file
try:
    with open("system_prompt.txt", "r", encoding="utf-8") as f:
        system_prompt = f.read()
except FileNotFoundError:
    # Fallback prompt so the agent still starts when system_prompt.txt is
    # missing from the deployment. NOTE(review): unlike the file-based prompt,
    # this fallback does not demand the "FINAL ANSWER:" template that
    # BasicAgent's answer post-processing expects — confirm this is intended.
    system_prompt = """You are RobotPai, a helpful AI assistant. You can help with calculations, answer questions, and search for information when needed. You have access to various tools including:
- Basic math operations (add, subtract, multiply, divide, modulus)
- Web search (if configured)
- Wikipedia search (if configured)
- Arxiv search (if configured)

Please be helpful and provide accurate information."""

# System message prepended to every model call by the assistant node.
sys_msg = SystemMessage(content=system_prompt)
149
+
150
+
151
+
152
# Tools exposed to the LLM: bound to the model via `bind_tools` and executed
# by the graph's ToolNode in build_graph().
tools = [
    multiply,
    add,
    subtract,
    divide,
    modulus,
    wiki_search,
    web_search,
    arvix_search,
]
162
+
163
# Build graph function
def build_graph(provider: str = "google"):
    """Construct and compile the tool-calling agent graph.

    Args:
        provider: Chat-model backend to use, either "google" (Gemini) or
            "groq".

    Returns:
        The compiled LangGraph runnable.

    Raises:
        ValueError: If `provider` is not a supported name.
    """
    if provider == "google":
        # Google Gemini
        chat_model = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
    elif provider == "groq":
        # Groq https://console.groq.com/docs/models
        chat_model = ChatGroq(model="qwen-qwq-32b", temperature=0)  # optional : qwen-qwq-32b gemma2-9b-it
    else:
        raise ValueError("Invalid provider. Choose 'google' or 'groq'.")

    # Make the model aware of the tool schemas so it can emit tool calls.
    model_with_tools = chat_model.bind_tools(tools)

    def assistant(state: MessagesState):
        """Assistant node: prepend the system prompt and call the model."""
        reply = model_with_tools.invoke([sys_msg] + state["messages"])
        return {"messages": [reply]}

    graph = StateGraph(MessagesState)
    graph.add_node("assistant", assistant)
    graph.add_node("tools", ToolNode(tools))
    graph.add_edge(START, "assistant")
    # Route to the tool node when the last message contains tool calls,
    # otherwise end the run.
    graph.add_conditional_edges("assistant", tools_condition)
    graph.add_edge("tools", "assistant")

    # Compile graph
    return graph.compile()
196
+
197
# test: manual smoke run (requires a GROQ API key in the environment).
if __name__ == "__main__":
    question = "When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?"
    # Build the graph
    graph = build_graph(provider="groq")
    # Run the graph
    messages = [HumanMessage(content=question)]
    messages = graph.invoke({"messages": messages})
    # Print the full conversation trace (human, assistant, tool messages).
    for m in messages["messages"]:
        m.pretty_print()
app.py CHANGED
@@ -1,64 +1,209 @@
 
 
 
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
-
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
-
9
-
10
- def respond(
11
- message,
12
- history: list[tuple[str, str]],
13
- system_message,
14
- max_tokens,
15
- temperature,
16
- top_p,
17
- ):
18
- messages = [{"role": "system", "content": system_message}]
19
-
20
- for val in history:
21
- if val[0]:
22
- messages.append({"role": "user", "content": val[0]})
23
- if val[1]:
24
- messages.append({"role": "assistant", "content": val[1]})
25
-
26
- messages.append({"role": "user", "content": message})
27
-
28
- response = ""
29
-
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
38
-
39
- response += token
40
- yield response
41
-
42
-
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- demo = gr.ChatInterface(
47
- respond,
48
- additional_inputs=[
49
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
52
- gr.Slider(
53
- minimum=0.1,
54
- maximum=1.0,
55
- value=0.95,
56
- step=0.05,
57
- label="Top-p (nucleus sampling)",
58
- ),
59
- ],
60
- )
61
 
62
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63
  if __name__ == "__main__":
64
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Basic Agent Evaluation Runner"""
2
+ import os
3
+ import inspect
4
  import gradio as gr
5
+ import requests
6
+ import pandas as pd
7
+ from langchain_core.messages import HumanMessage
8
+ from agent import build_graph
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
10
 
11
+
12
+ # (Keep Constants as is)
13
+ # --- Constants ---
14
+ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
15
+
16
+ # --- Basic Agent Definition ---
17
 + # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
18
+
19
+
20
class BasicAgent:
    """A langgraph agent.

    Wraps the compiled graph from agent.build_graph() and extracts the text
    after the "FINAL ANSWER:" marker that the system prompt asks the model
    to emit.
    """

    def __init__(self):
        print("BasicAgent initialized.")
        self.graph = build_graph()

    @staticmethod
    def _strip_final_answer(answer: str) -> str:
        """Return the text after the last "FINAL ANSWER:" marker.

        Fix: the previous code blindly sliced `answer[14:]` (the length of
        "FINAL ANSWER: "), which mangled any answer that did not start with
        that exact prefix. Now the marker is located explicitly and the
        whole answer is returned (stripped) when it is absent.
        """
        marker = "FINAL ANSWER:"
        idx = answer.rfind(marker)
        if idx != -1:
            return answer[idx + len(marker):].strip()
        return answer.strip()

    def __call__(self, question: str) -> str:
        """Run the graph on one question and return the extracted answer."""
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        # Wrap the question in a HumanMessage from langchain_core
        messages = [HumanMessage(content=question)]
        result = self.graph.invoke({"messages": messages})
        answer = result["messages"][-1].content
        return self._strip_final_answer(answer)
33
+
34
+
35
def run_and_submit_all( profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the BasicAgent on them, submits all answers,
    and displays the results.

    Args:
        profile: OAuth profile injected by gradio's login flow; None when the
            user is not logged in.

    Returns:
        A (status_message, results_dataframe) tuple for the two gradio
        outputs; the dataframe is None when the run aborts before any
        question is attempted.
    """
    # --- Determine HF Space Runtime URL and Repo URL ---
    space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code

    if profile:
        username= f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate Agent ( modify this part to create your agent)
    try:
        agent = BasicAgent()
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None
    # In the case of an app running as a hugging Face space, this link points toward your codebase ( useful for others so please keep it public)
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except requests.exceptions.JSONDecodeError as e:
        print(f"Error decoding JSON response from questions endpoint: {e}")
        print(f"Response text: {response.text[:500]}")
        return f"Error decoding server response for questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None

    # 3. Run your Agent
    # Per-question failures are recorded in the results table instead of
    # aborting the whole run.
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare Submission
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)

    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        # Extract the server-provided detail when the error body is JSON,
        # otherwise fall back to the raw response text.
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
154
+
155
+
156
# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**

        1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
        2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

        ---
        **Disclaimers:**
        Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
        """
    )

    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")

    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    # Removed max_rows=10 from DataFrame constructor
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    # No `inputs=` here: presumably gradio injects the OAuthProfile argument
    # automatically because run_and_submit_all's parameter is annotated with
    # gr.OAuthProfile — TODO confirm against the gradio OAuth docs.
    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )
186
+
187
if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    # Check for SPACE_HOST and SPACE_ID at startup for information
    # (purely diagnostic: helps confirm whether we run inside a HF Space).
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup

    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️  SPACE_HOST environment variable not found (running locally?).")

    if space_id_startup: # Print repo URLs if SPACE_ID is found
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️  SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-"*(60 + len(" App Starting ")) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    # debug=True surfaces tracebacks in the UI; share=False keeps the app
    # on its own host (appropriate inside a HF Space).
    demo.launch(debug=True, share=False)
requirements.txt CHANGED
@@ -1 +1,18 @@
1
- huggingface_hub==0.25.2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ gradio
2
+ requests
3
+ langchain
4
+ langchain-community
5
+ langchain-core
6
+ langchain-google-genai
7
+ langchain-huggingface
8
+ langchain-groq
9
+ langchain-tavily
10
+ langchain-chroma
11
+ langgraph
12
+ huggingface_hub
13
+ supabase
14
+ arxiv
15
+ pymupdf
16
+ wikipedia
17
+ pgvector
18
+ python-dotenv
system_prompt.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ You are a helpful assistant tasked with answering questions using a set of tools.
2
+ Now, I will ask you a question. Report your thoughts, and finish your answer with the following template:
3
+ FINAL ANSWER: [YOUR FINAL ANSWER].
4
 + YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending on whether the element to be put in the list is a number or a string.
5
+ Your answer should only start with "FINAL ANSWER: ", then follows with the answer.