Tomkuijpers2232 committed on
Commit
a1baab2
·
verified ·
1 Parent(s): 922ff2c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +213 -151
app.py CHANGED
@@ -1,163 +1,225 @@
1
  import os
2
- from dotenv import load_dotenv
3
- from typing import TypedDict, Annotated
4
- from langgraph.graph import START, StateGraph, MessagesState
5
- from langgraph.graph.message import add_messages
6
- from langchain_core.messages import AnyMessage, HumanMessage, AIMessage, SystemMessage
7
- from langgraph.prebuilt import ToolNode
8
- from langgraph.graph import START, StateGraph
9
- from langgraph.prebuilt import tools_condition
10
- from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
11
- from langchain_core.tools import tool
12
- from langchain_community.document_loaders import WikipediaLoader
13
- from langchain_google_genai import ChatGoogleGenerativeAI
14
- from langchain_tavily import TavilySearch
15
  import json
16
 
17
- load_dotenv()
 
 
18
 
19
# ReAct system prompt: forces the model to end with a strictly formatted
# "FINAL ANSWER:" line so the answer can be parsed out by LangGraphAgent.
# Fix: removed a stray trailing "w" typo that was being fed to the LLM.
REACT_SYSTEM_PROMPT = """You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
"""
22
-
23
@tool
def multiply(a:int, b:int) -> int:
    """Multiply two integers and return the product."""
    return a * b

@tool
def add(a:int, b:int) -> int:
    """Add two integers and return the sum."""
    return a + b

@tool
def subtract(a:int, b:int) -> int:
    """Subtract b from a and return the difference."""
    return a - b

@tool
def divide(a:int, b:int) -> float:
    """Divide a by b using true division.

    NOTE(review): `/` always produces a float, so the return annotation is
    float (was int). Raises ZeroDivisionError when b == 0.
    """
    return a / b

@tool
def wikidata_search(query: str) -> dict:
    """
    Search for information on Wikipedia and return maximum 2 results.

    Args:
        query: The search query.

    Returns:
        Dict with a single key "wiki_results" mapping to the matched
        documents formatted as <Document .../> blocks separated by "---".
        (Annotation corrected from str: the function returns a dict.)
    """
    loader = WikipediaLoader(query=query, load_max_docs=2)
    docs = loader.load()
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
            for doc in docs
        ])
    return {"wiki_results": formatted_search_docs}

# Initialize Tavily Search Tool (general-topic web search, top 3 hits)
tavily_search_tool = TavilySearch(
    max_results=3,
    topic="general",
)

# Full tool set bound to the LLM in build_graph() and wrapped by ToolNode.
tools = [multiply, add, subtract, divide, wikidata_search, tavily_search_tool]
75
-
76
def build_graph():
    """Build and compile the LangGraph ReAct agent graph.

    Topology: START -> agent -> (tools_condition) -> tools -> agent,
    looping until the model stops emitting tool calls, at which point
    tools_condition routes to the end.

    Returns:
        A compiled LangGraph graph; invoke with {"messages": [...]}.
    """
    # Gemini 2.0 Flash with the tool set bound so it can emit tool calls.
    llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", api_key=os.getenv("GOOGLE_API_KEY"))
    llm_with_tools = llm.bind_tools(tools)

    def agent_node(state: MessagesState) -> MessagesState:
        """This is the agent node with ReAct methodology"""
        messages = state["messages"]

        # Add system prompt if not already present
        if not messages or not isinstance(messages[0], SystemMessage):
            messages = [SystemMessage(content=REACT_SYSTEM_PROMPT)] + messages

        return {"messages": [llm_with_tools.invoke(messages)]}

    builder = StateGraph(MessagesState)
    builder.add_node("agent", agent_node)
    builder.add_node("tools", ToolNode(tools))

    # tools_condition inspects the last AI message: tool calls go to the
    # "tools" node, whose output is fed back to the agent.
    builder.add_edge(START, "agent")
    builder.add_conditional_edges("agent", tools_condition)
    builder.add_edge("tools", "agent")

    return builder.compile()
102
-
103
class LangGraphAgent:
    """Callable wrapper around the compiled LangGraph ReAct graph.

    Calling an instance with a question returns a dict with keys
    "task_id", "model_answer" and "reasoning_trace".
    """

    def __init__(self):
        # Compile the graph once at construction; reused for every call.
        self.graph = build_graph()
        print("LangGraphAgent initialized with tools.")

    def __call__(self, question: str, task_id: str = None) -> dict:
        """Run the agent on a question and return structured answer with reasoning trace"""
        try:
            messages = [HumanMessage(content=question)]
            result = self.graph.invoke({"messages": messages})

            # Print all messages for debugging
            for m in result["messages"]:
                m.pretty_print()

            # Extract the final answer and build reasoning trace
            final_answer = result["messages"][-1].content

            # Build reasoning trace from all messages
            reasoning_steps = []
            for i, msg in enumerate(result["messages"]):
                if isinstance(msg, SystemMessage):
                    reasoning_steps.append(f"Step {i+1}: System prompt loaded for ReAct methodology")
                elif isinstance(msg, HumanMessage):
                    reasoning_steps.append(f"Step {i+1}: Received question: {msg.content}")
                elif isinstance(msg, AIMessage):
                    # One trace entry per tool call; otherwise log the text.
                    if msg.tool_calls:
                        for tool_call in msg.tool_calls:
                            reasoning_steps.append(f"Step {i+1}: Called tool '{tool_call['name']}' with args: {tool_call['args']}")
                    else:
                        reasoning_steps.append(f"Step {i+1}: AI reasoning: {msg.content}")
                else:
                    # Tool messages (or anything else): truncate long output.
                    reasoning_steps.append(f"Step {i+1}: Tool response: {str(msg.content)[:200]}...")

            reasoning_trace = " | ".join(reasoning_steps)

            # Extract the final answer from the AI response; the system
            # prompt requires a "FINAL ANSWER:" marker, but fall back to
            # the whole message if it is missing.
            model_answer = final_answer
            if "FINAL ANSWER:" in final_answer:
                model_answer = final_answer.split("FINAL ANSWER:")[-1].strip()

            return {
                "task_id": task_id or "unknown",
                "model_answer": model_answer,
                "reasoning_trace": reasoning_trace
            }

        except Exception as e:
            # Broad catch: any failure becomes a structured error answer so
            # callers always receive the same dict shape.
            return {
                "task_id": task_id or "unknown",
                "model_answer": f"Error: {str(e)}",
                "reasoning_trace": f"Error occurred during processing: {str(e)}"
            }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
156
 
157
if __name__ == "__main__":
    # Smoke test: the question is the sentence 'If you understand this
    # sentence, write the opposite of the word "left" as the answer.'
    # written in reverse.
    agent = LangGraphAgent()
    question = ".rewsna eht sa \"tfel\" drow eht fo etisoppo eht etirw ,ecnetnes siht dnatsrednu uoy fI"
    answer = agent(question)
    # Fix: the result was computed but silently discarded; surface it.
    print(json.dumps(answer, indent=2))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
2
+ import gradio as gr
3
+ import requests
4
+ import inspect
5
+ import pandas as pd
6
+ from agent import LangGraphAgent
 
 
 
 
 
 
 
 
7
  import json
8
 
9
+ # (Keep Constants as is)
10
+ # --- Constants ---
11
+ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
12
 
13
+ # --- Basic Agent Definition ---
14
+ # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
15
class BasicAgent:
    """Placeholder agent: answers every question with the same canned reply.

    Kept as the template baseline; the evaluation runner actually uses
    LangGraphAgent.
    """

    def __init__(self):
        print("BasicAgent initialized.")

    def __call__(self, question: str, task_id: str = None) -> dict:
        # Log a short preview of the incoming question, then return the
        # fixed response in the standard answer-dict shape.
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        fixed_answer = "This is a default answer."
        print(f"Agent returning fixed answer: {fixed_answer}")
        response = {
            "task_id": task_id if task_id else "unknown",
            "model_answer": fixed_answer,
            "reasoning_trace": "Basic agent with fixed response - no actual reasoning performed",
        }
        return response
27
+
28
def run_and_submit_all( profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the agent (LangGraphAgent) on them,
    submits all answers, and displays the results.

    Args:
        profile: OAuth profile injected by Gradio's LoginButton; None when
            the user is not logged in.

    Returns:
        Tuple of (status message str, pandas DataFrame of per-question
        results or None on early failure).
    """
    # --- Determine HF Space Runtime URL and Repo URL ---
    space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code

    if profile:
        username= f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate Agent ( modify this part to create your agent)
    try:
        agent = LangGraphAgent() # Use LangGraphAgent instead of BasicAgent
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None
    # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except requests.exceptions.JSONDecodeError as e:
        print(f"Error decoding JSON response from questions endpoint: {e}")
        print(f"Response text: {response.text[:500]}")
        return f"Error decoding server response for questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None

    # 3. Run your Agent
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            agent_response = agent(question_text, task_id)

            # Format the submission answer to include both model_answer and reasoning_trace
            submission_answer = json.dumps({
                "model_answer": agent_response["model_answer"],
                "reasoning_trace": agent_response["reasoning_trace"]
            })

            answers_payload.append({"task_id": task_id, "submitted_answer": submission_answer})
            # Truncate long traces for display only; the full trace is submitted.
            results_log.append({
                "Task ID": task_id,
                "Question": question_text,
                "Model Answer": agent_response["model_answer"],
                "Reasoning Trace": agent_response["reasoning_trace"][:200] + "..." if len(agent_response["reasoning_trace"]) > 200 else agent_response["reasoning_trace"]
            })
        except Exception as e:
            # Per-question failures are recorded and submitted as errors so
            # one bad task does not abort the whole run.
            print(f"Error running agent on task {task_id}: {e}")
            error_response = {
                "model_answer": f"AGENT ERROR: {e}",
                "reasoning_trace": f"Error occurred during processing: {e}"
            }
            submission_answer = json.dumps(error_response)
            answers_payload.append({"task_id": task_id, "submitted_answer": submission_answer})
            results_log.append({
                "Task ID": task_id,
                "Question": question_text,
                "Model Answer": error_response["model_answer"],
                "Reasoning Trace": error_response["reasoning_trace"]
            })

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare Submission
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)

    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
170
+
171
+
172
# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**

        1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
        2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

        ---
        **Disclaimers:**
        Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
        """
    )

    # LoginButton supplies the gr.OAuthProfile argument that Gradio
    # auto-injects into run_and_submit_all based on its type annotation.
    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")

    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    # Removed max_rows=10 from DataFrame constructor
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    # Wire the button: no explicit inputs (the OAuth profile is injected);
    # outputs map to run_and_submit_all's (status, DataFrame) return tuple.
    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )
202
 
203
if __name__ == "__main__":
    # Startup banner, then environment diagnostics, then the UI launch.
    print("\n" + "-"*30 + " App Starting " + "-"*30)

    # Check for SPACE_HOST and SPACE_ID at startup for information
    host = os.getenv("SPACE_HOST")
    space_id = os.getenv("SPACE_ID")

    if host:
        print(f"✅ SPACE_HOST found: {host}")
        print(f"   Runtime URL should be: https://{host}.hf.space")
    else:
        print("ℹ️  SPACE_HOST environment variable not found (running locally?).")

    # Repo URLs can only be derived when SPACE_ID is set by the HF runtime.
    if space_id:
        print(f"✅ SPACE_ID found: {space_id}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id}/tree/main")
    else:
        print("ℹ️  SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    # Closing rule matches the banner width (30 + len(title) + 30).
    print("-"*(60 + len(" App Starting ")) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)