import os
import gradio as gr
import requests
import inspect
import pandas as pd
import time
import sys
from io import StringIO
from typing import TypedDict, Annotated
from langchain_core.tools import tool
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
from langchain_community.tools import DuckDuckGoSearchRun
from langgraph.graph import START, StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import tools_condition, ToolNode
from langchain_core.messages import AnyMessage, HumanMessage, SystemMessage, ToolMessage
# Detect if running locally or on Hugging Face Spaces
IS_LOCAL = os.getenv("SPACE_ID") is None
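# (SPACE_ID is set automatically by the Hugging Face Spaces runtime, so its
# absence is a reliable signal that we are running locally.)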
if IS_LOCAL:
    # ChatOllama is only needed for local runs; prefer the dedicated
    # langchain-ollama package and fall back to the community import.
    try:
        from langchain_ollama import ChatOllama
    except ImportError:
        from langchain_community.chat_models import ChatOllama
# (Keep Constants as is)
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
# --- Basic Agent Definition ---
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
if IS_LOCAL:
llm = ChatOllama(
model="gemma4:31b",
temperature=0,
)
else:
# When on Hugging Face, use a remote endpoint wrapped in ChatHuggingFace for tool support
llm_base = HuggingFaceEndpoint(
repo_id = "Qwen/Qwen2.5-Coder-32B-Instruct",
huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN")
)
llm = ChatHuggingFace(llm=llm_base)
system_prompt = """You are an AI assistant taking the GAIA benchmark. You must output ONLY the final answer. Do not include any explanations, conversational text or formatting. If the answer is a number, output just the number. If it is a list, output a comma-separated list."""
# Tool Definition
search_tool = DuckDuckGoSearchRun()
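# DuckDuckGoSearchRun needs no API key, but it does require the
# `duckduckgo-search` package to be installed.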
@tool
def python_repl(code: str) -> str:
    """
    Executes Python code and returns the standard output.
    Use this to read files (like CSVs or Excel), process data with pandas, or do math.
    CRITICAL: You MUST use print() to output the final result so it can be captured.
    """
    # Log the incoming code BEFORE redirecting stdout, so this banner is
    # not captured as part of the tool result.
    print(f"\n--- [Executing Python Code] ---\n{code}\n------------------------------")
    old_stdout = sys.stdout
    redirected_output = sys.stdout = StringIO()
    try:
        # Execute the code in the global namespace.
        # NOTE: exec() is unsandboxed; run only in a disposable environment.
        exec(code, globals())
        output = redirected_output.getvalue()
    except Exception as e:
        return f"Error executing code: {e}"
    finally:
        # Restore stdout even if exec() raises.
        sys.stdout = old_stdout
    print(f"--- [Code Output] ---\n{output}\n--------------------")
    return output if output else "Code executed successfully, but printed nothing. Use print() to see output."
# Tools Instantiation
tools = [search_tool, python_repl]
chat_with_tools = llm.bind_tools(tools)
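# bind_tools() attaches the tools' JSON schemas to every request, letting the
# model reply with structured tool calls instead of plain text.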
class AgentState(TypedDict):
    messages: Annotated[list[AnyMessage], add_messages]
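# The `add_messages` reducer appends new messages to the existing list on each
# state update instead of overwriting it, preserving the conversation history.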
def assistant(state: AgentState):
# Log the last tool result if it exists
last_message = state["messages"][-1]
if isinstance(last_message, ToolMessage):
print(f"--- [Tool Result Received] ---\n{last_message.content}\n-----------------------------")
print("\n--- [Assistant is thinking] ---")
response = chat_with_tools.invoke(state["messages"])
# Log content if it's not empty
if response.content:
print(f"Assistant Response: {response.content}")
# Log tool calls
if hasattr(response, 'tool_calls') and response.tool_calls:
for tc in response.tool_calls:
print(f"Tool Call: {tc['name']} with args: {tc['args']}")
return {
"messages": [response]
}
# Graph Instantiation
builder = StateGraph(AgentState)
# Graph Nodes
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))
# Graph Edges
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
"assistant",
tools_condition
)
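# tools_condition routes to the "tools" node when the last assistant message
# contains tool calls, and to END otherwise.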
builder.add_edge("tools", "assistant")
# Agent Compile
my_agent = builder.compile()
class BasicAgent:
def __init__(self):
print("BasicAgent initialized.")
    def __call__(self, question: str, file_name: str | None = None) -> str:
print(f"Agent received question (first 50 chars): {question[:50]}...")
# 1. Construct the prompt with the file path if it exists
if file_name:
# Note: You may need to adjust the path depending on where your space saves downloaded files.
# Usually, the grading space provides the file name, and it sits in the same directory.
prompt = f"Question: {question}\nAttached File: {file_name}\n\nUse the python_repl tool to read and analyze this file using pandas."
else:
prompt = f"Question: {question}"
# Inject the strict GAIA System Prompt and the Human Prompt
messages = [
SystemMessage(content=system_prompt),
            HumanMessage(content=prompt)
]
# Invoke Agent
response_state = my_agent.invoke({"messages": messages})
# Final answer
final_answer = response_state["messages"][-1].content
print(f"--- [Final Answer for this Question] ---\n{final_answer}\n")
return final_answer
def run_and_submit_all(profile: gr.OAuthProfile | None):
"""
Fetches all questions, runs the BasicAgent on them, submits all answers,
and displays the results.
"""
if IS_LOCAL:
username = "local_user"
print(f"Running in LOCAL MODE. Using default username: {username}")
else:
if profile:
username = f"{profile.username}"
print(f"User logged in: {username}")
else:
print("User not logged in.")
return "Please Login to Hugging Face with the button.", None
api_url = DEFAULT_API_URL
questions_url = f"{api_url}/questions"
submit_url = f"{api_url}/submit"
    # 1. Instantiate Agent (modify this part to create your agent)
try:
agent = BasicAgent()
except Exception as e:
print(f"Error instantiating agent: {e}")
return f"Error initializing agent: {e}", None
if IS_LOCAL:
space_id = "local-test-space"
agent_code = "local-execution"
else:
space_id = os.getenv("SPACE_ID")
agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
print(f"Agent code link: {agent_code}")
# 2. Fetch Questions
print(f"Fetching questions from: {questions_url}")
try:
response = requests.get(questions_url, timeout=15)
response.raise_for_status()
questions_data = response.json()
if not questions_data:
print("Fetched questions list is empty.")
return "Fetched questions list is empty or invalid format.", None
print(f"Fetched {len(questions_data)} questions.")
except requests.exceptions.RequestException as e:
print(f"Error fetching questions: {e}")
return f"Error fetching questions: {e}", None
except requests.exceptions.JSONDecodeError as e:
print(f"Error decoding JSON response from questions endpoint: {e}")
print(f"Response text: {response.text[:500]}")
return f"Error decoding server response for questions: {e}", None
except Exception as e:
print(f"An unexpected error occurred fetching questions: {e}")
return f"An unexpected error occurred fetching questions: {e}", None
# 3. Run your Agent
results_log = []
answers_payload = []
print(f"Running agent on {len(questions_data)} questions...")
for item in questions_data:
task_id = item.get("task_id")
question_text = item.get("question")
file_name = item.get("file_name")
if not task_id or question_text is None:
print(f"Skipping item with missing task_id or question: {item}")
continue
# Skip questions with files if running locally
if IS_LOCAL and file_name:
print(f"Skipping task {task_id} because it requires a file: {file_name}")
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": "SKIPPED (Local run - No files)"})
continue
try:
submitted_answer = agent(question_text, file_name)
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
            # Pause between questions to stay under the inference provider's rate limits
            print("Pausing for 15 seconds to respect API rate limits...")
            time.sleep(15)
except Exception as e:
print(f"Error running agent on task {task_id}: {e}")
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
if not answers_payload:
print("Agent did not produce any answers to submit.")
return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
# 4. Prepare Submission
submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
print(status_update)
# 5. Submit
print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
try:
response = requests.post(submit_url, json=submission_data, timeout=60)
response.raise_for_status()
result_data = response.json()
final_status = (
f"Submission Successful!\n"
f"User: {result_data.get('username')}\n"
f"Overall Score: {result_data.get('score', 'N/A')}% "
f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
f"Message: {result_data.get('message', 'No message received.')}"
)
print("Submission successful.")
results_df = pd.DataFrame(results_log)
return final_status, results_df
except requests.exceptions.HTTPError as e:
error_detail = f"Server responded with status {e.response.status_code}."
try:
error_json = e.response.json()
error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
except requests.exceptions.JSONDecodeError:
error_detail += f" Response: {e.response.text[:500]}"
status_message = f"Submission Failed: {error_detail}"
print(status_message)
results_df = pd.DataFrame(results_log)
return status_message, results_df
except requests.exceptions.Timeout:
status_message = "Submission Failed: The request timed out."
print(status_message)
results_df = pd.DataFrame(results_log)
return status_message, results_df
except requests.exceptions.RequestException as e:
status_message = f"Submission Failed: Network error - {e}"
print(status_message)
results_df = pd.DataFrame(results_log)
return status_message, results_df
except Exception as e:
status_message = f"An unexpected error occurred during submission: {e}"
print(status_message)
results_df = pd.DataFrame(results_log)
return status_message, results_df
def run_single_test(question):
"""Runs the agent on a single question and returns the answer."""
try:
agent = BasicAgent()
answer = agent(question)
return answer
except Exception as e:
return f"Error running agent: {e}"
# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
gr.Markdown("# GAIA Agent Runner & Evaluator")
with gr.Tabs():
with gr.TabItem("Single Question Test"):
gr.Markdown("### Test your agent on a single question")
test_input = gr.Textbox(
label="Question",
placeholder="Enter a GAIA-style question here...",
lines=3,
value="How many studio albums were published by Mercedes Sosa throughout her career?"
)
test_button = gr.Button("Run Agent Test")
test_output = gr.Textbox(label="Agent Response", interactive=False, lines=5)
test_button.click(
fn=run_single_test,
inputs=[test_input],
outputs=[test_output]
)
with gr.TabItem("Full Evaluation & Submission"):
gr.Markdown(
"""
**Instructions:**
1. Log in to your Hugging Face account below (required for submission).
2. Click 'Run Evaluation' to fetch ALL questions from the benchmark, run your agent on them, and submit.
*Note: This will skip questions requiring files in local mode.*
"""
)
if not IS_LOCAL:
gr.LoginButton()
run_button = gr.Button("Run Full Evaluation & Submit All Answers", variant="primary")
status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
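            # No `inputs` needed here: Gradio automatically injects the
            # gr.OAuthProfile argument for functions that declare it when a
            # LoginButton is present.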
run_button.click(
fn=run_and_submit_all,
outputs=[status_output, results_table]
)
if __name__ == "__main__":
print("\n" + "-"*30 + " App Starting " + "-"*30)
# Check for SPACE_HOST and SPACE_ID at startup for information
space_host_startup = os.getenv("SPACE_HOST")
space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
if space_host_startup:
print(f"✅ SPACE_HOST found: {space_host_startup}")
print(f" Runtime URL should be: https://{space_host_startup}.hf.space")
else:
print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
if space_id_startup: # Print repo URLs if SPACE_ID is found
print(f"✅ SPACE_ID found: {space_id_startup}")
print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
print(f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
else:
print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
print("-"*(60 + len(" App Starting ")) + "\n")
print("Launching Gradio Interface for Basic Agent Evaluation...")
demo.launch(debug=True, share=False) |