# app.py — GAIA agent evaluation Space (Gradio UI + Groq-hosted Llama agent)
import os
import gradio as gr
import requests
import pandas as pd
from typing import Dict, List
import asyncio
# custom imports
from agents import Agent
from tool import get_tools
from model import get_model
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
MODEL_ID = "groq/llama-3.3-70b-versatile" # Groq's fastest model
RATE_LIMIT_DELAY = 1 # Groq has generous rate limits
# --- Async Question Processing ---
async def process_question(agent, question: str, task_id: str) -> Dict:
    """Run the agent on a single question and package the outcome.

    Exceptions never propagate: a failure is converted into an
    "ERROR: ..." answer so one bad question cannot abort the whole run.

    Args:
        agent: Callable taking the question text and returning an answer string.
        question: The question text to answer.
        task_id: Scoring-API identifier for this question.

    Returns:
        Dict with a "submission" entry (API payload shape) and a "log"
        entry (human-readable row for the results table).
    """
    try:
        answer = agent(question)
    except Exception as exc:
        # Same string the scoring API would have received on success,
        # but flagged so the failure is visible in the results table.
        answer = f"ERROR: {str(exc)}"
    return {
        "submission": {"task_id": task_id, "submitted_answer": answer},
        "log": {"Task ID": task_id, "Question": question, "Submitted Answer": answer}
    }
async def run_questions_async(agent, questions_data: List[Dict]) -> tuple:
    """Answer every question in order, throttling between requests.

    A RATE_LIMIT_DELAY-second pause is inserted before every request
    except the first, to stay within the model provider's rate limits.

    Args:
        agent: Callable passed through to process_question.
        questions_data: Dicts each carrying "question" and "task_id" keys.

    Returns:
        (submissions, logs) — two parallel lists, one entry per question.
    """
    submissions: List[Dict] = []
    logs: List[Dict] = []
    question_count = len(questions_data)
    for position, item in enumerate(questions_data, start=1):
        print(f"Processing {position}/{question_count}: {item['question'][:80]}...")
        if position > 1:
            # Throttle every request after the first one.
            await asyncio.sleep(RATE_LIMIT_DELAY)
        outcome = await process_question(agent, item["question"], item["task_id"])
        submissions.append(outcome["submission"])
        logs.append(outcome["log"])
    return submissions, logs
async def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the Agent on them, submits all answers,
    and displays the results.

    Args:
        profile: The logged-in Hugging Face profile, or None if logged out.

    Returns:
        A (status_message, results_dataframe) tuple matching the two
        Gradio output components; the dataframe is None on early failure.
    """
    space_id = os.getenv("SPACE_ID")

    # The scoring API needs a username, so a login is mandatory.
    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate Agent
    try:
        agent = Agent(
            model=get_model("LiteLLMModel", MODEL_ID),
            tools=get_tools()
        )
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None

    # Link back to this Space's source so graders can inspect the agent code.
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(f"Agent code: {agent_code}")

    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
        # Rough ETA: one rate-limit pause per question, reported in minutes.
        estimated_time = len(questions_data) * RATE_LIMIT_DELAY / 60
        print(f"⏱️ Estimated time: {estimated_time:.1f} minutes")
    except Exception as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None

    # 3. Run Agent
    print(f"Running agent on {len(questions_data)} questions...")
    answers_payload, results_log = await run_questions_async(agent, questions_data)
    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare Submission
    submission_data = {
        "username": username.strip(),
        "agent_code": agent_code,
        "answers": answers_payload
    }
    print(f"Submitting {len(answers_payload)} answers for user '{username}'...")

    # 5. Submit
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        # NOTE(review): this literal was corrupted by a bad encoding round
        # trip (split across lines); reconstructed as the intended ✅ emoji.
        final_status = (
            f"✅ Submission Successful!\n\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n\n"
            f"Message: {result_data.get('message', 'No message received.')}\n\n"
            f"Leaderboard: {api_url}/leaderboard"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except Exception as e:
        # Even on a failed submission, show what the agent answered.
        status_message = f"❌ Submission Failed: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
# --- Build Gradio Interface ---
# NOTE(review): the emoji in this UI were mojibake'd by an encoding round
# trip ("π€", "β οΈ", ...); reconstructed with best-guess glyphs — confirm
# against the original file if exact icons matter.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤖 GAIA Agent Evaluation")
    gr.Markdown(
        f"""
    **Instructions:**
    1. Log in to your Hugging Face account using the button below
    2. Click 'Run Evaluation & Submit' to test your agent
    3. The agent will use web search and other tools to answer questions
    **Current Setup:**
    - Model: Llama 3.3 70B (via Groq)
    - Tools: Web search, Wikipedia, calculation, and more
    - Rate Limiting: {RATE_LIMIT_DELAY}s between requests
    ⚠️ **Note:** Make sure you have set your GROQ_API_KEY in the Space secrets.
    """
    )
    gr.LoginButton()
    run_button = gr.Button("🚀 Run Evaluation & Submit", variant="primary")
    status_output = gr.Textbox(label="📋 Status / Results", lines=8, interactive=False)
    results_table = gr.DataFrame(label="📊 Questions and Answers", wrap=True, max_height=400)
    # No inputs listed: Gradio injects the OAuth profile automatically
    # because run_and_submit_all annotates a gr.OAuthProfile parameter.
    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )
if __name__ == "__main__":
    # Startup banner: model in use plus the Space's runtime/repo URLs when
    # running on Hugging Face (SPACE_HOST / SPACE_ID are set by the platform).
    print("\n" + "="*70)
    # NOTE(review): several emoji below were corrupted by an encoding round
    # trip; reconstructed with best-guess glyphs.
    print("🤖 GAIA Agent Starting")
    print("="*70)
    print(f"📍 Using Model: {MODEL_ID}")
    space_host = os.getenv("SPACE_HOST")
    space_id = os.getenv("SPACE_ID")
    if space_host:
        # These two prints were split mid-f-string in the corrupted source;
        # rejoined onto single lines.
        print(f"✅ Runtime URL: https://{space_host}.hf.space")
    if space_id:
        print(f"✅ Repo URL: https://huggingface.co/spaces/{space_id}")
    print("="*70 + "\n")
    demo.launch(debug=True, share=False)