Files changed (7)
  1. README.md +49 -9
  2. agent.py +151 -0
  3. answer_normalize.py +44 -0
  4. app.py +189 -96
  5. inference_client_factory.py +31 -0
  6. requirements.txt +11 -2
  7. run_local_eval.py +111 -0
README.md CHANGED
@@ -1,15 +1,55 @@
  ---
- title: Template Final Assignment
- emoji: 🕵🏻‍♂️
- colorFrom: indigo
- colorTo: indigo
  sdk: gradio
- sdk_version: 5.25.2
  app_file: app.py
  pinned: false
- hf_oauth: true
- # optional, default duration is 8 hours/480 minutes. Max duration is 30 days/43200 minutes.
- hf_oauth_expiration_minutes: 480
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
  ---
+ title: GAIA Unit 4 Agent
+ emoji: 🧭
+ colorFrom: gray
+ colorTo: blue
  sdk: gradio
+ sdk_version: 4.44.0
  app_file: app.py
  pinned: false
+ license: apache-2.0
  ---

+ # GAIA Unit 4 — Hugging Face Agents Course (final assignment)
+
+ This folder is a **drop-in replacement** for the course Space
+ [`agents-course/Final_Assignment_Template`](https://huggingface.co/spaces/agents-course/Final_Assignment_Template).
+
+ ## One-time: create your Space
+
+ 1. On Hugging Face, **Duplicate** the template Space above (or create a new Gradio Space and copy these files into the repo root).
+ 2. In the Space **Settings → Repository secrets**, add:
+    - **`HF_TOKEN`**: a Hugging Face access token with **read** permission (for Inference API / serverless models).
+ 3. Optional **Variables** (or secrets) to tune models:
+    - `HF_INFERENCE_PROVIDER` — **omit by default** so the client uses **`auto`**: the first [inference provider](https://hf.co/settings/inference-providers) that supports your **chosen model** on the Hub. Do **not** set `hf-inference` unless that model lists it — many chat models (e.g. Qwen2.5-7B-Instruct) only support **together** / **featherless-ai**, and forcing `hf-inference` yields **404**. If the auto order hits a provider that returns **401** (e.g. Novita), reorder providers in HF settings or pin e.g. `HF_INFERENCE_PROVIDER=together`.
+    - `GAIA_TEXT_MODEL` — default `Qwen/Qwen2.5-7B-Instruct` (broad provider mapping via Together).
+    - `GAIA_ASR_MODEL` — default `openai/whisper-large-v3`
+    - `GAIA_VISION_MODEL` — default `meta-llama/Llama-3.2-11B-Vision-Instruct`
+    - `GAIA_API_URL` — default `https://agents-course-unit4-scoring.hf.space`
+    - `GAIA_USE_CACHE` — `1` (default) or `0` to disable `gaia_answers_cache.json`
+
+ Keep the Space **public** so `agent_code` (`…/tree/main`) verifies for the leaderboard.
+
+ ## Local dry-run (no submission)
+
+ ```bash
+ cd gaia_unit4_space
+ python -m venv .venv && source .venv/bin/activate
+ pip install -r requirements.txt
+ export HF_TOKEN=hf_...
+ python run_local_eval.py
+ ```
+
+ This fetches `/questions`, runs the agent, prints answers, and writes `local_eval_answers.json`. It does **not** call `/submit`.
+
+ ## What was fixed vs the stock template
+
+ - Downloads attachments when `file_name` is set (`GET /files/{task_id}`).
+ - Tool-using agent (web, Wikipedia, Python, Excel, ASR, vision, YouTube transcripts).
+ - Deterministic shortcuts for the reversed-English puzzle, Cayley-table commutativity, `.py` stdout, and the `.xlsx` food-sales heuristic.
+ - Optional **Crypto** tab (BTC/USD demo only; not used for GAIA).
+
+ ## Leaderboard
+
+ Submit scores via the Gradio app after logging in. Student leaderboard:
+ [`agents-course/Students_leaderboard`](https://huggingface.co/spaces/agents-course/Students_leaderboard).
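
Reviewer note: a quick way to sanity-check the token/provider pairing before a full run. This is a minimal sketch using the README defaults above; the model name and env vars come from this diff, nothing else is assumed.

```python
# Smoke test: verify HF_TOKEN can reach the configured chat model.
import os

from huggingface_hub import InferenceClient

client = InferenceClient(token=os.environ["HF_TOKEN"])  # no provider kwarg -> "auto"
resp = client.chat_completion(
    messages=[{"role": "user", "content": "Reply with the single word: ok"}],
    model=os.environ.get("GAIA_TEXT_MODEL", "Qwen/Qwen2.5-7B-Instruct"),
    max_tokens=8,
)
print(resp.choices[0].message.content)
```

A 404 here usually means a pinned provider does not serve the model; see `inference_client_factory.py` below.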
agent.py ADDED
@@ -0,0 +1,151 @@
+ """GAIA Unit 4 agent: tool-calling loop via Hugging Face Inference API."""
+
+ from __future__ import annotations
+
+ import os
+ from typing import Any, Optional
+
+ from huggingface_hub import InferenceClient
+
+ from answer_normalize import normalize_answer
+ from inference_client_factory import inference_client_kwargs
+ from tools.registry import TOOL_DEFINITIONS, deterministic_attempt, dispatch_tool
+
+ SYSTEM_PROMPT = """You solve GAIA benchmark questions for the Hugging Face Agents Course.
+
+ Hard rules:
+ - Call tools as needed (search, Wikipedia, fetch URL, Python, audio, image, Excel).
+ - Your final assistant message must contain ONLY the answer text required by the question — no labels like "FINAL ANSWER", no markdown fences, no extra sentences.
+ - Match the question's format exactly (comma-separated, alphabetical order, IOC codes, algebraic notation, two-decimal USD, first name only, etc.).
+ - When a local attachment path is given, use the appropriate tool with that exact path.
+ - For English Wikipedia tasks, use wikipedia_* tools; cross-check with web_search if needed.
+ - For YouTube URLs in the question, try youtube_transcript first.
+ """
+
+
+ class GaiaAgent:
+     def __init__(
+         self,
+         *,
+         hf_token: Optional[str] = None,
+         text_model: Optional[str] = None,
+         max_iterations: int = 14,
+     ):
+         self.hf_token = (
+             hf_token
+             or os.environ.get("HF_TOKEN")
+             or os.environ.get("HUGGINGFACEHUB_API_TOKEN")
+         )
+         self.text_model = text_model or os.environ.get(
+             "GAIA_TEXT_MODEL", "Qwen/Qwen2.5-7B-Instruct"
+         )
+         self.max_iterations = max_iterations
+         self._client: Optional[InferenceClient] = None
+
+     def _get_client(self) -> InferenceClient:
+         if self._client is None:
+             if not self.hf_token:
+                 raise RuntimeError(
+                     "HF_TOKEN or HUGGINGFACEHUB_API_TOKEN is required for GaiaAgent."
+                 )
+             kw = inference_client_kwargs(self.hf_token)
+             self._client = InferenceClient(**kw)
+         return self._client
+
+     def __call__(
+         self,
+         question: str,
+         attachment_path: Optional[str] = None,
+         task_id: Optional[str] = None,
+     ) -> str:
+         det = deterministic_attempt(question, attachment_path)
+         if det is not None:
+             return normalize_answer(det)
+
+         if not self.hf_token:
+             return normalize_answer(
+                 "Error: missing HF_TOKEN; cannot run LLM tools for this question."
+             )
+
+         user_text = _build_user_payload(question, attachment_path, task_id)
+         messages: list[dict[str, Any]] = [
+             {"role": "system", "content": SYSTEM_PROMPT},
+             {"role": "user", "content": user_text},
+         ]
+
+         client = self._get_client()
+         last_text = ""
+
+         for _ in range(self.max_iterations):
+             try:
+                 completion = client.chat_completion(
+                     messages=messages,
+                     model=self.text_model,
+                     tools=TOOL_DEFINITIONS,
+                     tool_choice="auto",
+                     max_tokens=1024,
+                     temperature=0.15,
+                 )
+             except Exception as e:
+                 last_text = f"Inference error: {e}"
+                 break
+
+             choice = completion.choices[0]
+             msg = choice.message
+             last_text = (msg.content or "").strip()
+
+             if msg.tool_calls:
+                 messages.append(
+                     {
+                         "role": "assistant",
+                         "content": msg.content if msg.content else None,
+                         "tool_calls": [
+                             {
+                                 "id": tc.id,
+                                 "type": "function",
+                                 "function": {
+                                     "name": tc.function.name,
+                                     "arguments": tc.function.arguments,
+                                 },
+                             }
+                             for tc in msg.tool_calls
+                         ],
+                     }
+                 )
+                 for tc in msg.tool_calls:
+                     name = tc.function.name
+                     args = tc.function.arguments or "{}"
+                     result = dispatch_tool(name, args, hf_token=self.hf_token)
+                     messages.append(
+                         {
+                             "role": "tool",
+                             "tool_call_id": tc.id,
+                             "content": result[:24_000],
+                         }
+                     )
+                 continue
+
+             if last_text:
+                 break
+
+             if choice.finish_reason == "length":
+                 last_text = "Error: model hit max length without an answer."
+                 break
+
+         return normalize_answer(last_text or "Error: empty response.")
+
+
+ def _build_user_payload(
+     question: str,
+     attachment_path: Optional[str],
+     task_id: Optional[str],
+ ) -> str:
+     parts = []
+     if task_id:
+         parts.append(f"task_id: {task_id}")
+     parts.append(f"Question:\n{question.strip()}")
+     if attachment_path:
+         parts.append(f"\nAttachment path (use with tools): {attachment_path}")
+     else:
+         parts.append("\nNo attachment.")
+     return "\n".join(parts)
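
Reviewer note: the loop above only runs if `tools/registry.py` exists; it is imported here but is not part of this diff. A minimal invocation sketch under that assumption, with `HF_TOKEN` exported:

```python
# Quick local check of GaiaAgent. "smoke-test" is a placeholder task id; it is
# only echoed into the prompt by _build_user_payload, never sent to any API.
from agent import GaiaAgent

agent = GaiaAgent(max_iterations=6)  # reads HF_TOKEN / GAIA_TEXT_MODEL from env
answer = agent("What is 2 + 2? Reply with the number only.", task_id="smoke-test")
print(answer)  # already passed through normalize_answer()
```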
answer_normalize.py ADDED
@@ -0,0 +1,44 @@
+ """Post-process model output for GAIA exact-match submission."""
+
+ import re
+ from typing import Union
+
+
+ _FINAL_ANSWER_RE = re.compile(
+     r"^\s*(?:FINAL\s*ANSWER\s*[::]?\s*)",
+     re.IGNORECASE,
+ )
+
+
+ def normalize_answer(raw: Union[str, int, float, None]) -> Union[str, int, float]:
+     """
+     Strip wrappers and forbidden prefixes. Prefer returning a string for API compatibility.
+     """
+     if raw is None:
+         return ""
+     if isinstance(raw, (int, float)) and not isinstance(raw, bool):
+         return raw
+     text = str(raw).strip()
+     if not text:
+         return ""
+     text = _FINAL_ANSWER_RE.sub("", text, count=1).strip()
+     # Strip common single-line prefixes (code fences are handled below)
+     for prefix in ("The answer is", "Answer:", "ANSWER:"):
+         if text.lower().startswith(prefix.lower()):
+             text = text[len(prefix) :].strip()
+     if text.startswith('"') and text.endswith('"') and len(text) >= 2:
+         text = text[1:-1].strip()
+     if text.startswith("```"):
+         text = re.sub(r"^```\w*\s*", "", text)
+         text = re.sub(r"\s*```$", "", text).strip()
+     return text.strip()
+
+
+ def maybe_numeric(text: str) -> Union[str, int, float]:
+     """If the prompt expects a plain number, allow int/float submission."""
+     t = text.strip()
+     if re.fullmatch(r"-?\d+", t):
+         return int(t)
+     if re.fullmatch(r"-?\d+\.\d+", t):
+         return float(t)
+     return text
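
Reviewer note: a short behavioral check of the normalizer, with expected outputs traced against the logic above:

```python
# Typical model wrappers and how normalize_answer / maybe_numeric handle them.
from answer_normalize import maybe_numeric, normalize_answer

print(normalize_answer('FINAL ANSWER: "right"'))   # right
print(normalize_answer("```text\n42\n```"))        # 42
print(maybe_numeric("42"), maybe_numeric("3.50"))  # 42 3.5
```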
app.py CHANGED
@@ -1,105 +1,181 @@
  import os
  import gradio as gr
- import requests
- import inspect
  import pandas as pd

- # (Keep Constants as is)
- # --- Constants ---
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

- # --- Basic Agent Definition ---
- # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
- class BasicAgent:
-     def __init__(self):
-         print("BasicAgent initialized.")
-     def __call__(self, question: str) -> str:
-         print(f"Agent received question (first 50 chars): {question[:50]}...")
-         fixed_answer = "This is a default answer."
-         print(f"Agent returning fixed answer: {fixed_answer}")
-         return fixed_answer
-
- def run_and_submit_all( profile: gr.OAuthProfile | None):
-     """
-     Fetches all questions, runs the BasicAgent on them, submits all answers,
-     and displays the results.
-     """
-     # --- Determine HF Space Runtime URL and Repo URL ---
-     space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code

      if profile:
-         username= f"{profile.username}"
          print(f"User logged in: {username}")
      else:
          print("User not logged in.")
          return "Please Login to Hugging Face with the button.", None

-     api_url = DEFAULT_API_URL
      questions_url = f"{api_url}/questions"
      submit_url = f"{api_url}/submit"

-     # 1. Instantiate Agent ( modify this part to create your agent)
      try:
-         agent = BasicAgent()
      except Exception as e:
          print(f"Error instantiating agent: {e}")
          return f"Error initializing agent: {e}", None
-     # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
      agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
      print(agent_code)

-     # 2. Fetch Questions
      print(f"Fetching questions from: {questions_url}")
      try:
-         response = requests.get(questions_url, timeout=15)
          response.raise_for_status()
          questions_data = response.json()
          if not questions_data:
-             print("Fetched questions list is empty.")
-             return "Fetched questions list is empty or invalid format.", None
          print(f"Fetched {len(questions_data)} questions.")
      except requests.exceptions.RequestException as e:
-         print(f"Error fetching questions: {e}")
          return f"Error fetching questions: {e}", None
-     except requests.exceptions.JSONDecodeError as e:
-         print(f"Error decoding JSON response from questions endpoint: {e}")
-         print(f"Response text: {response.text[:500]}")
-         return f"Error decoding server response for questions: {e}", None
-     except Exception as e:
-         print(f"An unexpected error occurred fetching questions: {e}")
-         return f"An unexpected error occurred fetching questions: {e}", None

-     # 3. Run your Agent
      results_log = []
      answers_payload = []
      print(f"Running agent on {len(questions_data)} questions...")
      for item in questions_data:
          task_id = item.get("task_id")
          question_text = item.get("question")
          if not task_id or question_text is None:
              print(f"Skipping item with missing task_id or question: {item}")
              continue
-         try:
-             submitted_answer = agent(question_text)
-             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
-             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
-         except Exception as e:
-             print(f"Error running agent on task {task_id}: {e}")
-             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

      if not answers_payload:
-         print("Agent did not produce any answers to submit.")
          return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

-     # 4. Prepare Submission
-     submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
-     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
      print(status_update)

-     # 5. Submit
      print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
      try:
-         response = requests.post(submit_url, json=submission_data, timeout=60)
          response.raise_for_status()
          result_data = response.json()
          final_status = (
@@ -117,80 +193,97 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
          try:
              error_json = e.response.json()
              error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
-         except requests.exceptions.JSONDecodeError:
              error_detail += f" Response: {e.response.text[:500]}"
          status_message = f"Submission Failed: {error_detail}"
          print(status_message)
-         results_df = pd.DataFrame(results_log)
-         return status_message, results_df
      except requests.exceptions.Timeout:
          status_message = "Submission Failed: The request timed out."
          print(status_message)
-         results_df = pd.DataFrame(results_log)
-         return status_message, results_df
      except requests.exceptions.RequestException as e:
          status_message = f"Submission Failed: Network error - {e}"
          print(status_message)
-         results_df = pd.DataFrame(results_log)
-         return status_message, results_df
      except Exception as e:
          status_message = f"An unexpected error occurred during submission: {e}"
          print(status_message)
-         results_df = pd.DataFrame(results_log)
-         return status_message, results_df


- # --- Build Gradio Interface using Blocks ---
  with gr.Blocks() as demo:
-     gr.Markdown("# Basic Agent Evaluation Runner")
      gr.Markdown(
          """
-         **Instructions:**

-         1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
-         2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
-         3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

-         ---
-         **Disclaimers:**
-         Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
-         This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
          """
      )

      gr.LoginButton()

-     run_button = gr.Button("Run Evaluation & Submit All Answers")
-
-     status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
-     # Removed max_rows=10 from DataFrame constructor
-     results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

-     run_button.click(
-         fn=run_and_submit_all,
-         outputs=[status_output, results_table]
-     )

  if __name__ == "__main__":
-     print("\n" + "-"*30 + " App Starting " + "-"*30)
-     # Check for SPACE_HOST and SPACE_ID at startup for information
      space_host_startup = os.getenv("SPACE_HOST")
-     space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup

      if space_host_startup:
-         print(f"✅ SPACE_HOST found: {space_host_startup}")
-         print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
      else:
-         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")

-     if space_id_startup: # Print repo URLs if SPACE_ID is found
-         print(f"✅ SPACE_ID found: {space_id_startup}")
-         print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
-         print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
      else:
-         print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
-
-     print("-"*(60 + len(" App Starting ")) + "\n")

-     print("Launching Gradio Interface for Basic Agent Evaluation...")
-     demo.launch(debug=True, share=False)

+ import json
  import os
+ import tempfile
+ from pathlib import Path
+
  import gradio as gr
  import pandas as pd
+ import requests
+
+ from agent import GaiaAgent
+ from answer_normalize import normalize_answer

  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
+ CACHE_FILENAME = "gaia_answers_cache.json"
+
+
+ def _cache_path() -> Path:
+     return Path(__file__).resolve().parent / CACHE_FILENAME
+
+
+ def _load_cache() -> dict:
+     p = _cache_path()
+     if not p.is_file():
+         return {}
+     try:
+         return json.loads(p.read_text(encoding="utf-8"))
+     except json.JSONDecodeError:
+         return {}
+
+
+ def _save_cache(cache: dict) -> None:
+     _cache_path().write_text(json.dumps(cache, indent=2), encoding="utf-8")

+
+ def _download_attachment(api_url: str, task_id: str, file_name: str) -> str | None:
+     """Save task attachment to a temp file; return path or None."""
+     if not file_name or not str(file_name).strip():
+         return None
+     url = f"{api_url}/files/{task_id}"
+     try:
+         r = requests.get(url, timeout=120)
+     except requests.RequestException:
+         return None
+     if r.status_code != 200:
+         return None
+     ctype = (r.headers.get("Content-Type") or "").lower()
+     if "application/json" in ctype:
+         try:
+             data = r.json()
+             if isinstance(data, dict) and data.get("detail"):
+                 return None
+         except json.JSONDecodeError:
+             pass
+     suffix = Path(file_name).suffix or ""
+     fd, path = tempfile.mkstemp(suffix=suffix, prefix=f"gaia_{task_id[:8]}_")
+     try:
+         with os.fdopen(fd, "wb") as f:
+             f.write(r.content)
+     except OSError:
+         return None
+     return path
+
+
+ def run_and_submit_all(profile: gr.OAuthProfile | None):
+     space_id = os.getenv("SPACE_ID")
+     use_cache = os.getenv("GAIA_USE_CACHE", "1").lower() in ("1", "true", "yes")

      if profile:
+         username = f"{profile.username}"
          print(f"User logged in: {username}")
      else:
          print("User not logged in.")
          return "Please Login to Hugging Face with the button.", None

+     api_url = os.getenv("GAIA_API_URL", DEFAULT_API_URL)
      questions_url = f"{api_url}/questions"
      submit_url = f"{api_url}/submit"

      try:
+         agent = GaiaAgent()
      except Exception as e:
          print(f"Error instantiating agent: {e}")
          return f"Error initializing agent: {e}", None
+
      agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
      print(agent_code)

      print(f"Fetching questions from: {questions_url}")
      try:
+         response = requests.get(questions_url, timeout=60)
          response.raise_for_status()
          questions_data = response.json()
          if not questions_data:
+             return "Fetched questions list is empty or invalid format.", None
          print(f"Fetched {len(questions_data)} questions.")
      except requests.exceptions.RequestException as e:
          return f"Error fetching questions: {e}", None
+     except json.JSONDecodeError as e:
+         return f"Error decoding server response for questions: {e}", None

+     cache = _load_cache() if use_cache else {}
      results_log = []
      answers_payload = []
+
      print(f"Running agent on {len(questions_data)} questions...")
      for item in questions_data:
          task_id = item.get("task_id")
          question_text = item.get("question")
+         file_name = item.get("file_name") or ""
+
          if not task_id or question_text is None:
              print(f"Skipping item with missing task_id or question: {item}")
              continue
+
+         cache_key = str(task_id)
+         if use_cache and cache_key in cache:
+             submitted_answer = normalize_answer(cache[cache_key])
+             print(f"Cache hit for {task_id}")
+         else:
+             local_path: str | None = None
+             try:
+                 if file_name and str(file_name).strip():
+                     local_path = _download_attachment(api_url, str(task_id), str(file_name))
+                     if local_path:
+                         print(f"Downloaded attachment for {task_id} -> {local_path}")
+                 submitted_answer = agent(
+                     str(question_text),
+                     attachment_path=local_path,
+                     task_id=str(task_id),
+                 )
+                 submitted_answer = normalize_answer(submitted_answer)
+                 if use_cache:
+                     cache[cache_key] = (
+                         submitted_answer
+                         if isinstance(submitted_answer, str)
+                         else str(submitted_answer)
+                     )
+                     _save_cache(cache)
+             except Exception as e:
+                 print(f"Error running agent on task {task_id}: {e}")
+                 submitted_answer = f"AGENT ERROR: {e}"
+             finally:
+                 if local_path and Path(local_path).is_file():
+                     try:
+                         Path(local_path).unlink(missing_ok=True)
+                     except OSError:
+                         pass
+
+         answers_payload.append(
+             {
+                 "task_id": task_id,
+                 "submitted_answer": submitted_answer,
+             }
+         )
+         results_log.append(
+             {
+                 "Task ID": task_id,
+                 "Question": question_text,
+                 "Submitted Answer": submitted_answer,
+             }
+         )

      if not answers_payload:
          return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

+     submission_data = {
+         "username": username.strip(),
+         "agent_code": agent_code,
+         "answers": answers_payload,
+     }
+     status_update = (
+         f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
+     )
      print(status_update)

      print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
      try:
+         response = requests.post(submit_url, json=submission_data, timeout=600)
          response.raise_for_status()
          result_data = response.json()
          final_status = (

          try:
              error_json = e.response.json()
              error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
+         except json.JSONDecodeError:
              error_detail += f" Response: {e.response.text[:500]}"
          status_message = f"Submission Failed: {error_detail}"
          print(status_message)
+         return status_message, pd.DataFrame(results_log)
      except requests.exceptions.Timeout:
          status_message = "Submission Failed: The request timed out."
          print(status_message)
+         return status_message, pd.DataFrame(results_log)
      except requests.exceptions.RequestException as e:
          status_message = f"Submission Failed: Network error - {e}"
          print(status_message)
+         return status_message, pd.DataFrame(results_log)
      except Exception as e:
          status_message = f"An unexpected error occurred during submission: {e}"
          print(status_message)
+         return status_message, pd.DataFrame(results_log)
+
+
+ def crypto_btc_price() -> str:
+     """Optional demo: live BTC/USD (not used for GAIA scoring)."""
+     try:
+         r = requests.get(
+             "https://api.coingecko.com/api/v3/simple/price",
+             params={"ids": "bitcoin", "vs_currencies": "usd"},
+             timeout=20,
+         )
+         r.raise_for_status()
+         data = r.json()
+         usd = data.get("bitcoin", {}).get("usd")
+         return f"Bitcoin (BTC) ~ ${usd:,.2f} USD (CoinGecko public API)."
+     except Exception as e:
+         return f"Could not fetch price: {e}"


  with gr.Blocks() as demo:
+     gr.Markdown("# GAIA Unit 4 — Agent Evaluation Runner")
      gr.Markdown(
          """
+         **Instructions**

+         1. Duplicate this Space from the course template (or push this repo) and set **Secrets**: `HF_TOKEN` (read access to Inference).
+         2. Optional env vars: `GAIA_TEXT_MODEL`, `GAIA_ASR_MODEL`, `GAIA_VISION_MODEL`, `GAIA_API_URL`, `GAIA_USE_CACHE` (default `1`).
+         3. Log in with Hugging Face below (username is used for the leaderboard).
+         4. Click **Run Evaluation & Submit All Answers** to answer all questions and post scores.

+         Attachment tasks download `GET /files/{task_id}` automatically when `file_name` is set.
+
+         ---
+         **Crypto demo (optional):** unrelated to GAIA; quick BTC spot check.
          """
      )

      gr.LoginButton()

+     with gr.Tab("GAIA evaluation"):
+         run_button = gr.Button("Run Evaluation & Submit All Answers")
+         status_output = gr.Textbox(
+             label="Run Status / Submission Result", lines=6, interactive=False
+         )
+         results_table = gr.DataFrame(
+             label="Questions and Agent Answers", wrap=True
+         )
+         run_button.click(
+             fn=run_and_submit_all,
+             outputs=[status_output, results_table],
+         )

+     with gr.Tab("Crypto intelligence (demo)"):
+         gr.Markdown(
+             "This tab does not affect GAIA scores. It demonstrates a simple public market data fetch."
+         )
+         cp_btn = gr.Button("Fetch BTC / USD")
+         cp_out = gr.Textbox(label="Output", interactive=False)
+         cp_btn.click(fn=crypto_btc_price, outputs=cp_out)

  if __name__ == "__main__":
+     print("\n" + "-" * 30 + " App Starting " + "-" * 30)
      space_host_startup = os.getenv("SPACE_HOST")
+     space_id_startup = os.getenv("SPACE_ID")

      if space_host_startup:
+         print(f"SPACE_HOST found: {space_host_startup}")
      else:
+         print("SPACE_HOST not set (local run?).")

+     if space_id_startup:
+         print(f"SPACE_ID found: {space_id_startup}")
+         print(f"Repo tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
      else:
+         print("SPACE_ID not set (local run?).")

+     print("-" * 62 + "\n")
+     demo.launch(debug=True, share=False)
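
Reviewer note: the answer cache is a flat `{task_id: answer}` JSON map, rewritten after every answered question, so an interrupted run resumes from disk on the next click. A small eviction sketch (the task id is a placeholder):

```python
# Force one task to be recomputed on the next run by dropping its cache entry.
import json
from pathlib import Path

cache = Path("gaia_answers_cache.json")  # CACHE_FILENAME in app.py
data = json.loads(cache.read_text(encoding="utf-8")) if cache.is_file() else {}
data.pop("<task-id-to-retry>", None)  # placeholder key
cache.write_text(json.dumps(data, indent=2), encoding="utf-8")
```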
inference_client_factory.py ADDED
@@ -0,0 +1,31 @@
+ """Build InferenceClient with a provider that accepts the user's HF token."""
+
+ from __future__ import annotations
+
+ import os
+
+ from huggingface_hub import InferenceClient
+
+
+ def inference_client_kwargs(token: str) -> dict:
+     """
+     Default: **no** ``provider`` → the library uses ``auto``: first provider for this
+     model per your https://hf.co/settings/inference-providers order.
+
+     Forcing ``hf-inference`` breaks many chat models (e.g. Qwen2.5-7B-Instruct is only on
+     together / featherless-ai — the router then returns **404** for …/hf-inference/models/…).
+
+     Set ``HF_INFERENCE_PROVIDER`` to pin one provider (e.g. ``together``, ``sambanova``)
+     or ``auto`` explicitly. Use ``hf-inference`` only for models that actually list it.
+     """
+     raw = os.environ.get("HF_INFERENCE_PROVIDER")
+     if raw is None:
+         return {"token": token}
+     r = raw.strip().lower()
+     if r in ("", "auto"):
+         return {"token": token}
+     return {"token": token, "provider": r}
+
+
+ def make_inference_client(token: str) -> InferenceClient:
+     return InferenceClient(**inference_client_kwargs(token))
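
Reviewer note: a usage sketch for the factory; `together` is just one of the providers named in the docstring.

```python
# Pin a provider for this process; unset the variable to fall back to "auto".
import os

from inference_client_factory import make_inference_client

os.environ["HF_INFERENCE_PROVIDER"] = "together"  # example pin
client = make_inference_client(os.environ["HF_TOKEN"])
# client.chat_completion(...) now routes through the pinned provider
```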
requirements.txt CHANGED
@@ -1,2 +1,11 @@
- gradio
- requests
+ gradio>=4.44.0
+ requests>=2.31.0
+ pandas>=2.0.0
+ openpyxl>=3.1.0
+ beautifulsoup4>=4.12.0
+ lxml>=5.0.0
+ duckduckgo-search>=6.0.0
+ wikipedia>=1.4.0
+ huggingface_hub>=0.26.0
+ youtube-transcript-api>=0.6.0
+ Pillow>=10.0.0
run_local_eval.py ADDED
@@ -0,0 +1,111 @@
+ #!/usr/bin/env python3
+ """Fetch GAIA course questions, run GaiaAgent, save JSON — does not submit."""
+
+ from __future__ import annotations
+
+ import argparse
+ import json
+ import os
+ import sys
+ import tempfile
+ from pathlib import Path
+
+ import requests
+
+ ROOT = Path(__file__).resolve().parent
+ if str(ROOT) not in sys.path:
+     sys.path.insert(0, str(ROOT))
+
+ from agent import GaiaAgent  # noqa: E402
+ from answer_normalize import normalize_answer  # noqa: E402
+
+ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
+
+
+ def download_file(api_url: str, task_id: str, file_name: str) -> str | None:
+     if not file_name or not str(file_name).strip():
+         return None
+     url = f"{api_url}/files/{task_id}"
+     r = requests.get(url, timeout=120)
+     if r.status_code != 200:
+         return None
+     ctype = (r.headers.get("Content-Type") or "").lower()
+     if "application/json" in ctype:
+         try:
+             data = r.json()
+             if isinstance(data, dict) and data.get("detail"):
+                 return None
+         except json.JSONDecodeError:
+             pass
+     suffix = Path(file_name).suffix or ""
+     fd, path = tempfile.mkstemp(suffix=suffix, prefix=f"gaia_{task_id[:8]}_")
+     with os.fdopen(fd, "wb") as f:
+         f.write(r.content)
+     return path
+
+
+ def main() -> None:
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "--api-url",
+         default=os.environ.get("GAIA_API_URL", DEFAULT_API_URL),
+     )
+     parser.add_argument(
+         "-o",
+         "--output",
+         default=str(ROOT / "local_eval_answers.json"),
+         help="Write answers JSON here",
+     )
+     args = parser.parse_args()
+
+     q_url = f"{args.api_url.rstrip('/')}/questions"
+     print(f"GET {q_url}")
+     r = requests.get(q_url, timeout=60)
+     r.raise_for_status()
+     items = r.json()
+     print(f"{len(items)} questions")
+
+     token = os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACEHUB_API_TOKEN")
+     agent = GaiaAgent(hf_token=token) if token else None
+
+     out: list[dict] = []
+     for item in items:
+         tid = item.get("task_id")
+         q = item.get("question")
+         fn = item.get("file_name") or ""
+         if not tid or q is None:
+             continue
+         local = None
+         try:
+             if fn and str(fn).strip():
+                 local = download_file(args.api_url, str(tid), str(fn))
+             if agent is not None:
+                 ans = agent(str(q), attachment_path=local, task_id=str(tid))
+             else:
+                 from tools.registry import deterministic_attempt
+
+                 d = deterministic_attempt(str(q), local)
+                 ans = d if d is not None else "NO_HF_TOKEN"
+         finally:
+             if local and Path(local).is_file():
+                 Path(local).unlink(missing_ok=True)
+
+         if isinstance(ans, (int, float)) and not isinstance(ans, bool):
+             sub = ans
+         else:
+             sub = normalize_answer(ans)
+         out.append(
+             {
+                 "task_id": tid,
+                 "question": q,
+                 "submitted_answer": sub,
+             }
+         )
+         print(f"--- {tid[:8]}… -> {out[-1]['submitted_answer']!r}")
+
+     Path(args.output).write_text(json.dumps(out, indent=2), encoding="utf-8")
+     print(f"Wrote {args.output}")
+
+
+ if __name__ == "__main__":
+     main()
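
Reviewer note: a small follow-up check on the dry-run output; the field names match what `main()` writes above, and `NO_HF_TOKEN` is the script's own sentinel for the tokenless path.

```python
# Count how many questions got a real answer in the dry run.
import json
from pathlib import Path

rows = json.loads(Path("local_eval_answers.json").read_text(encoding="utf-8"))
answered = [
    r for r in rows
    if str(r["submitted_answer"]).strip() not in ("", "NO_HF_TOKEN")
]
print(f"{len(answered)}/{len(rows)} answered")
```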