Update app.py
Browse files
app.py
CHANGED
|
@@ -3,7 +3,7 @@ import gradio as gr
|
|
| 3 |
import requests
|
| 4 |
import inspect
|
| 5 |
import pandas as pd
|
| 6 |
-
from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel
|
| 7 |
|
| 8 |
# --- Constants ---
|
| 9 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
|
@@ -24,50 +24,40 @@ class BasicAgent:
|
|
| 24 |
# use_structured_outputs_internally=True # Optional: Can sometimes improve performance for capable models, but might also cause issues if the model doesn't strictly adhere. Test this if needed.
|
| 25 |
)
|
| 26 |
|
| 27 |
-
# ---
|
| 28 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
|
| 30 |
-
**Here's how you should
|
| 31 |
-
|
|
|
|
| 32 |
2. **Gather Information:**
|
| 33 |
-
*
|
| 34 |
-
*
|
| 35 |
-
* Analyze
|
| 36 |
-
3. **
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
* **Strings (words):** If the answer is
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
* **FINAL ANSWER: Tokyo**
|
| 48 |
-
|
| 49 |
-
* **Question:** How many days are in a common year?
|
| 50 |
-
* **Thought:** A common year has a fixed number of days.
|
| 51 |
-
* **FINAL ANSWER: three hundred sixty five**
|
| 52 |
-
|
| 53 |
-
* **Question:** Who developed Python and in what year?
|
| 54 |
-
* **Thought:** This requires factual recall of an inventor and a year. I will use DuckDuckGoSearchTool if uncertain.
|
| 55 |
-
* **FINAL ANSWER: Guido van Rossum, nineteen ninety one**
|
| 56 |
-
|
| 57 |
-
* **Question:** List three primary colors.
|
| 58 |
-
* **Thought:** I need to list common primary colors.
|
| 59 |
-
* **FINAL ANSWER: red, yellow, blue**
|
| 60 |
-
|
| 61 |
-
* **Question:** Calculate 15 multiplied by 7.
|
| 62 |
-
* **Thought:** This is a simple arithmetic calculation.
|
| 63 |
-
* **FINAL ANSWER: one hundred five**
|
| 64 |
-
|
| 65 |
-
Your detailed thought process (after "Thought:") should lead directly to the accurate `FINAL ANSWER` based on the specified format.
|
| 66 |
"""
|
| 67 |
self.agent.prompt_templates["system_prompt"] = self.agent.prompt_templates["system_prompt"] + SYSTEM_PROMPT
|
| 68 |
|
| 69 |
def __call__(self, question: str) -> str:
|
| 70 |
print(f"Agent received question (first 50 chars): {question[:50]}...")
|
|
|
|
|
|
|
| 71 |
final_answer = self.agent.run(question)
|
| 72 |
print(f"Agent returning final answer: {final_answer}")
|
| 73 |
return final_answer
|
|
@@ -99,8 +89,9 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
| 99 |
|
| 100 |
# In the case of an app running as a Hugging Face space, this link points toward your codebase (useful for others, so please keep it public)
|
| 101 |
# IMPORTANT: Update this to YOUR SPACE_ID if you've cloned it!
|
| 102 |
-
|
| 103 |
-
|
|
|
|
| 104 |
|
| 105 |
# 2. Fetch Questions
|
| 106 |
print(f"Fetching questions from: {questions_url}")
|
|
@@ -135,6 +126,16 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
| 135 |
continue
|
| 136 |
try:
|
| 137 |
submitted_answer = agent(question_text)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 138 |
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
|
| 139 |
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
|
| 140 |
except Exception as e:
|
|
|
|
| 3 |
import requests
|
| 4 |
import inspect
|
| 5 |
import pandas as pd
|
| 6 |
+
from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel
|
| 7 |
|
| 8 |
# --- Constants ---
|
| 9 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
|
|
|
| 24 |
# use_structured_outputs_internally=True # Optional: Can sometimes improve performance for capable models, but might also cause issues if the model doesn't strictly adhere. Test this if needed.
|
| 25 |
)
|
| 26 |
|
| 27 |
+
# --- REVISED SYSTEM PROMPT FOR EXACT MATCH ---
|
| 28 |
+
# This prompt is designed to make the agent output ONLY the answer,
|
| 29 |
+
# without any preambles, thoughts, or "FINAL ANSWER:" tags.
|
| 30 |
+
# This is critical for exact match scoring.
|
| 31 |
+
SYSTEM_PROMPT = """You are an ultra-concise and accurate AI assistant for the GAIA benchmark.
|
| 32 |
+
Your sole purpose is to provide the exact correct answer to the given question, and NOTHING ELSE.
|
| 33 |
+
Do not include any introductory phrases, thoughts, explanations, or "FINAL ANSWER:" tags.
|
| 34 |
+
Your output must be the answer only.
|
| 35 |
|
| 36 |
+
**Here's how you should determine and format your answer:**
|
| 37 |
+
|
| 38 |
+
1. **Understand the Question:** Carefully read the question to identify the precise information needed.
|
| 39 |
2. **Gather Information:**
|
| 40 |
+
* Use your internal knowledge if the answer is straightforward or involves simple arithmetic.
|
| 41 |
+
* **Crucially, use the `DuckDuckGoSearchTool`** if the question requires external knowledge, current facts, or complex research. Formulate the most effective search queries to find exact information.
|
| 42 |
+
* Analyze search results meticulously to extract the precise fact(s).
|
| 43 |
+
3. **Formulate the Answer (Strict Formatting Rules):**
|
| 44 |
+
* **Numbers:** If the answer is a number, provide it in digits ONLY. Do NOT use commas for thousands separators, currency symbols (e.g., "$"), percentage signs (e.g., "%"), or any other units (e.g., "kg", "meters").
|
| 45 |
+
* *Example 1 (number):* If the question asks "What is the square root of 81?", your answer should be "9".
|
| 46 |
+
* *Example 2 (large number):* If the question asks "What is the population of City X (approx)?", and you find "1,234,567", your answer must be "1234567".
|
| 47 |
+
* **Strings (words):** If the answer is text, use the fewest words possible. Do NOT include articles (a, an, the). Do NOT use abbreviations. If a number is part of a string (e.g., "twenty books"), write the digit in plain English words (e.g., "twenty").
|
| 48 |
+
* *Example 1 (string):* If the question asks "Who invented the telephone?", your answer should be "Alexander Graham Bell".
|
| 49 |
+
* *Example 2 (string with number in text):* If the question asks "How many primary colors are there?", your answer should be "three".
|
| 50 |
+
    * **Comma-separated lists:** If the answer is a list of items, apply the above rules to each item, and separate them with a comma and a single space (e.g., "red, green, blue").
|
| 51 |
+
* *Example:* If the question asks "List two types of fruit.", your answer should be "apple, banana".
|
| 52 |
+
|
| 53 |
+
Your final output *must* be only the answer, following these exact rules. No surrounding text.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 54 |
"""
|
| 55 |
self.agent.prompt_templates["system_prompt"] = self.agent.prompt_templates["system_prompt"] + SYSTEM_PROMPT
|
| 56 |
|
| 57 |
def __call__(self, question: str) -> str:
|
| 58 |
print(f"Agent received question (first 50 chars): {question[:50]}...")
|
| 59 |
+
# The key change: The agent's run method should directly produce the final answer,
|
| 60 |
+
# without additional wrapping, due to the hyper-specific prompt.
|
| 61 |
final_answer = self.agent.run(question)
|
| 62 |
print(f"Agent returning final answer: {final_answer}")
|
| 63 |
return final_answer
|
|
|
|
| 89 |
|
| 90 |
# In the case of an app running as a Hugging Face space, this link points toward your codebase (useful for others, so please keep it public)
|
| 91 |
# IMPORTANT: Update this to YOUR SPACE_ID if you've cloned it!
|
| 92 |
+
# Ensure this link is correct for your public space for verification.
|
| 93 |
+
agent_code = f"https://huggingface.co/spaces/{os.getenv('SPACE_ID', 'YOUR_HF_USERNAME/YOUR_SPACE_NAME')}/tree/main"
|
| 94 |
+
print(f"Agent code will link to: {agent_code}")
|
| 95 |
|
| 96 |
# 2. Fetch Questions
|
| 97 |
print(f"Fetching questions from: {questions_url}")
|
|
|
|
| 126 |
continue
|
| 127 |
try:
|
| 128 |
submitted_answer = agent(question_text)
|
| 129 |
+
# IMPORTANT: Post-process the submitted_answer if the model still adds unwanted text.
|
| 130 |
+
# While the prompt aims to prevent it, sometimes models are stubborn.
|
| 131 |
+
# This stripping makes sure only the raw answer is sent.
|
| 132 |
+
submitted_answer = submitted_answer.strip()
|
| 133 |
+
# Remove any leading "FINAL ANSWER:" prefix if the model still generates it.
|
| 134 |
+
if submitted_answer.startswith("FINAL ANSWER:"):
|
| 135 |
+
submitted_answer = submitted_answer[len("FINAL ANSWER:"):].strip()
|
| 136 |
+
# You might need more sophisticated stripping depending on how the model misbehaves
|
| 137 |
+
# For example, if it adds thoughts, you'd need to extract the last line or a specific pattern.
|
| 138 |
+
|
| 139 |
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
|
| 140 |
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
|
| 141 |
except Exception as e:
|