Update app.py
app.py CHANGED
@@ -3,12 +3,13 @@ import os
 import gradio as gr
 import requests
 import pandas as pd
-
+import torch
 from transformers import BartForConditionalGeneration, BartTokenizer
-
+
 from audio_transcriber import AudioTranscriptionTool
 from image_analyzer import ImageAnalysisTool
 from wikipedia_searcher import WikipediaSearcher
+from smolagents import ToolCallingAgent
 
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
@@ -30,37 +31,37 @@ SYSTEM_PROMPT = (
     "Never say 'the answer is...'. Only return the answer.\n"
 )
 
+# Local wrapper for facebook/bart-base that exposes generate()
 class LocalBartModel:
-    def __init__(self, model_name="facebook/bart-base", device=None):
-        import torch
-        self.device = device if device else ("cuda" if torch.cuda.is_available() else "cpu")
+    def __init__(self, model_name="facebook/bart-base"):
         self.tokenizer = BartTokenizer.from_pretrained(model_name)
-        self.model = BartForConditionalGeneration.from_pretrained(model_name)
+        self.model = BartForConditionalGeneration.from_pretrained(model_name)
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.model.to(self.device)
 
-    def
-
+    def generate(self, input_ids, **generate_kwargs):
+        return self.model.generate(input_ids.to(self.device), **generate_kwargs)
 
-
-
-
-
+    def __call__(self, prompt: str) -> str:
+        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)
+        output_ids = self.generate(
+            inputs.input_ids,
+            max_length=100,
             num_beams=5,
             early_stopping=True
         )
-
-        return
+        output_text = self.tokenizer.decode(output_ids[0], skip_special_tokens=True)
+        return output_text.strip()
 
 class GaiaAgent:
     def __init__(self):
         print("Gaia Agent Initialized")
         self.model = LocalBartModel()
-
         self.tools = [
             AudioTranscriptionTool(),
             ImageAnalysisTool(),
             WikipediaSearcher()
         ]
-
         self.agent = ToolCallingAgent(
             tools=self.tools,
             model=self.model
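
For reference, a minimal usage sketch (not part of this commit) of the new LocalBartModel wrapper as defined in the hunk above; it assumes torch/transformers are installed, the facebook/bart-base weights can be downloaded, and that callers pass a plain prompt string to __call__:

    # Illustrative only: drives LocalBartModel exactly as defined in this diff.
    model = LocalBartModel()          # loads tokenizer + weights, picks cuda or cpu
    answer = model("Question: In which year was the Eiffel Tower completed?")
    print(answer)                     # decoded, beam-searched text (max_length=100)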
@@ -143,79 +144,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
         results_log.append({
             "Task ID": task_id,
             "Question": item.get("question", ""),
-            "
+            "Submi
-        })
-
-    if not answers_payload:
-        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
-
-    submission_data = {
-        "username": username.strip(),
-        "agent_code": agent_code,
-        "answers": answers_payload
-    }
-
-    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
-    try:
-        response = requests.post(submit_url, json=submission_data, timeout=60)
-        response.raise_for_status()
-        result_data = response.json()
-        final_status = (
-            f"Submission Successful!\n"
-            f"User: {result_data.get('username')}\n"
-            f"Overall Score: {result_data.get('score', 'N/A')}% "
-            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
-            f"Message: {result_data.get('message', 'No message received.')}"
-        )
-        results_df = pd.DataFrame(results_log)
-        return final_status, results_df
-    except requests.exceptions.HTTPError as e:
-        try:
-            detail = e.response.json().get("detail", e.response.text)
-        except Exception:
-            detail = e.response.text[:500]
-        return f"Submission Failed: {detail}", pd.DataFrame(results_log)
-    except requests.exceptions.Timeout:
-        return "Submission Failed: The request timed out.", pd.DataFrame(results_log)
-    except Exception as e:
-        return f"An unexpected error occurred during submission: {e}", pd.DataFrame(results_log)
-
-with gr.Blocks() as demo:
-    gr.Markdown("# Basic Agent Evaluation Runner")
-    gr.Markdown("""
-    **Instructions:**
-    1. Clone this space and define your agent and tools.
-    2. Log in to your Hugging Face account using the button below.
-    3. Click 'Run Evaluation & Submit All Answers' to test your agent and submit results.
-    """)
-
-    gr.LoginButton()
-    run_button = gr.Button("Run Evaluation & Submit All Answers")
-
-    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
-    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
-
-    run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])
-
-if __name__ == "__main__":
-    print("\n" + "-"*30 + " App Starting " + "-"*30)
-    space_host = os.getenv("SPACE_HOST")
-    space_id = os.getenv("SPACE_ID")
-
-    if space_host:
-        print(f"✅ SPACE_HOST found: {space_host}")
-        print(f"   Runtime URL should be: https://{space_host}.hf.space")
-    else:
-        print("ℹ️ SPACE_HOST not found.")
-
-    if space_id:
-        print(f"✅ SPACE_ID found: {space_id}")
-        print(f"   Repo URL: https://huggingface.co/spaces/{space_id}")
-    else:
-        print("ℹ️ SPACE_ID not found.")
-
-    print("-"*(60 + len(" App Starting ")) + "\n")
-    demo.launch(debug=True, share=False)
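
The removed block above built and POSTed the submission payload. A minimal sketch of that request shape, assuming (not shown in this diff) that submit_url points at a /submit route on DEFAULT_API_URL and that each entry in answers_payload carries task_id / submitted_answer keys:

    # Illustrative payload only; field values are placeholders.
    import requests

    submit_url = f"{DEFAULT_API_URL}/submit"   # route name assumed, not visible in the hunk
    submission_data = {
        "username": "your-hf-username",
        "agent_code": "https://huggingface.co/spaces/your-username/your-space/tree/main",
        "answers": [
            {"task_id": "example-task-id", "submitted_answer": "42"},  # keys assumed
        ],
    }
    response = requests.post(submit_url, json=submission_data, timeout=60)
    response.raise_for_status()
    print(response.json().get("score", "N/A"))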