import os
import gradio as gr
import requests
import inspect
import pandas as pd
import re
import time
from datetime import datetime, timedelta
from collections import deque
import random
from smolagents import CodeAgent, load_tool, tool
from smolagents.models import Model, ChatMessage, MessageRole, Tool
from tools import FinalAnswerTool, WikipediaSearchTool, VisitWebpageTool, DuckDuckGoSearchTool, ReverseStringTool
from retriever import LastResort
import google.generativeai as genai


DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
MAX_RETRIES = 3
INITIAL_RETRY_DELAY = 1
MAX_RETRY_DELAY = 32
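

# Simple sliding-window rate limiter: it remembers the timestamps of recent
# requests and sleeps just long enough to stay under the per-minute budget.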
class RateLimiter:
    def __init__(self, requests_per_minute):
        self.requests_per_minute = requests_per_minute
        self.window_size = 60  # seconds
        self.requests = deque()

    def wait_if_needed(self):
        now = datetime.now()

        # Drop timestamps that have fallen out of the 60-second window.
        while self.requests and (now - self.requests[0]).total_seconds() > self.window_size:
            self.requests.popleft()

        # If the window is full, sleep until the oldest request expires.
        if len(self.requests) >= self.requests_per_minute:
            wait_time = self.window_size - (now - self.requests[0]).total_seconds()
            if wait_time > 0:
                time.sleep(wait_time + 0.1)

        # Record the time the request is actually issued (after any sleep),
        # so the window reflects real send times.
        self.requests.append(datetime.now())
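

# A minimal usage sketch of RateLimiter (comments only, not executed): the
# Gemini model below calls wait_if_needed() before every API request, roughly
#     limiter = RateLimiter(requests_per_minute=25)
#     limiter.wait_if_needed()
#     response = model.generate_content(prompt)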


final_answer = FinalAnswerTool()
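

# GeminiModel adapts Google's Gemini API to the smolagents Model interface:
# it prepends the system prompt, flattens the chat history into a single text
# prompt, rate-limits requests, and retries on quota errors.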
class GeminiModel(Model):
    def __init__(self, api_key, **kwargs):
        super().__init__(**kwargs)
        self.api_key = api_key
        genai.configure(api_key=api_key)
        self.model = genai.GenerativeModel('models/gemini-2.0-flash-lite')
        self.rate_limiter = RateLimiter(requests_per_minute=25)

        file_path = os.path.join(os.path.dirname(__file__), "system_prompt.txt")
        with open(file_path, "r", encoding="utf-8") as f:
            self.system_prompt = f.read()

    def generate(
        self,
        messages: list[ChatMessage],
        stop_sequences: list[str] | None = None,
        response_format: dict[str, str] | None = None,
        tools_to_call_from: list[Tool] | None = None,
        **kwargs,
    ) -> ChatMessage:
        retry_count = 0
        delay = INITIAL_RETRY_DELAY

        # Flatten the chat history into plain text, accepting ChatMessage
        # objects, dicts, or raw strings.
        conversation_history = []
        for message in messages:
            if isinstance(message, ChatMessage) and message.content:
                content = message.content
            elif isinstance(message, dict) and 'content' in message:
                content = str(message['content'])
            else:
                content = str(message)
            conversation_history.append(content)

        prompt = "\n".join(conversation_history)

        full_prompt = f"{self.system_prompt}\n\n{prompt}"
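
        # Retry loop: HTTP 429 quota errors trigger exponential backoff with a
        # little jitter (1s doubling up to 32s); any other error is re-raised.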
        while True:
            try:
                self.rate_limiter.wait_if_needed()

                response = self.model.generate_content(full_prompt)

                response_text = ""
                if hasattr(response, 'text'):
                    response_text = response.text
                elif hasattr(response, 'parts') and response.parts:
                    response_text = "".join(part.text for part in response.parts if hasattr(part, 'text'))
                elif isinstance(response, str):
                    response_text = response
                else:
                    response_text = str(response)

                return ChatMessage(
                    role=MessageRole.ASSISTANT,
                    content=response_text,
                    raw=response
                )

            except Exception as e:
                error_str = str(e)

                if "429" in error_str and retry_count < MAX_RETRIES:
                    retry_count += 1

                    jitter = random.uniform(0, 0.1) * delay
                    sleep_time = delay + jitter

                    print(f"Rate limit hit. Retrying in {sleep_time:.2f} seconds (attempt {retry_count}/{MAX_RETRIES})")
                    time.sleep(sleep_time)

                    delay = min(delay * 2, MAX_RETRY_DELAY)
                    continue

                print(f"Error in generate: {e}")
                raise
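

# MyAgent wires the Gemini-backed model into a smolagents CodeAgent together
# with the custom tools, capped at 10 reasoning steps per question.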
class MyAgent:
    def __init__(self):
        gemini_api_key = os.getenv("GEMINI_API_KEY")
        if not gemini_api_key:
            raise ValueError("GEMINI_API_KEY not set in environment variables.")

        self.model = GeminiModel(gemini_api_key)

        self.agent = CodeAgent(
            tools=[
                FinalAnswerTool(),
                DuckDuckGoSearchTool(),
                WikipediaSearchTool(),
                VisitWebpageTool(),
                ReverseStringTool(),
                LastResort()
            ],
            model=self.model,
            max_steps=10
        )
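
    # Run the agent on one question and always return a plain string, so the
    # evaluation loop never crashes on an unexpected return type.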
    def __call__(self, question: str) -> str:
        print(f"\n=== Processing Question: {question} ===")
        try:
            answer = self.agent.run(question)
            print(f"\n=== Final Answer from Agent ===\n{answer}\n===")

            # The agent may return non-string answers (e.g. numbers); convert
            # them rather than discarding them.
            if answer is not None and str(answer).strip():
                return str(answer)
            return "I was unable to find a definitive answer."

        except Exception as e:
            error_message = str(e)
            print(f"An error occurred while processing the question: {error_message}")

            if "Agent stopped after" in error_message and "final_answer" in error_message:
                return "I was unable to find a definitive answer within the allowed steps."
            return f"An error occurred: {error_message}"


def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs MyAgent on them, submits all answers,
    and displays the results.
    """
    space_id = os.getenv("SPACE_ID")

    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please log in to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"
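
    # 1. Instantiate the agent (fails fast if GEMINI_API_KEY is missing).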
    try:
        agent = MyAgent()
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None

    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)
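
    # 2. Fetch the evaluation questions from the scoring server.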
| print(f"Fetching questions from: {questions_url}") |
| try: |
| response = requests.get(questions_url, timeout=15) |
| response.raise_for_status() |
| questions_data = response.json() |
| if not questions_data: |
| print("Fetched questions list is empty.") |
| return "Fetched questions list is empty or invalid format.", None |
| print(f"Fetched {len(questions_data)} questions.") |
| except requests.exceptions.RequestException as e: |
| print(f"Error fetching questions: {e}") |
| return f"Error fetching questions: {e}", None |
| except requests.exceptions.JSONDecodeError as e: |
| print(f"Error decoding JSON response from questions endpoint: {e}") |
| print(f"Response text: {response.text[:500]}") |
| return f"Error decoding server response for questions: {e}", None |
| except Exception as e: |
| print(f"An unexpected error occurred fetching questions: {e}") |
| return f"An unexpected error occurred fetching questions: {e}", None |
|
|
| |
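
    # 3. Run the agent on every question, collecting answers for submission
    #    and a log of results for display.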
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
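
    # 4. Prepare the submission payload.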
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)
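
    # 5. Submit the answers and report the score.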
| print(f"Submitting {len(answers_payload)} answers to: {submit_url}") |
| try: |
| response = requests.post(submit_url, json=submission_data, timeout=60) |
| response.raise_for_status() |
| result_data = response.json() |
| final_status = ( |
| f"Submission Successful!\n" |
| f"User: {result_data.get('username')}\n" |
| f"Overall Score: {result_data.get('score', 'N/A')}% " |
| f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n" |
| f"Message: {result_data.get('message', 'No message received.')}" |
| ) |
| print("Submission successful.") |
| results_df = pd.DataFrame(results_log) |
| return final_status, results_df |
| except requests.exceptions.HTTPError as e: |
| error_detail = f"Server responded with status {e.response.status_code}." |
| try: |
| error_json = e.response.json() |
| error_detail += f" Detail: {error_json.get('detail', e.response.text)}" |
| except requests.exceptions.JSONDecodeError: |
| error_detail += f" Response: {e.response.text[:500]}" |
| status_message = f"Submission Failed: {error_detail}" |
| print(status_message) |
| results_df = pd.DataFrame(results_log) |
| return status_message, results_df |
| except requests.exceptions.Timeout: |
| status_message = "Submission Failed: The request timed out." |
| print(status_message) |
| results_df = pd.DataFrame(results_log) |
| return status_message, results_df |
| except requests.exceptions.RequestException as e: |
| status_message = f"Submission Failed: Network error - {e}" |
| print(status_message) |
| results_df = pd.DataFrame(results_log) |
| return status_message, results_df |
| except Exception as e: |
| status_message = f"An unexpected error occurred during submission: {e}" |
| print(status_message) |
| results_df = pd.DataFrame(results_log) |
| return status_message, results_df |
|
|
| |
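

# Build the Gradio interface: login button, run button, status box, and results table.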
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**

        1. Clone this space, then modify the code to define your agent's logic, tools, and required packages.
        2. Log in to your Hugging Face account using the button below. Your HF username is used for the submission.
        3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

        ---
        **Disclaimers:**
        Once you click the submit button, it can take quite some time (this is the time the agent needs to work through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to avoid the long wait on the submit button, you could cache the answers and submit them in a separate action, or even answer the questions asynchronously.
        """
    )

    with gr.Tab("Main Evaluation"):
        gr.LoginButton()
        run_button = gr.Button("Run Evaluation & Submit All Answers")
        status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
        results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
        run_button.click(
            fn=run_and_submit_all,
            outputs=[status_output, results_table]
        )
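

# Startup diagnostics: report Space host/repo info when running on HF Spaces,
# then launch the UI.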
if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)

    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID")

    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️ SPACE_HOST environment variable not found (running locally?).")

    if space_id_startup:
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-"*(60 + len(" App Starting ")) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)