import os
import sys
import time
from io import StringIO
from typing import TypedDict, Annotated

import gradio as gr
import pandas as pd
import requests

from langchain_core.messages import AnyMessage, HumanMessage, SystemMessage, ToolMessage
from langchain_core.tools import tool
from langchain_community.tools import DuckDuckGoSearchRun
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
from langgraph.graph import START, StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import tools_condition, ToolNode
# Detect whether we are running locally or on Hugging Face Spaces:
# the SPACE_ID environment variable is only set inside a Space runtime.
IS_LOCAL = os.getenv("SPACE_ID") is None

if IS_LOCAL:
    # ChatOllama is only needed for local runs, so import it lazily here.
    try:
        from langchain_ollama import ChatOllama
    except ImportError:
        # Fallback for environments that only ship the older community build.
        from langchain_community.chat_models import ChatOllama
# (Keep Constants as is)
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

# --- Basic Agent Definition ---
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
if IS_LOCAL:
    # Local mode: talk to a model served by a local Ollama instance.
    llm = ChatOllama(
        model="gemma4:31b",  # swap in whatever model tag you have pulled locally
        temperature=0,
    )
else:
    # On Hugging Face, use a remote endpoint wrapped in ChatHuggingFace for tool support
    llm_base = HuggingFaceEndpoint(
        repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
        huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
    )
    llm = ChatHuggingFace(llm=llm_base)
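# NOTE: the remote endpoint needs HUGGINGFACEHUB_API_TOKEN to be available
# (e.g. as a Space secret). The local path needs no token, but it assumes an
# Ollama server is already running on its default port.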
system_prompt = """You are an AI assistant taking the GAIA benchmark.
You must output ONLY the final answer. Do not include any explanations,
conversational text or formatting. If the answer is a number, output just
the number. If it is a list, output a comma-separated list."""
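# GAIA answers are graded by (quasi) exact match, which is why the system
# prompt above forbids any explanation or formatting around the answer.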
# Tool Definitions
search_tool = DuckDuckGoSearchRun()

@tool
def python_repl(code: str) -> str:
    """
    Executes Python code and returns the standard output.
    Use this to read files (like CSVs or Excel), process data with pandas or do math.
    CRITICAL: You MUST use print() to output the final result so it can be captured.
    """
    # Log the incoming code *before* redirecting stdout, so this banner is
    # not captured as part of the tool output.
    print(f"\n--- [Executing Python Code] ---\n{code}\n------------------------------")
    old_stdout = sys.stdout
    redirected_output = sys.stdout = StringIO()
    try:
        # Execute in the global namespace so imports and variables persist
        # across calls.
        exec(code, globals())
    except Exception as e:
        return f"Error executing code: {e}"
    finally:
        sys.stdout = old_stdout
    output = redirected_output.getvalue()
    print(f"--- [Code Output] ---\n{output}\n--------------------")
    return output if output else "Code executed successfully, but printed nothing. Use print() to see output."
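# CAUTION: exec() runs model-generated code with the full privileges of this
# process. That is tolerable inside a throwaway Space container for the
# benchmark, but do not expose this tool in an environment you care about.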
# Tools Instantiation
tools = [search_tool, python_repl]
chat_with_tools = llm.bind_tools(tools)

class AgentState(TypedDict):
    messages: Annotated[list[AnyMessage], add_messages]
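# add_messages is a reducer: a node that returns {"messages": [msg]} appends
# to the running history instead of overwriting it.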
def assistant(state: AgentState):
    # Log the last tool result if it exists
    last_message = state["messages"][-1]
    if isinstance(last_message, ToolMessage):
        print(f"--- [Tool Result Received] ---\n{last_message.content}\n-----------------------------")

    print("\n--- [Assistant is thinking] ---")
    response = chat_with_tools.invoke(state["messages"])

    # Log content if it's not empty
    if response.content:
        print(f"Assistant Response: {response.content}")
    # Log tool calls
    if hasattr(response, "tool_calls") and response.tool_calls:
        for tc in response.tool_calls:
            print(f"Tool Call: {tc['name']} with args: {tc['args']}")

    return {"messages": [response]}
# Graph Instantiation
builder = StateGraph(AgentState)

# Graph Nodes
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))

# Graph Edges
builder.add_edge(START, "assistant")
builder.add_conditional_edges("assistant", tools_condition)
builder.add_edge("tools", "assistant")

# Agent Compile
my_agent = builder.compile()
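# The compiled graph loops assistant -> tools -> assistant until the model
# replies without requesting a tool call, at which point tools_condition
# routes to END and the run finishes.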
class BasicAgent:
    def __init__(self):
        print("BasicAgent initialized.")

    def __call__(self, question: str, file_name: str = None) -> str:
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        # 1. Construct the prompt, mentioning the attached file if there is one
        if file_name:
            # Note: You may need to adjust the path depending on where your space
            # saves downloaded files. Usually, the grading space provides the file
            # name, and the file sits in the same directory.
            prompt = f"Question: {question}\nAttached File: {file_name}\n\nUse the python_repl tool to read and analyze this file using pandas."
        else:
            prompt = f"Question: {question}"

        # 2. Inject the strict GAIA system prompt and the human prompt
        messages = [
            SystemMessage(content=system_prompt),
            HumanMessage(content=prompt)
        ]

        # 3. Invoke Agent
        response_state = my_agent.invoke({"messages": messages})

        # 4. The final answer is the content of the last message
        final_answer = response_state["messages"][-1].content
        print(f"--- [Final Answer for this Question] ---\n{final_answer}\n")
        return final_answer
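# Quick local smoke test (hypothetical usage, not part of the app flow):
#   agent = BasicAgent()
#   agent("What is 2 + 2?")  # expected to return "4" given the system prompt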
def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the BasicAgent on them, submits all answers,
    and displays the results.
    """
    if IS_LOCAL:
        username = "local_user"
        print(f"Running in LOCAL MODE. Using default username: {username}")
    else:
        if profile:
            username = f"{profile.username}"
            print(f"User logged in: {username}")
        else:
            print("User not logged in.")
            return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate Agent (modify this part to create your agent)
    try:
        agent = BasicAgent()
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None

    if IS_LOCAL:
        space_id = "local-test-space"
        agent_code = "local-execution"
    else:
        space_id = os.getenv("SPACE_ID")
        agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(f"Agent code link: {agent_code}")
    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    # JSONDecodeError is a RequestException subclass, so it must be caught
    # before the generic handler or its branch would be unreachable.
    except requests.exceptions.JSONDecodeError as e:
        print(f"Error decoding JSON response from questions endpoint: {e}")
        print(f"Response text: {response.text[:500]}")
        return f"Error decoding server response for questions: {e}", None
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None
    # 3. Run your Agent
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        file_name = item.get("file_name")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        # Skip questions with files if running locally
        if IS_LOCAL and file_name:
            print(f"Skipping task {task_id} because it requires a file: {file_name}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": "SKIPPED (Local run - No files)"})
            continue
        try:
            submitted_answer = agent(question_text, file_name)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
            # Pause between questions to stay under the LLM provider's rate limits
            print("Pausing for 15 seconds to respect rate limits...")
            time.sleep(15)
        except Exception as e:
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
    # 4. Prepare Submission
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)

    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
def run_single_test(question):
    """Runs the agent on a single question and returns the answer."""
    try:
        agent = BasicAgent()
        answer = agent(question)
        return answer
    except Exception as e:
        return f"Error running agent: {e}"
# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
    gr.Markdown("# GAIA Agent Runner & Evaluator")
    with gr.Tabs():
        with gr.TabItem("Single Question Test"):
            gr.Markdown("### Test your agent on a single question")
            test_input = gr.Textbox(
                label="Question",
                placeholder="Enter a GAIA-style question here...",
                lines=3,
                value="How many studio albums were published by Mercedes Sosa throughout her career?"
            )
            test_button = gr.Button("Run Agent Test")
            test_output = gr.Textbox(label="Agent Response", interactive=False, lines=5)
            test_button.click(
                fn=run_single_test,
                inputs=[test_input],
                outputs=[test_output]
            )
        with gr.TabItem("Full Evaluation & Submission"):
            gr.Markdown(
                """
                **Instructions:**
                1. Log in to your Hugging Face account below (required for submission).
                2. Click 'Run Evaluation' to fetch ALL questions from the benchmark, run your agent on them, and submit.
                *Note: This will skip questions requiring files in local mode.*
                """
            )
            if not IS_LOCAL:
                gr.LoginButton()
            run_button = gr.Button("Run Full Evaluation & Submit All Answers", variant="primary")
            status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
            results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
            run_button.click(
                fn=run_and_submit_all,
                outputs=[status_output, results_table]
            )
if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    # Check for SPACE_HOST and SPACE_ID at startup for information
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID")

    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️  SPACE_HOST environment variable not found (running locally?).")

    if space_id_startup:  # Print repo URLs if SPACE_ID is found
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️  SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-"*(60 + len(" App Starting ")) + "\n")
    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)