import os
from dotenv import load_dotenv
load_dotenv()
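# load_dotenv() above makes secrets such as OPENROUTER_API_KEY from a local .env file available via os.environ.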
import gradio as gr
import requests
import inspect
import pandas as pd
#model requirement
from smolagents import DuckDuckGoSearchTool, load_tool, tool, CodeAgent, InferenceClientModel
from typing import TypedDict, List, Dict, Any, Optional, Callable
from langgraph.graph import StateGraph, END
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage
import wikipedia
# (Keep Constants as is)
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
def openrouter_inference(prompt, model="deepseek/deepseek-r1-0528-qwen3-8b:free"):
api_key = os.environ["OPENROUTER_API_KEY"]
url = "https://openrouter.ai/api/v1/chat/completions"
headers = {
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json"
}
payload = {
"model": model,
"messages": [
{"role": "user", "content": prompt}
]
}
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
# Extract the answer from the response
return data["choices"][0]["message"]["content"]
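# A minimal usage sketch for the helper above (assumes OPENROUTER_API_KEY is set, e.g. via the .env file):
#   reply = openrouter_inference("What is the capital of France?")
#   print(reply)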
@tool
def add(a: int, b: int) -> int:
"""
Adds two integers.
Args:
a (int): The first integer.
b (int): The second integer.
Returns:
int: The sum of the two integers.
"""
return a + b
@tool
def subtract(a: int, b: int) -> int:
"""
Subtracts two integers.
Args:
a (int): The first integer.
b (int): The second integer.
Returns:
int: The difference of the two integers.
"""
return a - b
@tool
def multiply(a: int, b: int) -> int:
"""
Multiplies two integers.
Args:
a (int): The first integer.
b (int): The second integer.
Returns:
int: The product of the two integers.
"""
return a * b
@tool
def divide(a: int, b: int) -> float:
"""
Divides two integers.
Args:
a (int): The numerator.
b (int): The denominator.
Returns:
float: The quotient of the two integers.
"""
if b == 0:
raise ValueError("Division by zero is not allowed.")
return a / b
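# Instantiate the DuckDuckGo search tool once at module level; the web_search tool below wraps it.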
search_tool = DuckDuckGoSearchTool()
@tool
def web_search(query: str) -> str:
"""
Performs a web search for the given query.
Args:
query (str): The search query.
Returns:
str: The search results as a string.
"""
    result = search_tool(query)
    return f"Search results for '{query}': {result}"
@tool
def wikipedia_tool(query: str, llm: Optional[Callable[[str], str]] = openrouter_inference) -> str:
"""
Searches Wikipedia for the given query and uses the LLM to extract a direct answer.
Args:
query (str): The search term or question.
llm (callable, optional): The language model to extract the answer.
Returns:
str: A direct answer or summary from Wikipedia.
"""
try:
summary = wikipedia.summary(query, sentences=5)
if llm:
# Ask the LLM to extract a direct answer from the summary
prompt = (
f"Given the following Wikipedia summary, answer the question as directly as possible:\n"
f"Question: {query}\n"
f"Summary: {summary}\n"
f"Answer:"
)
answer = llm(prompt)
return answer.strip()
else:
return summary
except Exception as e:
return f"Wikipedia search error: {e}"
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
# --- Basic Agent Definition ---
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
class BasicAgent:
def __init__(self):
print("BasicAgent initialized.")
token=os.environ["OPENROUTER_API_KEY"]
self.system_prompt= """You are a helpful assistant.
You will answer questions based on the provided context.
You will always return a valid answer, even if the question is not clear or the context is insufficient.
Always return a valid answer after validating the source.
Your final answer should be in the following format:
"your answer here".
        If you are asked a question related to numbers, return the answer in numeric format.
        If you want more details about a topic or person, you can use the wikipedia tool.
If you want to perform web search to find the answer, use the web_search tool.
If you are given a task to create an image, you can use the image_generation_tool.
Answer only what is asked, do not add any additional information.
"""
model = InferenceClientModel(
model_id="deepseek-ai/deepseek-coder-6.7b-instruct", # Example OpenRouter model ID
token=token, # Set your Hugging Face token in the environment
provider="auto" # Use openrouter as the provider
)
        self.agent = CodeAgent(
            tools=[add, subtract, multiply, divide, web_search, image_generation_tool, wikipedia_tool],
model=model,
)
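    # Note: __call__ below queries OpenRouter directly; self.agent (the CodeAgent with the tools above) is built but not invoked here.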
def __call__(self, question: str, context: str = "") -> str:
print(f"Agent received question (first 50 chars): {question[:50]}...")
# Inject system prompt + question
question_with_prompt = f"{self.system_prompt}\n\nContext: {context}\n\nQuestion: {question.strip()}"
try:
answer = openrouter_inference(question_with_prompt)
except Exception as e:
print(f"Error calling OpenRouter: {e}")
answer = "Sorry, I couldn't get an answer from the model."
print(f"Agent returning answer: {answer.strip()}")
return answer.strip()
# # Fix: handle dict or string
# if isinstance(answer, dict) and "content" in answer:
# result = answer["content"]
# else:
# result = str(answer)
# print(f"Agent returning answer: {result.strip()}")
# return result.strip()
def run_and_submit_all( profile: gr.OAuthProfile | None):
"""
Fetches all questions, runs the BasicAgent on them, submits all answers,
and displays the results.
"""
# --- Determine HF Space Runtime URL and Repo URL ---
space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
if profile:
username= f"{profile.username}"
print(f"User logged in: {username}")
else:
print("User not logged in.")
return "Please Login to Hugging Face with the button.", None
api_url = DEFAULT_API_URL
questions_url = f"{api_url}/questions"
submit_url = f"{api_url}/submit"
# 1. Instantiate Agent ( modify this part to create your agent)
try:
agent = BasicAgent()
except Exception as e:
print(f"Error instantiating agent: {e}")
return f"Error initializing agent: {e}", None
    # In the case of an app running as a Hugging Face Space, this link points toward your codebase (useful for others, so please keep it public)
agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
print(agent_code)
# 2. Fetch Questions
print(f"Fetching questions from: {questions_url}")
try:
response = requests.get(questions_url, timeout=15)
response.raise_for_status()
questions_data = response.json()
if not questions_data:
print("Fetched questions list is empty.")
return "Fetched questions list is empty or invalid format.", None
print(f"Fetched {len(questions_data)} questions.")
except requests.exceptions.RequestException as e:
print(f"Error fetching questions: {e}")
return f"Error fetching questions: {e}", None
except requests.exceptions.JSONDecodeError as e:
print(f"Error decoding JSON response from questions endpoint: {e}")
print(f"Response text: {response.text[:500]}")
return f"Error decoding server response for questions: {e}", None
except Exception as e:
print(f"An unexpected error occurred fetching questions: {e}")
return f"An unexpected error occurred fetching questions: {e}", None
# 3. Run your Agent
results_log = []
answers_payload = []
print(f"Running agent on {len(questions_data)} questions...")
for item in questions_data:
task_id = item.get("task_id")
question_text = item.get("question")
if not task_id or question_text is None:
print(f"Skipping item with missing task_id or question: {item}")
continue
try:
submitted_answer = agent(question_text)
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
except Exception as e:
print(f"Error running agent on task {task_id}: {e}")
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
if not answers_payload:
print("Agent did not produce any answers to submit.")
return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
# 4. Prepare Submission
submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
print(status_update)
# 5. Submit
print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
try:
response = requests.post(submit_url, json=submission_data, timeout=60)
response.raise_for_status()
result_data = response.json()
final_status = (
f"Submission Successful!\n"
f"User: {result_data.get('username')}\n"
f"Overall Score: {result_data.get('score', 'N/A')}% "
f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
f"Message: {result_data.get('message', 'No message received.')}"
)
print("Submission successful.")
results_df = pd.DataFrame(results_log)
return final_status, results_df
except requests.exceptions.HTTPError as e:
error_detail = f"Server responded with status {e.response.status_code}."
try:
error_json = e.response.json()
error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
except requests.exceptions.JSONDecodeError:
error_detail += f" Response: {e.response.text[:500]}"
status_message = f"Submission Failed: {error_detail}"
print(status_message)
results_df = pd.DataFrame(results_log)
return status_message, results_df
except requests.exceptions.Timeout:
status_message = "Submission Failed: The request timed out."
print(status_message)
results_df = pd.DataFrame(results_log)
return status_message, results_df
except requests.exceptions.RequestException as e:
status_message = f"Submission Failed: Network error - {e}"
print(status_message)
results_df = pd.DataFrame(results_log)
return status_message, results_df
except Exception as e:
status_message = f"An unexpected error occurred during submission: {e}"
print(status_message)
results_df = pd.DataFrame(results_log)
return status_message, results_df
# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
gr.Markdown("# Basic Agent Evaluation Runner")
gr.Markdown(
"""
**Instructions:**
1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
---
**Disclaimers:**
        Once you click the "Run Evaluation & Submit All Answers" button, it can take quite some time (this is the time it takes the agent to go through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to avoid the long wait on submission, a solution could be to cache the answers and submit them in a separate action, or even to answer the questions asynchronously.
"""
)
gr.LoginButton()
run_button = gr.Button("Run Evaluation & Submit All Answers")
status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
# Removed max_rows=10 from DataFrame constructor
results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
run_button.click(
fn=run_and_submit_all,
outputs=[status_output, results_table]
)
if __name__ == "__main__":
print("\n" + "-"*30 + " App Starting " + "-"*30)
# Check for SPACE_HOST and SPACE_ID at startup for information
space_host_startup = os.getenv("SPACE_HOST")
space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
if space_host_startup:
print(f"✅ SPACE_HOST found: {space_host_startup}")
print(f" Runtime URL should be: https://{space_host_startup}.hf.space")
else:
print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
if space_id_startup: # Print repo URLs if SPACE_ID is found
print(f"✅ SPACE_ID found: {space_id_startup}")
print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
print(f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
else:
print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
print("-"*(60 + len(" App Starting ")) + "\n")
print("Launching Gradio Interface for Basic Agent Evaluation...")
demo.launch(debug=True, share=False)