Spaces: structure

Files changed:
- agent.py  +41 -40
- app.py  +52 -28
- app_template.py  +53 -28
- requirements.txt  +0 -4
- setup_actions.ipynb  +12 -2
agent.py
CHANGED
@@ -1,37 +1,31 @@
 import os
+
+from datasets import load_dataset
 from dotenv import load_dotenv
-from langgraph.graph import START, StateGraph, MessagesState
-from langgraph.prebuilt import tools_condition
-from langgraph.prebuilt import ToolNode
-from langchain_google_genai import ChatGoogleGenerativeAI
-from langchain_groq import ChatGroq
-from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings
-from langchain_community.tools.tavily_search import TavilySearchResults
-from langchain_community.document_loaders import WikipediaLoader
-from langchain_community.document_loaders import ArxivLoader
-from langchain_community.vectorstores import SupabaseVectorStore
-from langchain_core.messages import SystemMessage, HumanMessage
-from langchain_core.tools import tool
-from langchain.tools.retriever import create_retriever_tool
-from langchain_community.vectorstores import Chroma
 from langchain.embeddings import HuggingFaceEmbeddings
+from langchain.schema import Document
 from langchain.tools.retriever import create_retriever_tool
-import os
-import json
-from datasets import load_dataset
-from langchain.embeddings import HuggingFaceEmbeddings
 from langchain.vectorstores import Chroma
-from
+from langchain_community.document_loaders import ArxivLoader, WikipediaLoader
+from langchain_community.tools.tavily_search import TavilySearchResults
+from langchain_core.messages import HumanMessage, SystemMessage
+from langchain_core.tools import tool
+from langchain_google_genai import ChatGoogleGenerativeAI
+from langchain_groq import ChatGroq
+from langchain_huggingface import (ChatHuggingFace, HuggingFaceEmbeddings,
+                                   HuggingFaceEndpoint)
+from langgraph.graph import START, MessagesState, StateGraph
+from langgraph.prebuilt import ToolNode, tools_condition
 
 load_dotenv()
 
 @tool
 def calculator(query: str) -> str:
     """Perform basic arithmetic operations based on the provided query.
-
+
     Args:
         query: A mathematical query as a string, e.g., '2 + 2' or '5 * 6'."""
-
+
     try:
         # Evaluate the mathematical expression
         result = eval(query)
@@ -43,7 +37,7 @@ def calculator(query: str) -> str:
 @tool
 def wiki_search(query: str) -> str:
     """Search Wikipedia for a query and return maximum 2 results.
-
+
     Args:
         query: The search query."""
     search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
@@ -51,13 +45,15 @@ def wiki_search(query: str) -> str:
         [
             f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
             for doc in search_docs
-        ])
+        ]
+    )
     return {"wiki_results": formatted_search_docs}
 
+
 @tool
 def web_search(query: str) -> str:
     """Search Tavily for a query and return maximum 3 results.
-
+
     Args:
         query: The search query."""
     search_docs = TavilySearchResults(max_results=3).invoke(query=query)
@@ -65,13 +61,15 @@ def web_search(query: str) -> str:
         [
             f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
             for doc in search_docs
-        ])
+        ]
+    )
     return {"web_results": formatted_search_docs}
 
+
 @tool
 def arvix_search(query: str) -> str:
     """Search Arxiv for a query and return maximum 3 result.
-
+
     Args:
         query: The search query."""
     search_docs = ArxivLoader(query=query, load_max_docs=3).load()
@@ -79,9 +77,11 @@ def arvix_search(query: str) -> str:
         [
             f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
             for doc in search_docs
-        ])
+        ]
+    )
     return {"arvix_results": formatted_search_docs}
 
+
 system_prompt = """You are a helpful assistant tasked with answering questions using a set of tools.
 Now, I will ask you a question. Report your thoughts, and finish your answer with the following template:
 FINAL ANSWER: [YOUR FINAL ANSWER].
@@ -92,7 +92,9 @@ Your answer should only start with "FINAL ANSWER: ", then follows with the answer.
 sys_msg = SystemMessage(content=system_prompt)
 
 # build a retriever
-embeddings = HuggingFaceEmbeddings(
+embeddings = HuggingFaceEmbeddings(
+    model_name="sentence-transformers/all-mpnet-base-v2"
+)  # dim=768
 
 # Load the GAIA validation dataset
 dataset = load_dataset("gaia-benchmark/GAIA", name="2023_level1", split="validation")
@@ -105,29 +107,24 @@ documents = []
 for entry in dataset:
     question = entry["Question"]
     answer = entry["Final answer"]
-
+
     # Create a document with both the question and the answer as metadata
     metadata = {
         "task_id": entry["task_id"],
         "steps": entry["Annotator Metadata"]["Steps"],
         "tools": entry["Annotator Metadata"]["Tools"],
-        "answer": answer
+        "answer": answer,
     }
-
+
     # Add the question to the list of documents
-    documents.append(
-        Document(
-            page_content=question,
-            metadata=metadata
-        )
-    )
+    documents.append(Document(page_content=question, metadata=metadata))
 
 # Insert the documents into Chroma
 vectorstore = Chroma.from_documents(
     documents=documents,
     embedding=embeddings,
     collection_name="gaia_validation",
-    persist_directory="./chroma_store"
+    persist_directory="./chroma_store",
 )
 
 create_retriever_tool = create_retriever_tool(
@@ -144,6 +141,7 @@ tools = [
     arvix_search,
 ]
 
+
 # Build graph function
 def build_graph(provider: str = "groq"):
     """Build the graph"""
@@ -153,7 +151,9 @@ def build_graph(provider: str = "groq"):
         llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
     elif provider == "groq":
         # Groq https://console.groq.com/docs/models
-        llm = ChatGroq(
+        llm = ChatGroq(
+            model="qwen-qwq-32b", temperature=0
+        )  # optional : qwen-qwq-32b gemma2-9b-it
     elif provider == "huggingface":
         # TODO: Add huggingface endpoint
         llm = ChatHuggingFace(
@@ -171,7 +171,7 @@ def build_graph(provider: str = "groq"):
     def assistant(state: MessagesState):
         """Assistant node"""
         return {"messages": [llm_with_tools.invoke(state["messages"])]}
-
+
     def retriever(state: MessagesState):
         """Retriever node"""
         similar_question = vectorstore.similarity_search(state["messages"][0].content)
@@ -195,6 +195,7 @@ def build_graph(provider: str = "groq"):
     # Compile graph
     return builder.compile()
 
+
 # test
 if __name__ == "__main__":
     question = "When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?"
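For orientation, here is a minimal usage sketch of the refactored agent, using only calls that appear in this diff and in app.py (build_graph, HumanMessage, graph.invoke); the provider choice and the sample question are illustrative, and the GAIA dataset, Chroma store, and API keys are assumed to be configured as above.

    # Minimal sketch, not part of the commit: drive the graph built by agent.py.
    from langchain_core.messages import HumanMessage

    from agent import build_graph

    graph = build_graph(provider="groq")  # default branch; Gemini and HuggingFace branches also exist
    question = "What is 2 + 2?"  # illustrative question
    result = graph.invoke({"messages": [HumanMessage(content=question)]})
    print(result["messages"][-1].content)  # the system prompt asks for "FINAL ANSWER: ..."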
app.py
CHANGED
@@ -1,16 +1,18 @@
 # app.py
 import os
+
 import gradio as gr
-import requests
 import pandas as pd
+import requests
 from langchain_core.messages import HumanMessage
-from agent import build_graph
 
+from agent import build_graph
 
 # (Keep Constants as is)
 # --- Constants ---
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
+
 # --- Basic Agent Definition ---
 # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
 class BasicAgent:
@@ -24,16 +26,17 @@ class BasicAgent:
         result = self.graph.invoke({"messages": messages})
         return result["messages"][-1].content  # Simplify if needed
 
-def run_and_submit_all( profile: gr.OAuthProfile | None):
+
+def run_and_submit_all(profile: gr.OAuthProfile | None):
     """
     Fetches all questions, runs the BasicAgent on them, submits all answers,
     and displays the results.
     """
     # --- Determine HF Space Runtime URL and Repo URL ---
-    space_id = os.getenv("SPACE_ID")
+    space_id = os.getenv("SPACE_ID")  # Get the SPACE_ID for sending link to the code
 
     if profile:
-        username= f"{profile.username}"
+        username = f"{profile.username}"
         print(f"User logged in: {username}")
     else:
         print("User not logged in.")
@@ -60,16 +63,16 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
         response.raise_for_status()
         questions_data = response.json()
         if not questions_data:
-            …
-            …
+            print("Fetched questions list is empty.")
+            return "Fetched questions list is empty or invalid format.", None
         print(f"Fetched {len(questions_data)} questions.")
     except requests.exceptions.RequestException as e:
         print(f"Error fetching questions: {e}")
         return f"Error fetching questions: {e}", None
     except requests.exceptions.JSONDecodeError as e:
-        …
-        …
-        …
+        print(f"Error decoding JSON response from questions endpoint: {e}")
+        print(f"Response text: {response.text[:500]}")
+        return f"Error decoding server response for questions: {e}", None
     except Exception as e:
         print(f"An unexpected error occurred fetching questions: {e}")
         return f"An unexpected error occurred fetching questions: {e}", None
@@ -86,18 +89,36 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
             continue
         try:
             submitted_answer = agent(question_text)
-            answers_payload.append(
-            …
+            answers_payload.append(
+                {"task_id": task_id, "submitted_answer": submitted_answer}
+            )
+            results_log.append(
+                {
+                    "Task ID": task_id,
+                    "Question": question_text,
+                    "Submitted Answer": submitted_answer,
+                }
+            )
         except Exception as e:
-            …
-            …
+            print(f"Error running agent on task {task_id}: {e}")
+            results_log.append(
+                {
+                    "Task ID": task_id,
+                    "Question": question_text,
+                    "Submitted Answer": f"AGENT ERROR: {e}",
+                }
+            )
 
     if not answers_payload:
         print("Agent did not produce any answers to submit.")
         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
 
-    # 4. Prepare Submission
-    submission_data = {
+    # 4. Prepare Submission
+    submission_data = {
+        "username": username.strip(),
+        "agent_code": agent_code,
+        "answers": answers_payload,
+    }
     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
     print(status_update)
 
@@ -167,20 +188,19 @@ with gr.Blocks() as demo:
 
     run_button = gr.Button("Run Evaluation & Submit All Answers")
 
-    status_output = gr.Textbox(
+    status_output = gr.Textbox(
+        label="Run Status / Submission Result", lines=5, interactive=False
+    )
     # Removed max_rows=10 from DataFrame constructor
     results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
 
-    run_button.click(
-        fn=run_and_submit_all,
-        outputs=[status_output, results_table]
-    )
+    run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])
 
 if __name__ == "__main__":
-    print("\n" + "-"*30 + " App Starting " + "-"*30)
+    print("\n" + "-" * 30 + " App Starting " + "-" * 30)
     # Check for SPACE_HOST and SPACE_ID at startup for information
     space_host_startup = os.getenv("SPACE_HOST")
-    space_id_startup = os.getenv("SPACE_ID")
+    space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup
 
     if space_host_startup:
         print(f"✅ SPACE_HOST found: {space_host_startup}")
@@ -188,14 +208,18 @@ if __name__ == "__main__":
     else:
         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
 
-    if space_id_startup:
+    if space_id_startup:  # Print repo URLs if SPACE_ID is found
         print(f"✅ SPACE_ID found: {space_id_startup}")
         print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
-        print(
+        print(
+            f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main"
+        )
     else:
-        print(
+        print(
+            "ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined."
+        )
 
-    print("-"*(60 + len(" App Starting ")) + "\n")
+    print("-" * (60 + len(" App Starting ")) + "\n")
 
     print("Launching Gradio Interface for Basic Agent Evaluation...")
-    demo.launch(debug=True, share=False)
+    demo.launch(debug=True, share=False)
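The hunks above end just after submission_data is assembled; a hedged sketch of the POST that typically follows (the "/submit" path, the timeout, and the sample field values are assumptions, only DEFAULT_API_URL and the payload keys appear in the diff):

    # Sketch only: submit the prepared payload to the scoring API.
    import requests

    DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
    submission_data = {
        "username": "example-user",  # hypothetical
        "agent_code": "https://huggingface.co/spaces/example-user/structure/tree/main",  # hypothetical
        "answers": [{"task_id": "task-1", "submitted_answer": "FINAL ANSWER: 42"}],  # hypothetical
    }
    response = requests.post(f"{DEFAULT_API_URL}/submit", json=submission_data, timeout=60)
    response.raise_for_status()
    print(response.json())  # assumed to be a JSON body describing the run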
app_template.py
CHANGED
@@ -1,34 +1,38 @@
+import inspect
 import os
+
 import gradio as gr
-import requests
-import inspect
 import pandas as pd
+import requests
 
 # (Keep Constants as is)
 # --- Constants ---
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
+
 # --- Basic Agent Definition ---
 # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
 class BasicAgent:
     def __init__(self):
         print("BasicAgent initialized.")
+
     def __call__(self, question: str) -> str:
         print(f"Agent received question (first 50 chars): {question[:50]}...")
         fixed_answer = "This is a default answer."
         print(f"Agent returning fixed answer: {fixed_answer}")
         return fixed_answer
 
-def run_and_submit_all( profile: gr.OAuthProfile | None):
+
+def run_and_submit_all(profile: gr.OAuthProfile | None):
     """
     Fetches all questions, runs the BasicAgent on them, submits all answers,
     and displays the results.
     """
     # --- Determine HF Space Runtime URL and Repo URL ---
-    space_id = os.getenv("SPACE_ID")
+    space_id = os.getenv("SPACE_ID")  # Get the SPACE_ID for sending link to the code
 
     if profile:
-        username= f"{profile.username}"
+        username = f"{profile.username}"
         print(f"User logged in: {username}")
     else:
         print("User not logged in.")
@@ -55,16 +59,16 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
         response.raise_for_status()
         questions_data = response.json()
         if not questions_data:
-            …
-            …
+            print("Fetched questions list is empty.")
+            return "Fetched questions list is empty or invalid format.", None
         print(f"Fetched {len(questions_data)} questions.")
     except requests.exceptions.RequestException as e:
         print(f"Error fetching questions: {e}")
         return f"Error fetching questions: {e}", None
     except requests.exceptions.JSONDecodeError as e:
-        …
-        …
-        …
+        print(f"Error decoding JSON response from questions endpoint: {e}")
+        print(f"Response text: {response.text[:500]}")
+        return f"Error decoding server response for questions: {e}", None
     except Exception as e:
         print(f"An unexpected error occurred fetching questions: {e}")
         return f"An unexpected error occurred fetching questions: {e}", None
@@ -81,18 +85,36 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
             continue
         try:
             submitted_answer = agent(question_text)
-            answers_payload.append(
-            …
+            answers_payload.append(
+                {"task_id": task_id, "submitted_answer": submitted_answer}
+            )
+            results_log.append(
+                {
+                    "Task ID": task_id,
+                    "Question": question_text,
+                    "Submitted Answer": submitted_answer,
+                }
+            )
         except Exception as e:
-            …
-            …
+            print(f"Error running agent on task {task_id}: {e}")
+            results_log.append(
+                {
+                    "Task ID": task_id,
+                    "Question": question_text,
+                    "Submitted Answer": f"AGENT ERROR: {e}",
+                }
+            )
 
     if not answers_payload:
         print("Agent did not produce any answers to submit.")
         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
 
-    # 4. Prepare Submission
-    submission_data = {
+    # 4. Prepare Submission
+    submission_data = {
+        "username": username.strip(),
+        "agent_code": agent_code,
+        "answers": answers_payload,
+    }
     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
     print(status_update)
 
@@ -162,20 +184,19 @@ with gr.Blocks() as demo:
 
     run_button = gr.Button("Run Evaluation & Submit All Answers")
 
-    status_output = gr.Textbox(
+    status_output = gr.Textbox(
+        label="Run Status / Submission Result", lines=5, interactive=False
+    )
     # Removed max_rows=10 from DataFrame constructor
     results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
 
-    run_button.click(
-        fn=run_and_submit_all,
-        outputs=[status_output, results_table]
-    )
+    run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])
 
 if __name__ == "__main__":
-    print("\n" + "-"*30 + " App Starting " + "-"*30)
+    print("\n" + "-" * 30 + " App Starting " + "-" * 30)
     # Check for SPACE_HOST and SPACE_ID at startup for information
     space_host_startup = os.getenv("SPACE_HOST")
-    space_id_startup = os.getenv("SPACE_ID")
+    space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup
 
     if space_host_startup:
         print(f"✅ SPACE_HOST found: {space_host_startup}")
@@ -183,14 +204,18 @@ if __name__ == "__main__":
     else:
         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
 
-    if space_id_startup:
+    if space_id_startup:  # Print repo URLs if SPACE_ID is found
         print(f"✅ SPACE_ID found: {space_id_startup}")
         print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
-        print(
+        print(
+            f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main"
+        )
     else:
-        print(
+        print(
+            "ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined."
+        )
 
-    print("-"*(60 + len(" App Starting ")) + "\n")
+    print("-" * (60 + len(" App Starting ")) + "\n")
 
     print("Launching Gradio Interface for Basic Agent Evaluation...")
-    demo.launch(debug=True, share=False)
+    demo.launch(debug=True, share=False)
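app_template.py keeps the same Gradio wiring as app.py. A small self-contained sketch of that wiring, assuming the course template's gr.LoginButton (not shown in these hunks), which is what lets Gradio inject the gr.OAuthProfile argument into the click handler:

    # Sketch of the Blocks layout used by app.py / app_template.py.
    import gradio as gr
    import pandas as pd

    def run_and_submit_all(profile: gr.OAuthProfile | None):
        # Gradio fills `profile` automatically when a LoginButton is present.
        status = f"User logged in: {profile.username}" if profile else "User not logged in."
        return status, pd.DataFrame()

    with gr.Blocks() as demo:
        gr.LoginButton()  # assumed from the template; not part of the hunks above
        run_button = gr.Button("Run Evaluation & Submit All Answers")
        status_output = gr.Textbox(
            label="Run Status / Submission Result", lines=5, interactive=False
        )
        results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
        run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])

    if __name__ == "__main__":
        demo.launch(debug=True, share=False)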
requirements.txt
CHANGED
@@ -12,8 +12,4 @@ langgraph
 huggingface_hub
 chromadb
 sentence-transformers
-arxiv
-pymupdf
-wikipedia
-pgvector
 python-dotenv
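One hedged caveat: agent.py still imports WikipediaLoader and ArxivLoader, and those loaders normally rely on the wikipedia, arxiv, and pymupdf packages at runtime, so dropping them here assumes the corresponding tools are no longer exercised. A quick availability check:

    # Sketch: verify the packages removed from requirements.txt are still importable
    # if the Wikipedia/Arxiv tools remain in use (fitz is provided by pymupdf).
    import importlib.util

    for pkg in ("wikipedia", "arxiv", "fitz"):
        print(pkg, "available" if importlib.util.find_spec(pkg) else "MISSING")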
setup_actions.ipynb
CHANGED
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": null,
    "id": "55b5db25",
    "metadata": {},
    "outputs": [
@@ -24,7 +24,17 @@
     "\n",
     "from huggingface_hub import login\n",
     "\n",
-    "login(token=os.environ[\"HUGGINGFACE_TOKEN\"])"
+    "login(token=os.environ[\"HUGGINGFACE_TOKEN\"])\n",
+    "\n",
+    "# # Run isort on all folders\n",
+    "# isort .\n",
+    "# # Run black on all folders\n",
+    "# black .\n",
+    "# # Run flake8 on all folders\n",
+    "# flake8 .\n",
+    "\n",
+    "# # Run pylint on all folders\n",
+    "# pylint ."
    ]
   },
   {
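The commented-out lines added to the notebook mirror the usual formatter and linter invocations behind this commit's reformatting; a sketch of running the same tools from Python, assuming they are installed in the environment:

    # Sketch: run isort, black, flake8 and pylint over the repository.
    import subprocess

    for cmd in (["isort", "."], ["black", "."], ["flake8", "."], ["pylint", "agent.py", "app.py", "app_template.py"]):
        print("$", " ".join(cmd))
        subprocess.run(cmd, check=False)  # linters exit non-zero when they report findings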