Update app.py

app.py CHANGED
@@ -24,7 +24,6 @@ from typing import TypedDict, Annotated, Literal
 # Constants
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
-# Define the state for the agent
 class MessagesState(TypedDict):
     messages: List[BaseMessage]
 
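A note on the state schema above: because MessagesState is a plain TypedDict with no reducer, whatever a node returns under "messages" replaces the whole list, which is why the assistant node later in this diff returns messages + [response]. LangGraph's idiomatic alternative attaches the add_messages reducer so each node can return only its new messages; the prebuilt ToolNode in particular returns only the new tool messages, so with replacement semantics the earlier history would be lost. A minimal sketch, not part of this commit:

from typing import Annotated, TypedDict
from langchain_core.messages import BaseMessage
from langgraph.graph.message import add_messages

class MessagesState(TypedDict):
    # add_messages appends/merges returned messages instead of replacing the list
    messages: Annotated[list[BaseMessage], add_messages]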
@@ -42,8 +41,6 @@ class AdvancedAgent:
     def __init__(self):
         print("Initializing AdvancedAgent with LangGraph, Wikipedia, Arxiv, and Gemini 2.0 Flash")
         load_dotenv() # Load environment variables from .env file
-
-        # Initialize the graph
         self.graph = self.build_graph()
         print("Graph successfully built")
 
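load_dotenv() reads KEY=value pairs from a local .env file into the process environment. Based on the variable this file reads (OPENROUTER_API_KEY, in the next hunk) and the TAVILY_API_KEY variable that TavilySearchResults conventionally expects, a local .env might look like the sketch below; both values are placeholders, not from the commit:

OPENROUTER_API_KEY=your-openrouter-key
TAVILY_API_KEY=your-tavily-key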
@@ -51,130 +48,72 @@ class AdvancedAgent:
         """Build the LangGraph agent with necessary tools"""
 
         llm = ChatOpenAI(
-
-
-
-
+            model="google/gemini-2.0-flash-001",
+            temperature=0,
+            openai_api_key=os.getenv("OPENROUTER_API_KEY"),
+            openai_api_base="https://openrouter.ai/api/v1"
         )
         print("LLM initialized: Gemini 2.5 Pro via OpenRouter")
-
-
-        # Initialize LLM
-        # llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
-        # print("LLM initialized: Gemini 2.0 Flash")
 
-        # Initialize tools
         wikipedia_tool = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
         arxiv_tool = ArxivQueryRun()
         tavily_search = TavilySearchResults(max_results=5)
-
         tools = [wikipedia_tool, arxiv_tool, tavily_search]
         print(f"Initialized {len(tools)} tools: Wikipedia, Arxiv, Tavily Search")
 
-        # System message
         sys_msg = SystemMessage(content=system_prompt)
-
-        # Bind tools to LLM
         llm_with_tools = llm.bind_tools(tools)
-
-        #
+
+        # Assistant node must be able to access llm_with_tools.
         def assistant(state: MessagesState):
             """Assistant node that processes messages and generates responses"""
             messages = state["messages"]
             response = llm_with_tools.invoke(messages)
-            return {"messages":
-
-        # Create tool node
+            return {"messages": messages + [response]}
+
         tools_node = ToolNode(tools)
-
-        # Build the graph
+
         builder = StateGraph(MessagesState)
-
-        # Add nodes
         builder.add_node("assistant", assistant)
         builder.add_node("tools", tools_node)
-
-        #
-        builder.add_edge("assistant", "tools")
+
+        # The correct flow is: assistant -> tools (if tool needed) -> assistant (after tool used) -> END (if done)
         builder.set_entry_point("assistant")
-
-
+        builder.add_edge("tools", "assistant")
+
+        # Conditional: if tools_condition is met, go to tools, else END
         builder.add_conditional_edges(
             "assistant",
             tools_condition,
             {"tools": "tools", END: END}
         )
-
-        # # Set entry point
-        # builder.set_entry_point("assistant")
-
-        # Compile graph
+
         return builder.compile()
 
     def __call__(self, question: str) -> str:
-        """Process a question through the agent graph and return the response"""
         print(f"Agent received question (first 50 chars): {question[:50]}...")
-
-        # Create initial state with system message and human question
         messages = [
             SystemMessage(content=system_prompt),
             HumanMessage(content=question)
         ]
-
-
-
-        # Run the graph
         try:
             result = self.graph.invoke({"messages": messages})
-
-            # Extract the final AI message as the answer
             final_messages = result["messages"]
-
-            # Find the last non-tool AI message
             ai_messages = [msg for msg in final_messages if isinstance(msg, AIMessage)]
             if not ai_messages:
                 return "I wasn't able to generate a proper response. Please try again."
-
-            # Get the last AI message that's not a tool call
             for msg in reversed(ai_messages):
-                # Check if this is not a tool call message
                 if not hasattr(msg, 'tool_calls') or not msg.tool_calls:
                     return msg.content
-
-            # If we only have tool call messages, return the content of the last AI message
             return ai_messages[-1].content if ai_messages else "I wasn't able to generate a proper response. Please try again."
-
         except Exception as e:
             print(f"Error running agent graph: {e}")
             return f"Sorry, I encountered an error while processing your question: {str(e)}"
 
-
-
-        # # Run the graph
-        # try:
-        #     result = self.graph.invoke({"messages": messages})
-        #     # Extract the last AI message as the answer
-        #     for msg in reversed(result["messages"]):
-        #         if isinstance(msg, AIMessage) and not getattr(msg, "tool_call_id", None):
-        #             return msg.content
-
-        #     # Fallback if no valid AI message found
-        #     return "I wasn't able to generate a proper response. Please try again."
-        # except Exception as e:
-        #     print(f"Error running agent graph: {e}")
-        #     return f"Sorry, I encountered an error while processing your question: {str(e)}"
-
-# Function to run and submit all questions
 def run_and_submit_all(profile: gr.OAuthProfile | None):
-    """
-    Fetches all questions, runs the AdvancedAgent on them, submits all answers,
-    and displays the results.
-    """
-    # --- Determine HF Space Runtime URL and Repo URL ---
-    space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
-
+    space_id = os.getenv("SPACE_ID")
     if profile:
-        username= f"{profile.username}"
+        username = f"{profile.username}"
         print(f"User logged in: {username}")
     else:
        print("User not logged in.")
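The hunk above points langchain-openai's ChatOpenAI at OpenRouter's OpenAI-compatible endpoint; note that the unchanged log strings still say "Gemini 2.5 Pro" and "Gemini 2.0 Flash" while the configured slug is google/gemini-2.0-flash-001. A minimal standalone sketch of the same wiring, assuming OPENROUTER_API_KEY is set (api_key and base_url are the current names for the openai_api_key and openai_api_base aliases used in the diff):

import os
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model="google/gemini-2.0-flash-001",  # OpenRouter model slug
    temperature=0,
    api_key=os.getenv("OPENROUTER_API_KEY"),
    base_url="https://openrouter.ai/api/v1",
)
# Quick connectivity check:
print(llm.invoke("Reply with one word: hello").content)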
@@ -184,39 +123,35 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
     questions_url = f"{api_url}/questions"
     submit_url = f"{api_url}/submit"
 
-    # 1. Instantiate Agent
     try:
         agent = AdvancedAgent()
     except Exception as e:
         print(f"Error instantiating agent: {e}")
         return f"Error initializing agent: {e}", None
-
-    # In the case of an app running as a hugging Face space, this link points toward your codebase
+
     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
     print(agent_code)
 
-    # 2. Fetch Questions
     print(f"Fetching questions from: {questions_url}")
     try:
         response = requests.get(questions_url, timeout=15)
         response.raise_for_status()
         questions_data = response.json()
         if not questions_data:
-
-
+            print("Fetched questions list is empty.")
+            return "Fetched questions list is empty or invalid format.", None
         print(f"Fetched {len(questions_data)} questions.")
     except requests.exceptions.RequestException as e:
         print(f"Error fetching questions: {e}")
         return f"Error fetching questions: {e}", None
     except requests.exceptions.JSONDecodeError as e:
-
-
-
+        print(f"Error decoding JSON response from questions endpoint: {e}")
+        print(f"Response text: {response.text[:500]}")
+        return f"Error decoding server response for questions: {e}", None
     except Exception as e:
         print(f"An unexpected error occurred fetching questions: {e}")
         return f"An unexpected error occurred fetching questions: {e}", None
 
-    # 3. Run your Agent
     results_log = []
     answers_payload = []
     print(f"Running agent on {len(questions_data)} questions...")
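The fetch step can be exercised by hand against the endpoint derived from DEFAULT_API_URL. The response shape below is inferred from how the loop in the next hunk consumes it (a task_id and a question per item), so treat the keys as an assumption:

import requests

DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

resp = requests.get(f"{DEFAULT_API_URL}/questions", timeout=15)
resp.raise_for_status()
questions = resp.json()  # assumed: list of {"task_id": ..., "question": ...}
print(f"Fetched {len(questions)} questions")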
@@ -231,19 +166,17 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
         except Exception as e:
-
-
+            print(f"Error running agent on task {task_id}: {e}")
+            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
 
     if not answers_payload:
         print("Agent did not produce any answers to submit.")
         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
 
-    # 4. Prepare Submission
     submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
     print(status_update)
 
-    # 5. Submit
     print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
     try:
         response = requests.post(submit_url, json=submission_data, timeout=60)
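For reference, the submission payload assembled above has the following shape. The field names come straight from the diff; the values here are hypothetical:

submission_data = {
    "username": "your-hf-username",  # hypothetical
    "agent_code": "https://huggingface.co/spaces/your-space-id/tree/main",  # hypothetical
    "answers": [
        {"task_id": "task-001", "submitted_answer": "42"},  # hypothetical
    ],
}
# POSTed as JSON: requests.post(submit_url, json=submission_data, timeout=60)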
@@ -286,8 +219,6 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
     results_df = pd.DataFrame(results_log)
     return status_message, results_df
 
-
-# --- Build Gradio Interface using Blocks ---
 with gr.Blocks() as demo:
     gr.Markdown("# Advanced Agent Evaluation Runner")
     gr.Markdown(
@@ -303,9 +234,7 @@ with gr.Blocks() as demo:
     )
 
     gr.LoginButton()
-
     run_button = gr.Button("Run Evaluation & Submit All Answers")
-
     status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
     results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
 
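The click handler that connects run_button to run_and_submit_all falls outside this diff's hunks. In this kind of template it is typically wired as in the sketch below (an assumption, not shown in the diff); with a gr.LoginButton present, Gradio injects the gr.OAuthProfile argument based on the function's type annotation, so only the outputs need declaring:

run_button.click(
    fn=run_and_submit_all,
    outputs=[status_output, results_table],
)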
@@ -316,24 +245,19 @@ with gr.Blocks() as demo:
 
 if __name__ == "__main__":
     print("\n" + "-"*30 + " App Starting " + "-"*30)
-    # Check for SPACE_HOST and SPACE_ID at startup for information
     space_host_startup = os.getenv("SPACE_HOST")
-    space_id_startup = os.getenv("SPACE_ID")
-
+    space_id_startup = os.getenv("SPACE_ID")
     if space_host_startup:
         print(f"✅ SPACE_HOST found: {space_host_startup}")
         print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
     else:
         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
-
-    if space_id_startup: # Print repo URLs if SPACE_ID is found
+    if space_id_startup:
         print(f"✅ SPACE_ID found: {space_id_startup}")
         print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
         print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
     else:
         print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
-
     print("-"*(60 + len(" App Starting ")) + "\n")
-
     print("Launching Gradio Interface for Advanced Agent Evaluation...")
     demo.launch(debug=True, share=False)
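A local smoke test of the agent, bypassing Gradio and the scoring server entirely (a sketch: it assumes this file is saved as app.py and that the required API keys are available in the environment or a .env file):

from app import AdvancedAgent  # assumes this module is saved as app.py

agent = AdvancedAgent()
print(agent("What is the capital of France?"))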