Update app.py
app.py
CHANGED
@@ -207,15 +207,24 @@
 # 3. This code replaces the original template entirely.
 #
 # =================================================================================================
+# =================================================================================================
+# ✅ --- ✅ FINAL ASSESSMENT AGENT - INSTRUCTOR'S CORRECTED VERSION ✅ --- ✅
+# =================================================================================================
+#
+# Instructions:
+# 1. Make sure your requirements.txt file matches the one provided by the instructor.
+# 2. Set your GROQ_API_KEY in the Hugging Face Space secrets.
+# 3. This code replaces the original template entirely.
+#
+# =================================================================================================
 
 import os
 import io
 import requests
-import inspect
 import pandas as pd
 import gradio as gr
 from contextlib import redirect_stdout
-from typing import TypedDict, Annotated, List
+from typing import TypedDict, Annotated, List
 import operator
 
 # --- LangChain & LangGraph Imports ---
@@ -224,8 +233,7 @@ from langchain_core.tools import tool
 from langchain_groq import ChatGroq
 # from langchain_openai import ChatOpenAI #<-- Alternative LLM
 from langgraph.graph import StateGraph, END
-from langgraph.prebuilt import ToolExecutor
-
+from langgraph.prebuilt import ToolNode # <-- Corrected Import for modern LangGraph
 
 # (Keep Constants as is)
 # --- Constants ---
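The import swap above is the heart of this commit: the old `ToolExecutor` helper was removed from modern `langgraph` releases, and `ToolNode` is its prebuilt replacement. A minimal sketch of what `ToolNode` does, assuming a current `langgraph` install; the `add` tool is a made-up example, not something from `app.py`:

```python
from langchain_core.messages import AIMessage
from langchain_core.tools import tool
from langgraph.prebuilt import ToolNode

@tool
def add(a: int, b: int) -> int:
    """Add two integers."""  # hypothetical demo tool, not from the Space
    return a + b

tool_node = ToolNode([add])

# ToolNode reads the tool_calls on the last AIMessage in state["messages"]
# and returns one ToolMessage per call, so no hand-written executor is needed.
state = {"messages": [AIMessage(content="", tool_calls=[
    {"name": "add", "args": {"a": 2, "b": 3}, "id": "call_1"}])]}
print(tool_node.invoke(state))  # -> {'messages': [ToolMessage(content='5', ...)]}
```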
@@ -301,7 +309,7 @@ def python_interpreter(code: str) -> str:
 
 #
 # ================================================================================================
-# ✅ 2. CONFIGURE THE AGENT'S STATE, BRAIN (LLM)
+# ✅ 2. CONFIGURE THE AGENT'S STATE, BRAIN (LLM)
 # ================================================================================================
 #
 
@@ -312,9 +320,6 @@ class AgentState(TypedDict):
 # List of all the tools our agent can use
 tools = [web_search, read_file, python_interpreter]
 
-# The ToolExecutor is a helper class that runs the tools for us
-tool_executor = ToolExecutor(tools)
-
 # The "Brain" of our agent. We're using Groq for speed.
 # Make sure to set GROQ_API_KEY in your HF Space secrets
 llm = ChatGroq(model="llama3-70b-8192", temperature=0)
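One thing this hunk does not show is how the model learns about the tools. For `last_message.tool_calls` (checked by the router below) to ever be populated, the chat model must be bound to the tool schemas somewhere in the unchanged code; a sketch of the standard LangChain wiring, stated as an assumption rather than a line confirmed by this diff:

```python
# Assumed wiring (not visible in this diff): hand the Groq model the tool
# schemas so it can emit structured tool_calls in its AIMessage responses.
llm_with_tools = llm.bind_tools(tools)
```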
@@ -341,22 +346,11 @@ def call_model(state: AgentState) -> dict:
     # We return a dict, because this node will always be part of a graph
     return {"messages": [response]}
 
-# NODE 2: The Tool Node (call_tool)
-# This node executes the tool chosen by the LLM.
-def call_tool(state: AgentState) -> dict:
-    last_message = state['messages'][-1] # Get the last message, which should be an AIMessage with tool calls
-
-    # We construct an ToolMessage with the output of the tool call
-    action = last_message.tool_calls[0]
-    print(f"--- Preparing to call tool: {action['name']} with args {action['args']} ---")
-    tool_output = tool_executor.invoke(action)
-    return {"messages": [ToolMessage(content=str(tool_output), tool_call_id=action['id'])]}
-
 # EDGE: The Conditional Router (should_continue)
 # This function decides which node to go to next.
 def should_continue(state: AgentState) -> str:
     last_message = state['messages'][-1]
-    # If the LLM made a tool call, we route to the 'action' node
+    # If the LLM made a tool call, we route to the 'action' node to execute the tool
     if last_message.tool_calls:
         print("--- Decision: Call a tool ---")
         return "action"
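The hunk ends before `should_continue`'s fall-through branch. For the conditional edges below to have somewhere to route when the model answers directly, the router needs a second return value; a plausible completion, mirroring standard LangGraph examples rather than code confirmed by this diff:

```python
def should_continue(state: AgentState) -> str:
    last_message = state['messages'][-1]
    if last_message.tool_calls:
        print("--- Decision: Call a tool ---")
        return "action"
    # No tool call means the model produced a plain answer, so stop looping.
    print("--- Decision: Finish ---")
    return "end"
```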
@@ -367,16 +361,20 @@ def should_continue(state: AgentState) -> str:
 
 #
 # ================================================================================================
-# ✅ 4. BUILD AND COMPILE THE GRAPH
+# ✅ 4. BUILD AND COMPILE THE GRAPH (Corrected Version)
 # ================================================================================================
 #
 
+# The ToolNode is a pre-built node that executes tools for us.
+# It's the modern way to handle tool execution in LangGraph.
+tool_node = ToolNode(tools)
+
 # 1. Initialize the graph and add our state object
 workflow = StateGraph(AgentState)
 
-# 2. Add the two nodes we
+# 2. Add the two nodes we need: the 'agent' and the 'action' (our tool_node)
 workflow.add_node("agent", call_model)
-workflow.add_node("action", call_tool)
+workflow.add_node("action", tool_node)
 
 # 3. Set the entry point of the graph. The first thing to run is the 'agent' node.
 workflow.set_entry_point("agent")
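Between this hunk and the next sits the unchanged `workflow.add_conditional_edges(...)` call; only its closing `}` and `)` appear as context below. Judging from the "action"/"end" strings the router returns, the call presumably looks like this reconstruction (an assumption, since the diff never shows it in full):

```python
workflow.add_conditional_edges(
    "agent",            # after the agent node runs...
    should_continue,    # ...ask the router where to go next
    {
        "action": "action",  # tool call requested -> run the ToolNode
        "end": END,          # plain answer -> terminate the graph
    },
)
```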
@@ -391,7 +389,7 @@ workflow.add_conditional_edges(
     }
 )
 
-# 5. Add a normal edge. After 'action' runs, it should always go back to 'agent'.
+# 5. Add a normal edge. After 'action' runs, it should always go back to 'agent' to reflect.
 workflow.add_edge('action', 'agent')
 
 # 6. Compile the graph into a runnable app.
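Once compiled (the `app = workflow.compile()` visible in the next hunk's header), the graph behaves like any LangChain runnable. A quick smoke test, purely illustrative and assuming `app` is the compiled graph from this file:

```python
from langchain_core.messages import HumanMessage

# One-shot invocation: run the agent loop to completion on a toy question.
result = app.invoke({"messages": [HumanMessage(content="What is 2 + 2?")]})
print(result["messages"][-1].content)  # content of the final AIMessage
```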
@@ -407,32 +405,26 @@ app = workflow.compile()
 class GaiaAgent:
     def __init__(self):
         print("GaiaAgent initialized.")
-        # Any one-time setup can go here
         self.agent_app = app
 
     def __call__(self, question: str) -> str:
-        print(f"
+        print(f"\n{'='*60}\nAgent received question (first 100 chars): {question[:100]}...\n{'='*60}")
 
         # The initial input for our graph is a list of messages.
         initial_input = {"messages": [HumanMessage(content=question)]}
 
         final_state = None
         # Let's add a loop limit to prevent infinite cycles
-        for i, step in enumerate(self.agent_app.stream(initial_input, {"recursion_limit":
-            # We'll just take the final state. The stream is useful for seeing intermediate steps.
+        for i, step in enumerate(self.agent_app.stream(initial_input, {"recursion_limit": 15})):
             if i == 0:
                 print("--- Starting Agentic Loop ---")
-
-            # You can print keys to see what's happening at each step:
-            # print(f"Step {i}: {list(step.keys())}")
-
             final_state = step
 
         # The final answer is in the last AIMessage of the 'messages' list
         final_answer_message = final_state['agent']['messages'][-1]
         final_answer = final_answer_message.content
 
-        print(f"--- Agent finished. Final Answer: {final_answer}
+        print(f"\n--- Agent finished. Final Answer: {final_answer} ---\n")
         return final_answer
 
 #
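`final_state['agent']['messages'][-1]` works because each item yielded by `stream()` (in the default "updates" mode) is a dict keyed by the node that just ran, and this graph only reaches END from the `agent` node, so the last step is always keyed `'agent'`. The `{"recursion_limit": 15}` config caps the total number of graph steps rather than looping forever. A sketch of the stream's shape, with an illustrative question:

```python
from langchain_core.messages import HumanMessage

initial_input = {"messages": [HumanMessage(content="What is the capital of France?")]}
# Typical sequence of steps:
#   {"agent":  {"messages": [AIMessage(..., tool_calls=[...])]}}
#   {"action": {"messages": [ToolMessage(...)]}}
#   {"agent":  {"messages": [AIMessage(content="Paris")]}}
for step in app.stream(initial_input, {"recursion_limit": 15}):
    node_name = next(iter(step))  # "agent" or "action"
    print(node_name, "->", step[node_name]["messages"][-1].type)
```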
@@ -446,8 +438,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
     Fetches all questions, runs the BasicAgent on them, submits all answers,
     and displays the results.
     """
-
-    space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
+    space_id = os.getenv("SPACE_ID")
 
     if profile:
         username= f"{profile.username}"
@@ -460,20 +451,15 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
     questions_url = f"{api_url}/questions"
     submit_url = f"{api_url}/submit"
 
-    # 1. Instantiate Agent ( modify this part to create your agent)
     try:
-        # -------------------------------------------------------------------
-        # THIS IS THE ONLY CHANGE IN THIS FUNCTION: We now use our GaiaAgent
         agent = GaiaAgent()
-        # -------------------------------------------------------------------
     except Exception as e:
         print(f"Error instantiating agent: {e}")
         return f"Error initializing agent: {e}", None
-
+
     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
     print(agent_code)
 
-    # 2. Fetch Questions
     print(f"Fetching questions from: {questions_url}")
     try:
         response = requests.get(questions_url, timeout=15)
@@ -483,18 +469,10 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
             print("Fetched questions list is empty.")
             return "Fetched questions list is empty or invalid format.", None
         print(f"Fetched {len(questions_data)} questions.")
-    except requests.exceptions.RequestException as e:
-        print(f"Error fetching questions: {e}")
-        return f"Error fetching questions: {e}", None
-    except requests.exceptions.JSONDecodeError as e:
-        print(f"Error decoding JSON response from questions endpoint: {e}")
-        print(f"Response text: {response.text[:500]}")
-        return f"Error decoding server response for questions: {e}", None
     except Exception as e:
         print(f"An unexpected error occurred fetching questions: {e}")
         return f"An unexpected error occurred fetching questions: {e}", None
 
-    # 3. Run your Agent
     results_log = []
     answers_payload = []
     print(f"Running agent on {len(questions_data)} questions...")
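Dropping the two specific `except` clauses does not change which errors are caught, since requests' exceptions derive from `Exception` and the generic handler remains; what is lost is only the finer-grained logging (notably the `response.text[:500]` dump on JSON decode failures). A quick check of the subsumption claim, assuming requests >= 2.27 where `JSONDecodeError` exists:

```python
import requests

# Both removed handlers are subsumed by the surviving `except Exception`:
assert issubclass(requests.exceptions.RequestException, Exception)
assert issubclass(requests.exceptions.JSONDecodeError, Exception)
```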
@@ -516,12 +494,10 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
         print("Agent did not produce any answers to submit.")
         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
 
-    # 4. Prepare Submission
     submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
     print(status_update)
 
-    # 5. Submit
     print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
     try:
         response = requests.post(submit_url, json=submission_data, timeout=60)
@@ -572,7 +548,7 @@ with gr.Blocks() as demo:
         """
         **Instructor's Note:** This space is now powered by a LangGraph agent.
         1. Ensure your `GROQ_API_KEY` is set in the Space secrets.
-        2. Make sure you have a `requirements.txt` file.
+        2. Make sure you have a `requirements.txt` file with the specified versions.
         3. Log in below and click 'Run Evaluation'. Good luck!
         """
     )
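The instructor's pinned `requirements.txt` is not part of this diff, so its exact versions cannot be reproduced here; judging purely from the imports in `app.py`, the file would need at least these packages (names only, versions deliberately omitted):

```
gradio
requests
pandas
langchain-core
langchain-groq
langgraph
```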