Update app.py

app.py CHANGED

@@ -2,65 +2,74 @@ import os
 import gradio as gr
 import requests
 import pandas as pd
-from typing import TypedDict, Annotated
-
-from
-from langchain import
-from langchain_community.tools import
 from langchain_huggingface import HuggingFaceEndpoint

 # --- Main Application Logic ---

-# This
-
-

 # 1. Set up the LLM (The "Brain")
 llm = HuggingFaceEndpoint(
     repo_id="mistralai/Mistral-7B-Instruct-v0.2",
     task="conversational",
     max_new_tokens=512,
     do_sample=False,
 )
-

-# 2. Define the
-
-

-#
-
-
-print("Prompt template pulled.")
-
-# 4. Create the Agent's logic using the ReAct framework
-agent_runnable = create_react_agent(llm, tools, prompt)
-print("Agent logic created.")

-#
-
-
-
-
-
-
-
-
 )
-
-

 # This function runs the agent for a single question.
-def run_agent_for_task(agent_executor, question: str) -> str:
     print(f"Agent received question: {question}")
     try:
-
-
-            "input": question,
-            "chat_history": []
-        })
-        final_answer = response.get("output", "Error: Could not parse final answer.")
     except Exception as e:
         print(f"Error during agent execution: {e}")
         final_answer = f"Error: Agent failed to execute. {e}"
@@ -68,7 +77,7 @@ def run_agent_for_task(agent_executor, question: str) -> str:
     print(f"Agent returning answer: {final_answer}")
     return str(final_answer)

-# The rest of the file runs the evaluation
 def run_and_submit_all(profile: gr.OAuthProfile | None):
     space_id = os.getenv("SPACE_ID")
     if not profile:
@@ -76,7 +85,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
     username = f"{profile.username}"

     try:
-        agent_executor =
     except Exception as e:
         return f"Error initializing agent: {e}", None

@@ -94,7 +103,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
     for item in questions_data:
         task_id, question_text = item.get("task_id"), item.get("question")
         if task_id and question_text:
-            submitted_answer =
             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})

     submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
@@ -114,9 +123,9 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
     except Exception as e:
         return f"Error during submission: {e}", pd.DataFrame(answers_payload)

-#
 with gr.Blocks() as demo:
-    gr.Markdown("# Agent Evaluation Runner (Final
     gr.LoginButton()
     run_button = gr.Button("Run Evaluation & Submit All Answers")
     status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
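Several of the removed lines are truncated in the rendering above. The surviving fragments ("Prompt template pulled.", create_react_agent(llm, tools, prompt), and an invoke payload with "input" and "chat_history") point at the standard LangChain ReAct pattern: pull a prompt template, build the agent, wrap it in an AgentExecutor, and call it per question. A minimal sketch of that pattern is shown for orientation only; the prompt name and the executor arguments are assumptions, not the author's exact removed code.

# Hypothetical reconstruction of the removed ReAct wiring (not part of this commit).
# llm, tools and question stand for the objects defined elsewhere in app.py.
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent

prompt = hub.pull("hwchase17/react")  # assumed: the standard published ReAct prompt
print("Prompt template pulled.")
agent_runnable = create_react_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent_runnable, tools=tools, handle_parsing_errors=True)

response = agent_executor.invoke({"input": question, "chat_history": []})
final_answer = response.get("output", "Error: Could not parse final answer.")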

app.py after this commit, as rendered in the diff (unchanged sections between hunks are omitted; added lines are marked with +):

 import gradio as gr
 import requests
 import pandas as pd
+from typing import TypedDict, Annotated, Sequence
+import operator
+from langchain_core.messages import BaseMessage, HumanMessage, ToolMessage
+from langchain.agents import AgentExecutor
+from langchain_community.tools.tavily_search import TavilySearchResults
 from langchain_huggingface import HuggingFaceEndpoint
+from langgraph.graph import StateGraph, END
+from langgraph.prebuilt import ToolNode, tools_condition

 # --- Main Application Logic ---

+# This defines the "memory" or state of our agent.
+class AgentState(TypedDict):
+    messages: Annotated[Sequence[BaseMessage], operator.add]
+
+# This function builds our final, robust agent using LangGraph.
+def create_langgraph_agent():
+    print("Initializing LangGraph Agent...")

     # 1. Set up the LLM (The "Brain")
+    # We use the powerful Mistral model with the correct 'conversational' task
     llm = HuggingFaceEndpoint(
         repo_id="mistralai/Mistral-7B-Instruct-v0.2",
         task="conversational",
         max_new_tokens=512,
         do_sample=False,
     )
+    # We bind the tools to the LLM so it knows about them
+    tools = [TavilySearchResults(max_results=3)]
+    llm_with_tools = llm.bind_tools(tools)
+    print("LLM and tools initialized.")

+    # 2. Define the Graph Nodes
+    # The 'agent' node calls the LLM
+    def agent_node(state):
+        print("Calling agent node...")
+        response = llm_with_tools.invoke(state["messages"])
+        return {"messages": [response]}

+    # The 'tool' node executes the tools
+    tool_node = ToolNode(tools)
+    print("Graph nodes defined.")

+    # 3. Define the Graph
+    graph = StateGraph(AgentState)
+    graph.add_node("agent", agent_node)
+    graph.add_node("tools", tool_node)
+
+    graph.set_entry_point("agent")
+
+    # This conditional edge decides whether to call a tool or end
+    graph.add_conditional_edges(
+        "agent",
+        tools_condition,
     )
+    graph.add_edge("tools", "agent")
+
+    # 4. Compile the graph into a runnable app
+    app = graph.compile()
+    print("LangGraph agent compiled and ready.")
+    return app

 # This function runs the agent for a single question.
+def run_agent(agent_executor, question: str) -> str:
     print(f"Agent received question: {question}")
     try:
+        response = agent_executor.invoke({"messages": [HumanMessage(content=question)]})
+        final_answer = response['messages'][-1].content
     except Exception as e:
         print(f"Error during agent execution: {e}")
         final_answer = f"Error: Agent failed to execute. {e}"

     print(f"Agent returning answer: {final_answer}")
     return str(final_answer)

+# The rest of the file runs the evaluation
 def run_and_submit_all(profile: gr.OAuthProfile | None):
     space_id = os.getenv("SPACE_ID")
     if not profile:

     username = f"{profile.username}"

     try:
+        agent_executor = create_langgraph_agent()
     except Exception as e:
         return f"Error initializing agent: {e}", None

     for item in questions_data:
         task_id, question_text = item.get("task_id"), item.get("question")
         if task_id and question_text:
+            submitted_answer = run_agent(agent_executor, question_text)
             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})

     submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}

     except Exception as e:
         return f"Error during submission: {e}", pd.DataFrame(answers_payload)

+# Gradio Interface
 with gr.Blocks() as demo:
+    gr.Markdown("# Agent Evaluation Runner (Final LangGraph v2)")
     gr.LoginButton()
     run_button = gr.Button("Run Evaluation & Submit All Answers")
     status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
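A note on the tool-binding step above: in LangChain, bind_tools is defined on chat models rather than on plain text-generation endpoints, so a HuggingFaceEndpoint is usually wrapped in ChatHuggingFace before tools are bound. The following is a minimal sketch of that variant, offered as an assumption about the intended setup, not as part of the commit.

# Hypothetical variant: bind tools through the chat wrapper instead of the raw endpoint.
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
from langchain_community.tools.tavily_search import TavilySearchResults

llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.2",
    task="conversational",
    max_new_tokens=512,
    do_sample=False,
)
chat_model = ChatHuggingFace(llm=llm)          # chat interface over the endpoint
tools = [TavilySearchResults(max_results=3)]
llm_with_tools = chat_model.bind_tools(tools)  # bind_tools is a chat-model method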
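Finally, the compiled graph can be smoke-tested outside the Gradio and evaluation flow; this mirrors what run_agent does for each question. The sample question is illustrative only.

# Hypothetical local check of the LangGraph agent (not part of app.py).
from langchain_core.messages import HumanMessage

agent = create_langgraph_agent()
result = agent.invoke({"messages": [HumanMessage(content="What year was the first Moon landing?")]})
print(result["messages"][-1].content)  # the last message holds the agent's final answer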