# app.py — LangGraph ReAct agent evaluation runner (Hugging Face Space)
import os
import gradio as gr
import requests
import pandas as pd
from typing import TypedDict, Annotated, Sequence
import operator
from langchain_core.messages import BaseMessage, HumanMessage
from langchain.agents import AgentExecutor, create_react_agent
from langchain import hub
from langchain_community.tools import DuckDuckGoSearchRun
from langchain_huggingface import HuggingFaceEndpoint
from langgraph.graph import StateGraph, END
from langgraph.prebuilt import ToolNode
# --- Main Application Logic ---
# This defines the "memory" or state of our agent.
class AgentState(TypedDict):
    # Conversation history. The `operator.add` annotation tells LangGraph to
    # APPEND messages returned by a node to the existing list rather than
    # replacing it, so the transcript accumulates across graph steps.
    messages: Annotated[Sequence[BaseMessage], operator.add]
# This function builds our agent using the standard ReAct framework
def create_agent():
    """Assemble and compile the LangGraph ReAct agent.

    Wires a Mistral-7B inference endpoint, a DuckDuckGo search tool, and the
    hub ReAct chat prompt into a two-node LangGraph ("agent" <-> "tools") and
    returns the compiled, invokable graph.
    """
    print("Initializing ReAct Agent...")

    # 1. Set up the LLM (The "Brain")
    language_model = HuggingFaceEndpoint(
        repo_id="mistralai/Mistral-7B-Instruct-v0.2",
        task="conversational",
        max_new_tokens=512,
        do_sample=False,
    )
    print("LLM initialized.")

    # 2. Define the Tools
    search_tools = [DuckDuckGoSearchRun()]
    tool_executor = ToolNode(search_tools)
    print("Tools initialized.")

    # 3. Get the ReAct Prompt Template
    # This prompt is designed to work with create_react_agent
    react_prompt = hub.pull("hwchase17/react-chat")
    print("Prompt template pulled: hwchase17/react-chat")

    # 4. Create the agent's logic
    react_agent = create_react_agent(language_model, search_tools, react_prompt)
    print("Agent logic created.")

    # 5. Define the graph nodes and routing.
    def call_agent(state):
        # NOTE(review): assumes the ReAct runnable's invoke() output is a dict
        # carrying a 'messages' key — confirm against the installed
        # langchain/langgraph versions.
        return {"messages": react_agent.invoke(state)['messages']}

    def route_after_agent(state):
        # Keep looping through the tools node while the newest message
        # requests tool calls; otherwise the run is finished.
        newest_message = state['messages'][-1]
        if getattr(newest_message, 'tool_calls', None):
            return "tools"
        return END

    workflow = StateGraph(AgentState)
    workflow.add_node("agent", call_agent)
    workflow.add_node("tools", tool_executor)
    workflow.set_entry_point("agent")
    workflow.add_conditional_edges("agent", route_after_agent)
    workflow.add_edge("tools", "agent")

    # 6. Compile the graph into a runnable app
    compiled_graph = workflow.compile()
    print("LangGraph agent compiled and ready.")
    return compiled_graph
# This function runs the agent for a single question.
def run_agent_for_task(agent_executor, question: str) -> str:
    """Invoke the compiled agent on one question and return the final answer.

    Exceptions raised during invocation are caught and rendered into the
    returned string instead of propagating, so one failing question does not
    abort a whole evaluation batch.
    """
    print(f"Agent received question: {question}")
    try:
        final_state = agent_executor.invoke({"messages": [HumanMessage(content=question)]})
        transcript = final_state['messages']
        final_answer = transcript[-1].content
    except Exception as e:
        print(f"Error during agent execution: {e}")
        final_answer = f"Error: Agent failed to execute. {e}"
    print(f"Agent returning answer: {final_answer}")
    # Coerce to str in case the model returned a non-string content payload.
    return str(final_answer)
# The rest of the file remains largely the same as the original template
def run_and_submit_all(profile: gr.OAuthProfile | None):
    """Fetch all evaluation questions, answer each with the agent, and submit.

    Args:
        profile: The logged-in Hugging Face OAuth profile injected by Gradio,
            or None when the user has not logged in.

    Returns:
        A (status_message, results_dataframe) tuple matching the Gradio
        outputs; the dataframe is None when nothing was attempted.
    """
    space_id = os.getenv("SPACE_ID")
    if not profile:
        return "Please Login to Hugging Face with the button.", None
    username = f"{profile.username}"

    # Build a fresh agent per run; initialization errors are surfaced to the UI.
    try:
        agent_executor = create_agent()
    except Exception as e:
        return f"Error initializing agent: {e}", None

    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    questions_url = "https://agents-course-unit4-scoring.hf.space/questions"
    try:
        response = requests.get(questions_url, timeout=20)
        response.raise_for_status()
        questions_data = response.json()
    except Exception as e:
        return f"Error fetching questions: {e}", None
    if not questions_data:
        # Guard: nothing to answer — avoid running the agent and submitting
        # an empty payload the scoring server would reject.
        return "Fetched questions list is empty or invalid.", None

    answers_payload = []
    for item in questions_data:
        task_id, question_text = item.get("task_id"), item.get("question")
        # Skip malformed entries that lack an id or a question.
        if task_id and question_text:
            submitted_answer = run_agent_for_task(agent_executor, question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
    if not answers_payload:
        return "Agent did not produce any answers to submit.", None

    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    submit_url = "https://agents-course-unit4-scoring.hf.space/submit"
    try:
        response = requests.post(submit_url, json=submission_data, timeout=120)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        return final_status, pd.DataFrame(answers_payload)
    except Exception as e:
        # Still show what the agent answered, even if submission failed.
        return f"Error during submission: {e}", pd.DataFrame(answers_payload)
# Gradio UI: a login button plus a single "run everything" button whose
# handler returns the status text and the answers table.
with gr.Blocks() as demo:
    gr.Markdown("# Agent Evaluation Runner (Final LangGraph Version)")
    # OAuth login supplies the gr.OAuthProfile consumed by run_and_submit_all.
    gr.LoginButton()
    run_button = gr.Button("Run Evaluation & Submit All Answers")
    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
    # No explicit inputs: Gradio injects the OAuth profile from the session
    # when the handler's first parameter is annotated gr.OAuthProfile.
    run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])

if __name__ == "__main__":
    demo.launch()