# Source: Hugging Face Space file "app.py" by mrhenu (commit 91ef29f, 5.32 kB)
import operator
import os
from typing import Annotated, Sequence, TypedDict

import gradio as gr
import pandas as pd
import requests
from langchain.agents import AgentExecutor
# Fixed import path: the class lives in langchain_community.tools.youtube.search
# and is re-exported at the package level; "tools.Youtube" is not a module.
from langchain_community.tools import YouTubeSearchTool
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import BaseMessage, HumanMessage
from langchain_experimental.tools import PythonREPLTool
from langchain_openai import ChatOpenAI
from langgraph.graph import END, StateGraph
from langgraph.prebuilt import ToolNode, tools_condition
# --- Main Application Logic ---
# Agent memory: the shared state threaded through the LangGraph nodes.
class AgentState(TypedDict):
    """Graph state holding the accumulated conversation.

    The ``operator.add`` annotation is LangGraph's reducer hint: each
    node's returned ``messages`` list is appended to the existing
    sequence instead of replacing it.
    """

    messages: Annotated[Sequence[BaseMessage], operator.add]
# Agent builder function.
def create_langgraph_agent():
    """Build and compile the tool-calling LangGraph agent.

    Returns:
        A compiled LangGraph application over ``AgentState``. Execution
        alternates between the LLM node and the tool node until the LLM
        produces a response with no tool calls (``tools_condition`` then
        routes to END).
    """
    print("Initializing Advanced LangGraph Agent...")

    # 1. Language model (temperature 0 for deterministic evaluation runs).
    llm = ChatOpenAI(model="gpt-4o", temperature=0)

    # 2. Tools: Tavily web search, a Python REPL, and YouTube search.
    tools = [TavilySearchResults(max_results=3), PythonREPLTool(), YouTubeSearchTool()]
    llm_with_tools = llm.bind_tools(tools)
    print("LLM and tools initialized.")

    # 3. Agent node: invoke the LLM on the accumulated message history.
    #    The returned single-element list is appended to state["messages"]
    #    by the operator.add reducer declared on AgentState.
    def agent_node(state):
        print("Calling agent node...")
        return {"messages": [llm_with_tools.invoke(state["messages"])]}

    # 4. Tool node: executes whichever tool calls the last AI message requested.
    tool_node = ToolNode(tools)
    print("Graph nodes defined.")

    # 5. Graph wiring: agent -> (tools | END) chosen by tools_condition,
    #    and tools always loop back to the agent.
    graph = StateGraph(AgentState)
    graph.add_node("agent", agent_node)
    graph.add_node("tools", tool_node)
    graph.set_entry_point("agent")
    graph.add_conditional_edges("agent", tools_condition)
    graph.add_edge("tools", "agent")

    # 6. Compile the graph. BUGFIX: StateGraph.compile() accepts no
    #    ``recursion_limit`` keyword — passing it raised TypeError here.
    #    The recursion limit is a runtime setting supplied per-invoke via
    #    ``config={"recursion_limit": 15}``, which run_agent already does.
    app = graph.compile()
    print("LangGraph agent compiled and ready.")
    return app
# Agent execution function.
def run_agent(agent_executor, question: str) -> str:
    """Run the compiled agent on one question and return its answer as text.

    The last message produced by the agent is scanned for a
    "FINAL ANSWER:" marker; when present, only the text after the marker
    is returned (stripped). Any execution failure is caught and reported
    as an "Error: ..." string rather than raised, so one bad question
    cannot abort a whole evaluation run.
    """
    print(f"Agent received question: {question}")
    try:
        result = agent_executor.invoke(
            {"messages": [HumanMessage(content=question)]},
            config={"recursion_limit": 15},
        )
        last_content = result["messages"][-1].content
        marker = "FINAL ANSWER:"
        answer = (
            last_content.split(marker)[-1].strip()
            if marker in last_content
            else last_content
        )
    except Exception as exc:
        print(f"Error during agent execution: {exc}")
        answer = f"Error: Agent failed to execute. {exc}"
    print(f"Agent returning answer: {answer}")
    return str(answer)
# Running the evaluation.
def run_and_submit_all(profile: gr.OAuthProfile | None):
    """Answer every evaluation question with the agent and submit the results.

    Returns a ``(status_text, dataframe)`` pair for the Gradio outputs.
    Any failure — missing login, missing API keys, agent initialization,
    question fetch, or submission — short-circuits with an explanatory
    status string instead of raising.
    """
    space_id = os.getenv("SPACE_ID")

    # A Hugging Face login is required so the submission can be attributed.
    if not profile:
        return "Please Login to Hugging Face with the button.", None
    username = f"{profile.username}"

    # Both the search tool and the LLM need their keys before we build anything.
    if not (os.getenv("TAVILY_API_KEY") and os.getenv("OPENAI_API_KEY")):
        return "One or more API keys (TAVILY_API_KEY, OPENAI_API_KEY) are not set.", None

    try:
        agent_executor = create_langgraph_agent()
    except Exception as e:
        return f"Error initializing agent: {e}", None

    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    questions_url = "https://agents-course-unit4-scoring.hf.space/questions"

    # Pull the question set from the scoring service.
    try:
        response = requests.get(questions_url, timeout=20)
        response.raise_for_status()
        questions_data = response.json()
    except Exception as e:
        return f"Error fetching questions: {e}", None

    # Run the agent on each well-formed question; skip malformed items.
    answers_payload = []
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not (task_id and question_text):
            continue
        answers_payload.append(
            {
                "task_id": task_id,
                "submitted_answer": run_agent(agent_executor, question_text),
            }
        )

    submission_data = {
        "username": username.strip(),
        "agent_code": agent_code,
        "answers": answers_payload,
    }
    submit_url = "https://agents-course-unit4-scoring.hf.space/submit"

    # Post the answers and format the scoring service's verdict.
    try:
        response = requests.post(submit_url, json=submission_data, timeout=240)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        return final_status, pd.DataFrame(answers_payload)
    except Exception as e:
        return f"Error during submission: {e}", pd.DataFrame(answers_payload)
# Gradio user interface: login, a run trigger, and two result widgets.
with gr.Blocks() as demo:
    gr.Markdown("# Agent Evaluation Runner (Advanced Tools - Corrected)")
    gr.LoginButton()
    run_button = gr.Button("Run Evaluation & Submit All Answers")
    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
    # NOTE(review): no inputs= is given; this relies on gradio injecting the
    # gr.OAuthProfile into run_and_submit_all's `profile` parameter based on
    # its type annotation — confirm against the gradio OAuth docs.
    run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])

if __name__ == "__main__":
    demo.launch()