# gaia-agent / app.py
# (HF Space commit ab94110 by imcasnehal: "added corrected code")
# pip install langgraph langchain-google-genai langchain-groq
import os
from typing import TypedDict

import requests
from langchain_google_genai import ChatGoogleGenerativeAI # or from langchain_groq import ChatGroq
from langgraph.graph import StateGraph, END
# 1. Define your model (choose one). The API key is read from the
# environment so credentials never live in source control.
# SECURITY(review): the original hard-coded a live Google API key here —
# that key is public now and must be revoked in the Google console.
llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-flash",
    google_api_key=os.environ.get("GOOGLE_API_KEY", ""),
)
# llm = ChatGroq(model="mixtral-8x7b-32768", groq_api_key="your_key") # Fast & cheap for experimentation
# 2. Define your agent's state. What information does it need to carry between steps?
class AgentState(TypedDict):
question: str
task_id: str | None # For fetching files
agent_response: str
final_answer: str
# 3. Define the nodes (functions) of your graph
def fetch_question_node(state: AgentState):
    """Node 1: Fetch a random problem from the scoring API.

    Returns a partial state update with the question text and, when the
    task has an attached file, its ``task_id``.
    """
    response = requests.get(
        "https://agents-course-unit4-scoring.hf.space/random-question",
        timeout=30,  # don't let the whole graph hang on a slow endpoint
    )
    response.raise_for_status()
    data = response.json()
    print("Fetched question payload:", data)
    return {
        "question": data["question"],
        # .get keeps task_id as None when the API provides no file id
        "task_id": data.get("task_id"),
    }
def download_file_node(state: AgentState):
"""Node 2: If a task_id is present, download the related file."""
if not state['task_id']:
return {"agent_response": "No file to download."}
file_url = f"https://agents-course-unit4-scoring.hf.space/files/{state['task_id']}"
file_response = requests.get(file_url)
# Process the file (e.g., if it's a text file, read it; if it's an image, use a vision model)
file_content = file_response.text
enhanced_query = f"Question: {state['question']}\n\nUse the following file content to answer:\n{file_content}"
return {"agent_response": enhanced_query}
def reason_and_answer_node(state: AgentState):
    """Node 3: Core reasoning step — ask the LLM and keep only its answer."""
    # The instruction block forbids explanations, so the model's raw
    # content can be used directly as the final answer.
    system_prompt = """You are a highly intelligent AI agent solving a question.
Think carefully about the problem. Your final output must be ONLY the final answer, with no additional text, explanations, or formatting.
"""
    question_text = state["agent_response"]
    prompt = f"{system_prompt}\n\nQuestion: {question_text}"
    llm_reply = llm.invoke(prompt)
    # Strip stray whitespace; a well-instructed model returns just the answer.
    return {"final_answer": llm_reply.content.strip()}
# def submit_answer_node(state: AgentState):
# """Node 4: Submit the final answer to the scoring API."""
# payload = {
# "question_id": state.get("question_id"), # You might need to get this from the initial fetch
# "answer": state['final_answer']
# }
# submission_response = requests.post("https://agents-course-unit4-scoring.hf.space/submit", json=payload)
# print(f"Submission Status: {submission_response.status_code}")
# print(f"Submission Response: {submission_response.json()}")
# return state
def submit_answer_node(state: AgentState):
    """Node 4: Submit the final answer to the scoring API.

    Username and Space URL are read from the environment (HF_USERNAME /
    AGENT_CODE_URL) so the file needn't be edited per deployment; the
    original placeholder strings remain as fallbacks.
    """
    payload = {
        "username": os.environ.get("HF_USERNAME", "your_hf_username"),
        "agent_code": os.environ.get(
            "AGENT_CODE_URL",
            "https://huggingface.co/spaces/your-username/your-space-name/tree/main",
        ),
        "answers": [
            {
                "task_id": state.get("task_id"),  # carried from the initial fetch
                "submitted_answer": state["final_answer"],
            }
        ],
    }
    submission_response = requests.post(
        "https://agents-course-unit4-scoring.hf.space/submit",
        json=payload,
        timeout=60,
    )
    print(f"Submission Status: {submission_response.status_code}")
    # The API may return a non-JSON error body; don't crash while logging it.
    try:
        print(f"Submission Response: {submission_response.json()}")
    except ValueError:
        print(f"Submission Response: {submission_response.text}")
    return state
# 4. Assemble the graph: fetch -> download -> reason -> submit -> END.
workflow = StateGraph(AgentState)

_pipeline = [
    ("fetch_question", fetch_question_node),
    ("download_file", download_file_node),
    ("reason", reason_and_answer_node),
    ("submit", submit_answer_node),
]
for _label, _fn in _pipeline:
    workflow.add_node(_label, _fn)

workflow.set_entry_point("fetch_question")

# Wire each node to its successor; the last node flows into END.
_order = [label for label, _ in _pipeline] + [END]
for _src, _dst in zip(_order, _order[1:]):
    workflow.add_edge(_src, _dst)

# Compile the graph into the runnable app object.
app = workflow.compile()