Spaces:
Runtime error
Runtime error
added corrected code
Browse files
app.py
CHANGED
|
@@ -1,53 +1,114 @@
|
|
| 1 |
-
#
|
| 2 |
-
|
|
|
|
| 3 |
import requests
|
|
|
|
| 4 |
|
| 5 |
-
|
|
|
|
|
|
|
| 6 |
|
| 7 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
|
| 9 |
-
|
| 10 |
|
| 11 |
-
def
|
| 12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
-
def
|
| 15 |
-
|
| 16 |
-
|
|
|
|
| 17 |
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
response =
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
)
|
| 42 |
-
return response.json()
|
| 43 |
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 50 |
|
| 51 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 52 |
|
| 53 |
-
|
|
|
|
|
|
# pip install langgraph langchain-google-genai langchain-groq
import os

import requests
from langchain_google_genai import ChatGoogleGenerativeAI  # or: from langchain_groq import ChatGroq
from langgraph.graph import StateGraph, END
from typing import TypedDict

# 1. Define your model (Choose one)
# SECURITY: the Google API key was previously hardcoded here, so it is exposed
# in the repo history and must be rotated. Prefer supplying it through the
# GOOGLE_API_KEY environment variable; the old literal is kept only as a
# backward-compatible fallback until the key is rotated.
llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-flash",
    google_api_key=os.environ.get(
        "GOOGLE_API_KEY", "AIzaSyBkAb7nRruE-Sn-Ff9bMWhiVBvY751pqOk"
    ),
)
# llm = ChatGroQ(model="mixtral-8x7b-32768", groq_api_key="your_key") # Fast & cheap for experimentation
# 2. Define your agent's state. What information does it need to carry between steps?
class AgentState(TypedDict):
    # Question text fetched from the scoring API by fetch_question_node.
    question: str
    task_id: str | None  # For fetching files; None when the question has no attachment
    # Payload handed to the reasoning node (the question, optionally augmented
    # with downloaded file content).
    agent_response: str
    # Cleaned answer text produced by the LLM, submitted to the scoring API.
    final_answer: str
# 3. Define the nodes (functions) of your graph

def fetch_question_node(state: AgentState):
    """Node 1: Calls the /random-question API to get a problem.

    Returns a partial state update containing the question text and the
    task_id (None when the API attaches no file to the question).

    Raises:
        requests.HTTPError: if the scoring API responds with an error status.
    """
    # timeout added so a stalled connection cannot hang the whole graph run
    response = requests.get(
        "https://agents-course-unit4-scoring.hf.space/random-question",
        timeout=30,
    )
    response.raise_for_status()
    data = response.json()
    print("this is a data", data)
    return {
        "question": data["question"],
        "task_id": data.get("task_id")  # If the question requires a file, the API might provide an ID
    }
def download_file_node(state: "AgentState"):
    """Node 2: If a task_id is present, download the related file.

    Produces the `agent_response` payload for the reasoning node: the bare
    question when there is no file, otherwise the question augmented with the
    downloaded file's text content.
    """
    # BUG FIX: the original returned the literal "No file to download." here,
    # which then REPLACED the question in the reasoning prompt, so file-less
    # questions were never answered. Pass the question through instead.
    # .get() also avoids a KeyError when "task_id" is missing from the state.
    if not state.get("task_id"):
        return {"agent_response": state["question"]}

    file_url = f"https://agents-course-unit4-scoring.hf.space/files/{state['task_id']}"
    file_response = requests.get(file_url, timeout=30)  # don't hang forever
    file_response.raise_for_status()  # fail loudly instead of prompting with an error page
    # Process the file (e.g., if it's a text file, read it; if it's an image, use a vision model)
    file_content = file_response.text
    enhanced_query = f"Question: {state['question']}\n\nUse the following file content to answer:\n{file_content}"
    return {"agent_response": enhanced_query}
| 42 |
+
|
def reason_and_answer_node(state: AgentState):
    """Node 3: The core reasoning node. Use the LLM to solve the problem.

    Reads the (possibly file-augmented) query from ``agent_response`` and
    returns a partial state update carrying the model's cleaned answer in
    ``final_answer``.
    """
    # Construct a powerful prompt to encourage step-by-step thinking internally,
    # but demand a final, clean answer.
    system_prompt = """You are a highly intelligent AI agent solving a question.
    Think carefully about the problem. Your final output must be ONLY the final answer, with no additional text, explanations, or formatting.
    """

    prompt = f"{system_prompt}\n\nQuestion: {state['agent_response']}"
    response = llm.invoke(prompt)  # `llm` is the module-level model client

    # Here you might need to parse the response to extract *only* the final answer.
    # For a well-instructed model, `response.content` should be just the answer.
    return {"final_answer": response.content.strip()}
|
| 57 |
+
|
| 58 |
+
# def submit_answer_node(state: AgentState):
|
| 59 |
+
# """Node 4: Submit the final answer to the scoring API."""
|
| 60 |
+
# payload = {
|
| 61 |
+
# "question_id": state.get("question_id"), # You might need to get this from the initial fetch
|
| 62 |
+
# "answer": state['final_answer']
|
| 63 |
+
# }
|
| 64 |
+
# submission_response = requests.post("https://agents-course-unit4-scoring.hf.space/submit", json=payload)
|
| 65 |
+
# print(f"Submission Status: {submission_response.status_code}")
|
| 66 |
+
# print(f"Submission Response: {submission_response.json()}")
|
| 67 |
+
# return state
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
|
def submit_answer_node(state: AgentState):
    """Node 4: Submit the final answer to the scoring API.

    Posts the answer payload, logs the API's reply, and returns the state
    unchanged so the graph can terminate.
    """

    payload = {
        "username": "your_hf_username",  # 👈 replace with your Hugging Face username
        "agent_code": "https://huggingface.co/spaces/your-username/your-space-name/tree/main",  # 👈 replace with your Space link
        "answers": [
            {
                "task_id": state.get("task_id"),  # use task_id from initial fetch
                "submitted_answer": state["final_answer"]
            }
        ]
    }

    submission_response = requests.post(
        "https://agents-course-unit4-scoring.hf.space/submit",
        json=payload,
        timeout=60,  # avoid hanging the run on a stalled connection
    )

    print(f"Submission Status: {submission_response.status_code}")
    try:
        # The API can return a non-JSON body on errors; don't let logging crash the run.
        print(f"Submission Response: {submission_response.json()}")
    except ValueError:
        print(f"Submission Response (raw): {submission_response.text}")

    return state
|
# 4. Build the graph: a strictly linear pipeline
#    fetch_question -> download_file -> reason -> submit -> END
workflow = StateGraph(AgentState)

# Register every node under the name the edges below refer to.
for node_name, node_fn in (
    ("fetch_question", fetch_question_node),
    ("download_file", download_file_node),
    ("reason", reason_and_answer_node),
    ("submit", submit_answer_node),
):
    workflow.add_node(node_name, node_fn)

# Execution starts by fetching a question, then follows the chain of edges.
workflow.set_entry_point("fetch_question")
for src, dst in (
    ("fetch_question", "download_file"),
    ("download_file", "reason"),
    ("reason", "submit"),
    ("submit", END),
):
    workflow.add_edge(src, dst)

# Compile the wiring into the runnable application object.
app = workflow.compile()