imcasnehal committed on
Commit
ab94110
·
verified ·
1 Parent(s): ecf3310

added corrected code

Browse files
Files changed (1) hide show
  1. app.py +103 -42
app.py CHANGED
@@ -1,53 +1,114 @@
1
- # app.py
2
- import gradio as gr
 
3
  import requests
 
4
 
5
- from transformers import HfAgent
 
 
6
 
7
- agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")
 
 
 
 
 
8
 
9
- API_BASE = "https://gaia-course-api.huggingface.co"
10
 
11
- def get_questions():
12
- return requests.get(f"{API_BASE}/questions").json()
 
 
 
 
 
 
 
 
13
 
14
- def run_agent_on_all_questions():
15
- questions = get_questions()
16
- results = []
 
17
 
18
- for q in questions:
19
- task_id = q["id"]
20
- prompt = q["input"]
21
- print(f"Running on: {prompt}")
22
- try:
23
- answer = agent.run(prompt)
24
- print(f"Answer: {answer}")
25
- results.append({"task_id": task_id, "submitted_answer": answer.strip()})
26
- except Exception as e:
27
- print("Error:", e)
28
- results.append({"task_id": task_id, "submitted_answer": "ERROR"})
29
-
30
- return results
31
-
32
- def submit_answers(username, code_link):
33
- answers = run_agent_on_all_questions()
34
- response = requests.post(
35
- f"{API_BASE}/submit",
36
- json={
37
- "username": username,
38
- "agent_code": code_link,
39
- "answers": answers
40
- },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
  )
42
- return response.json()
43
 
44
- with gr.Blocks() as demo:
45
- with gr.Row():
46
- username = gr.Textbox(label="Hugging Face Username")
47
- code_link = gr.Textbox(label="Link to Your Space Code (tree/main)")
48
- submit_btn = gr.Button("Submit Answers")
49
- output = gr.Textbox(label="Response")
 
 
 
 
 
 
 
 
50
 
51
- submit_btn.click(fn=submit_answers, inputs=[username, code_link], outputs=output)
 
 
 
 
 
52
 
53
- demo.launch()
 
 
1
# pip install langgraph langchain-google-genai langchain-groq
import os

from langgraph.graph import StateGraph, END
from typing import TypedDict
import requests
from langchain_google_genai import ChatGoogleGenerativeAI  # or: from langchain_groq import ChatGroq

# 1. Define your model (choose one).
# SECURITY: never hard-code API keys in source — the previous revision leaked a
# live Google key with the repository. Read the key from the environment instead.
llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-flash",
    google_api_key=os.environ["GOOGLE_API_KEY"],
)
# llm = ChatGroq(model="mixtral-8x7b-32768", groq_api_key=os.environ["GROQ_API_KEY"])  # fast & cheap for experimentation
10
 
11
+ # 2. Define your agent's state. What information does it need to carry between steps?
12
+ class AgentState(TypedDict):
13
+ question: str
14
+ task_id: str | None # For fetching files
15
+ agent_response: str
16
+ final_answer: str
17
 
18
# 3. Define the nodes (functions) of the graph.

def fetch_question_node(state: "AgentState") -> dict:
    """Node 1: call the /random-question API to get a problem.

    Returns a partial state update carrying the question text and, when the
    question references an attachment, its task_id.
    """
    response = requests.get(
        "https://agents-course-unit4-scoring.hf.space/random-question",
        timeout=30,  # never issue a network call without a timeout
    )
    response.raise_for_status()
    data = response.json()
    print(f"Fetched question payload: {data}")
    return {
        "question": data["question"],
        # task_id is present only when the question has an attached file.
        "task_id": data.get("task_id"),
    }
30
 
31
def download_file_node(state: "AgentState") -> dict:
    """Node 2: if a task_id is present, fold the related file into the query.

    Bug fix vs. the previous revision: when there is no file, pass the
    question through unchanged. The old code replaced it with the literal
    string "No file to download.", so the reasoning node lost the question
    entirely and answered the placeholder instead.
    """
    if not state["task_id"]:
        return {"agent_response": state["question"]}

    file_url = f"https://agents-course-unit4-scoring.hf.space/files/{state['task_id']}"
    file_response = requests.get(file_url, timeout=30)
    file_response.raise_for_status()  # fail loudly on a missing/broken file
    # NOTE(review): assumes the attachment is text; binary files (images,
    # spreadsheets) would need dedicated handling — confirm against the API.
    file_content = file_response.text
    enhanced_query = (
        f"Question: {state['question']}\n\n"
        f"Use the following file content to answer:\n{file_content}"
    )
    return {"agent_response": enhanced_query}
42
+
43
def reason_and_answer_node(state: "AgentState") -> dict:
    """Node 3: the core reasoning node — use the LLM to solve the problem."""
    # The prompt encourages step-by-step thinking internally but demands a
    # clean, bare final answer (the scoring API compares it verbatim).
    system_prompt = """You are a highly intelligent AI agent solving a question.
    Think carefully about the problem. Your final output must be ONLY the final answer, with no additional text, explanations, or formatting.
    """

    reply = llm.invoke(f"{system_prompt}\n\nQuestion: {state['agent_response']}")

    # A well-instructed model returns just the answer; strip stray whitespace.
    return {"final_answer": reply.content.strip()}
57
+
58
def submit_answer_node(state: "AgentState") -> dict:
    """Node 4: submit the final answer to the scoring API.

    Posts the single answer produced by this run together with the
    submitter's identity, then returns the state unchanged so the graph
    can terminate. (The older commented-out draft of this node was dead
    code and has been removed.)
    """
    payload = {
        "username": "your_hf_username",  # TODO: replace with your Hugging Face username
        "agent_code": "https://huggingface.co/spaces/your-username/your-space-name/tree/main",  # TODO: replace with your Space link
        "answers": [
            {
                "task_id": state.get("task_id"),  # task_id captured by the initial fetch
                "submitted_answer": state["final_answer"],
            }
        ],
    }

    submission_response = requests.post(
        "https://agents-course-unit4-scoring.hf.space/submit",
        json=payload,
        timeout=60,  # never post without a timeout
    )

    print(f"Submission Status: {submission_response.status_code}")
    # Guard the decode: error responses are not guaranteed to be JSON.
    try:
        print(f"Submission Response: {submission_response.json()}")
    except ValueError:
        print(f"Submission Response (raw): {submission_response.text}")

    return state
95
+
96
+
97
# 4. Build the graph: fetch_question -> download_file -> reason -> submit -> END.
workflow = StateGraph(AgentState)

# Register each step under a short node name.
for node_name, node_fn in (
    ("fetch_question", fetch_question_node),
    ("download_file", download_file_node),
    ("reason", reason_and_answer_node),
    ("submit", submit_answer_node),
):
    workflow.add_node(node_name, node_fn)

# Wire the nodes into a straight pipeline.
workflow.set_entry_point("fetch_question")
for src, dst in (
    ("fetch_question", "download_file"),
    ("download_file", "reason"),
    ("reason", "submit"),
    ("submit", END),
):
    workflow.add_edge(src, dst)

# Compile the graph into a runnable application.
app = workflow.compile()