blazingbunny committed on
Commit
df1a0e5
·
verified ·
1 Parent(s): bbe58ed

Upload 6 files

Browse files
Files changed (6) hide show
  1. .gitattributes +35 -35
  2. .gitignore +5 -0
  3. README.md +15 -12
  4. agent.py +96 -0
  5. app.py +226 -0
  6. requirements.txt +11 -0
.gitattributes CHANGED
@@ -1,35 +1,35 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Environment variables
2
+ .env
3
+
4
+ # Python cache
5
+ __pycache__/
README.md CHANGED
@@ -1,12 +1,15 @@
1
- ---
2
- title: Agent Test
3
- emoji: 🐨
4
- colorFrom: gray
5
- colorTo: gray
6
- sdk: gradio
7
- sdk_version: 6.5.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
1
+ ---
2
+ title: Template Final Assignment
3
+ emoji: 🕵🏻‍♂️
4
+ colorFrom: indigo
5
+ colorTo: indigo
6
+ sdk: gradio
7
+ sdk_version: 5.25.2
8
+ app_file: app.py
9
+ pinned: false
10
+ hf_oauth: true
11
+ # optional, default duration is 8 hours/480 minutes. Max duration is 30 days/43200 minutes.
12
+ hf_oauth_expiration_minutes: 480
13
+ ---
14
+
15
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
agent.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TypedDict, Annotated, List
2
+ import operator
3
+ import os
4
+ from langchain_google_genai import ChatGoogleGenerativeAI
5
+ from langchain_core.messages import BaseMessage, HumanMessage
6
+ from langgraph.graph import StateGraph, END, START
7
+ from langgraph.prebuilt import ToolNode
8
+ from langchain_tavily import TavilySearch
9
+ import google.auth
10
+ from dotenv import load_dotenv
11
+
12
+ load_dotenv()
13
+
14
+
15
# Set up Google credentials so ChatGoogleGenerativeAI can go through Vertex AI.
# Import of this module must not hard-fail when credentials are missing, so we
# only warn and continue (model calls will fail later if left unconfigured).
try:
    _, project_id = google.auth.default()
    # BUGFIX: google.auth.default() may return None for the project when the
    # credentials carry no project info; os.environ values must be str, so
    # assigning None would raise TypeError at import time.
    if project_id:
        os.environ["GOOGLE_CLOUD_PROJECT"] = project_id
    os.environ["GOOGLE_CLOUD_LOCATION"] = "global"
    os.environ["GOOGLE_GENAI_USE_VERTEXAI"] = "True"
except google.auth.exceptions.DefaultCredentialsError:
    print("Google Cloud credentials not found. Please configure your credentials.")
    # You might want to fall back to an API key or raise an exception here.
    # For this example, we'll proceed, but it will likely fail if not configured.
26
+
27
+
28
# 1. Define the state
class AgentState(TypedDict):
    # Conversation history. The operator.add reducer makes LangGraph APPEND
    # each node's returned messages to this list instead of replacing it.
    messages: Annotated[List[BaseMessage], operator.add]
31
+
32
# 2. Define the tools
# Tavily web search, limited to one result per query to keep responses small.
tools = [TavilySearch(max_results=1)]
# Prebuilt LangGraph node that executes whatever tool calls the model emits.
tool_node = ToolNode(tools)

# 3. Define the model
LLM = "gemini-1.5-flash"
model = ChatGoogleGenerativeAI(model=LLM, temperature=0)
# Rebind so the model knows the tool schemas and can emit structured tool calls.
model = model.bind_tools(tools)
40
+
41
# 4. Define the agent node
def should_continue(state):
    """Route after the agent node: "continue" to run tools, "end" to stop.

    Inspects the last message in the state; if the model requested tool
    calls we route to the tool node, otherwise the conversation is done.
    """
    messages = state['messages']
    last_message = messages[-1]
    # getattr guards against message types that have no tool_calls attribute
    # at all (e.g. a plain HumanMessage), which would raise AttributeError.
    if not getattr(last_message, "tool_calls", None):
        return "end"
    # Otherwise there are pending tool calls, so keep the loop going.
    return "continue"
51
+
52
def call_model(state):
    """Agent node: run the LLM on the accumulated conversation."""
    reply = model.invoke(state['messages'])
    # Wrapped in a list so AgentState's operator.add reducer appends it
    # to the existing message history.
    return {"messages": [reply]}
57
+
58
# 5. Create the graph
workflow = StateGraph(AgentState)

# Define the two nodes we will cycle between:
# "agent" calls the LLM; "action" executes any tool calls it requested.
workflow.add_node("agent", call_model)
workflow.add_node("action", tool_node)

# Set the entrypoint as `agent` — the first node called on every invocation.
workflow.add_edge(START, "agent")

# After each agent turn, route via should_continue:
# "continue" -> run the requested tools, "end" -> finish the run.
workflow.add_conditional_edges(
    "agent",
    should_continue,
    {
        "continue": "action",
        "end": END,
    },
)

# After the tools run, hand their results back to the agent node so the
# model can read the observations and decide what to do next.
workflow.add_edge("action", "agent")

# Compile into a LangChain Runnable, usable like any other runnable.
app = workflow.compile()
87
+
88
+
89
class LangGraphAgent:
    """Thin callable wrapper around the module-level compiled LangGraph app."""

    def __init__(self):
        # Reuse the already-compiled graph; no per-instance state beyond this.
        self.app = app

    def __call__(self, question: str) -> str:
        """Run the graph on a single question and return the final answer text."""
        state = self.app.invoke({"messages": [HumanMessage(content=question)]})
        last = state['messages'][-1]
        return last.content
app.py ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import gradio as gr
3
+ import requests
4
+ import pandas as pd
5
+ from dotenv import load_dotenv
6
+ from agent import LangGraphAgent
7
+
8
+ load_dotenv()
9
+
10
+ # (Keep Constants as is)
11
+ # --- Constants ---
12
+ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
13
+
14
def _fetch_questions(questions_url: str):
    """Fetch the question list from the scoring API.

    Returns (questions_data, None) on success or (None, error_message)
    on failure, so the caller can surface the message in the UI.
    """
    print(f"Fetching questions from: {questions_url}")
    response = None  # Kept for error-detail extraction in the except branch.
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return None, "Fetched questions list is empty or invalid format."
        print(f"Fetched {len(questions_data)} questions.")
        return questions_data, None
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        # Try to give the user more specific error information.
        if isinstance(e, requests.exceptions.ConnectionError):
            return None, "Error fetching questions: Connection Error. Please check the API URL and your network connection."
        if isinstance(e, requests.exceptions.Timeout):
            return None, "Error fetching questions: Request timed out."
        # BUGFIX: the original used `if response:`, but Response.__bool__ is
        # False for 4xx/5xx statuses — exactly the case where we have a
        # response whose error detail we want to show. Test identity instead.
        if response is not None:
            try:
                error_json = response.json()
                return None, f"Error fetching questions: {e} - {error_json.get('detail', response.text)}"
            except requests.exceptions.JSONDecodeError:
                return None, f"Error fetching questions: {e} - Could not decode JSON from response: {response.text[:500]}"
        return None, f"Error fetching questions: {e}"
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return None, f"An unexpected error occurred fetching questions: {e}"


def _run_agent(agent, questions_data):
    """Run the agent over every question.

    Returns (answers_payload, results_log): the submission payload and a
    per-question log (including agent errors) for the results table.
    """
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer.strip()})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            # One failing question must not abort the whole run.
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
    return answers_payload, results_log


def _submit_answers(submit_url: str, submission_data: dict, results_log: list):
    """POST the answers and translate the outcome into (status_text, DataFrame)."""
    results_df = pd.DataFrame(results_log)
    print(f"Submitting {len(submission_data['answers'])} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
    print(status_message)
    return status_message, results_df


def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the LangGraphAgent on them, submits all
    answers, and displays the results.

    Returns a (status_message, results_dataframe_or_None) tuple for Gradio.
    """
    # --- Determine HF Space Runtime URL and Repo URL ---
    space_id = os.getenv("SPACE_ID")  # Used to link back to this Space's code.

    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate the agent.
    try:
        agent = LangGraphAgent()
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None
    # In the case of an app running as a Hugging Face space, this link points
    # toward your codebase (useful for others, so please keep it public).
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    # 2. Fetch questions.
    questions_data, fetch_error = _fetch_questions(questions_url)
    if fetch_error is not None:
        return fetch_error, None

    # 3. Run the agent on every question.
    answers_payload, results_log = _run_agent(agent, questions_data)
    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare submission, then 5. submit and report.
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    print(f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'...")
    return _submit_answers(submit_url, submission_data, results_log)
142
+
143
+
144
+ def test_agent(question: str):
145
+ """
146
+ Runs the agent on a single question and returns the answer.
147
+ """
148
+ if not question:
149
+ return "Please enter a question."
150
+ try:
151
+ agent = LangGraphAgent()
152
+ answer = agent(question)
153
+ return answer
154
+ except Exception as e:
155
+ return f"Error running agent: {e}"
156
+
157
+ # --- Build Gradio Interface using Blocks ---
158
+ with gr.Blocks() as demo:
159
+ gr.Markdown("# Basic Agent Evaluation Runner")
160
+ gr.Markdown(
161
+ """
162
+ **Instructions:**
163
+
164
+ 1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
165
+ 2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
166
+ 3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
167
+
168
+ ---
169
+ **Disclaimers:**
170
+ Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
171
+ This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
172
+ """
173
+ )
174
+
175
+ login_button = gr.LoginButton()
176
+
177
+ run_button = gr.Button("Run Evaluation & Submit All Answers")
178
+
179
+ status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
180
+ # Removed max_rows=10 from DataFrame constructor
181
+ results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
182
+
183
+ run_button.click(
184
+ fn=run_and_submit_all,
185
+ inputs=[login_button],
186
+ outputs=[status_output, results_table]
187
+ )
188
+
189
+
190
+ gr.Markdown("---")
191
+ gr.Markdown("## Test the Agent")
192
+ with gr.Row():
193
+ question_textbox = gr.Textbox(label="Enter your question")
194
+ answer_textbox = gr.Textbox(label="Agent's Answer")
195
+ test_button = gr.Button("Test Agent")
196
+
197
+ test_button.click(
198
+ fn=test_agent,
199
+ inputs=[question_textbox],
200
+ outputs=[answer_textbox]
201
+ )
202
+
203
if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    # Report SPACE_HOST / SPACE_ID at startup purely for operator information.
    host = os.getenv("SPACE_HOST")
    sid = os.getenv("SPACE_ID")  # Get SPACE_ID at startup

    if host:
        print(f"✅ SPACE_HOST found: {host}")
        print(f"   Runtime URL should be: https://{host}.hf.space")
    else:
        print("SPACE_HOST environment variable not found (running locally?).")

    if sid:  # Print repo URLs if SPACE_ID is found
        print(f"✅ SPACE_ID found: {sid}")
        print(f"   Repo URL: https://huggingface.co/spaces/{sid}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{sid}/tree/main")
    else:
        print("SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-"*(60 + len(" App Starting ")) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)
226
+
requirements.txt ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ gradio[oauth]
2
+ requests
3
+ langchain
4
+ langgraph
5
+ python-dotenv
6
+ langchain-community
7
+ tavily-python
8
+ langchain-google-genai
9
+ google-auth
10
+ langchain-tavily
11
+ google-cloud-aiplatform