mandysss10 commited on
Commit
7408c8d
·
verified ·
1 Parent(s): cda9201

Upload 6 files

Browse files
Files changed (6) hide show
  1. .env +0 -0
  2. app.py +0 -0
  3. embbeding.ipynb +156 -0
  4. metadata.jsonl +0 -0
  5. requirements.txt +18 -0
  6. tools.py +215 -0
.env ADDED
File without changes
app.py ADDED
File without changes
embbeding.ipynb ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 4,
6
+ "id": "09226255",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "import json\n",
11
+ "\n",
12
+ "documents = []\n",
13
+ "with open(\"./metadata.jsonl\", 'r') as f:\n",
14
+ " \n",
15
+ " \n",
16
+ " for doc in f:\n",
17
+ " documents.append(json.loads(doc))"
18
+ ]
19
+ },
20
+ {
21
+ "cell_type": "code",
22
+ "execution_count": 5,
23
+ "id": "5f5389a4",
24
+ "metadata": {},
25
+ "outputs": [
26
+ {
27
+ "data": {
28
+ "text/plain": [
29
+ "{'task_id': 'c61d22de-5f6c-4958-a7f6-5e9707bd3466',\n",
30
+ " 'Question': 'A paper about AI regulation that was originally submitted to arXiv.org in June 2022 shows a figure with three axes, where each axis has a label word at both ends. Which of these words is used to describe a type of society in a Physics and Society article submitted to arXiv.org on August 11, 2016?',\n",
31
+ " 'Level': 2,\n",
32
+ " 'Final answer': 'egalitarian',\n",
33
+ " 'file_name': '',\n",
34
+ " 'Annotator Metadata': {'Steps': '1. Go to arxiv.org and navigate to the Advanced Search page.\\n2. Enter \"AI regulation\" in the search box and select \"All fields\" from the dropdown.\\n3. Enter 2022-06-01 and 2022-07-01 into the date inputs, select \"Submission date (original)\", and submit the search.\\n4. Go through the search results to find the article that has a figure with three axes and labels on each end of the axes, titled \"Fairness in Agreement With European Values: An Interdisciplinary Perspective on AI Regulation\".\\n5. Note the six words used as labels: deontological, egalitarian, localized, standardized, utilitarian, and consequential.\\n6. Go back to arxiv.org\\n7. Find \"Physics and Society\" and go to the page for the \"Physics and Society\" category.\\n8. Note that the tag for this category is \"physics.soc-ph\".\\n9. Go to the Advanced Search page.\\n10. Enter \"physics.soc-ph\" in the search box and select \"All fields\" from the dropdown.\\n11. Enter 2016-08-11 and 2016-08-12 into the date inputs, select \"Submission date (original)\", and submit the search.\\n12. Search for instances of the six words in the results to find the paper titled \"Phase transition from egalitarian to hierarchical societies driven by competition between cognitive and social constraints\", indicating that \"egalitarian\" is the correct answer.',\n",
35
+ " 'Number of steps': '12',\n",
36
+ " 'How long did this take?': '8 minutes',\n",
37
+ " 'Tools': '1. Web browser\\n2. Image recognition tools (to identify and parse a figure with three axes)',\n",
38
+ " 'Number of tools': '2'}}"
39
+ ]
40
+ },
41
+ "execution_count": 5,
42
+ "metadata": {},
43
+ "output_type": "execute_result"
44
+ }
45
+ ],
46
+ "source": [
47
+ "documents[0]"
48
+ ]
49
+ },
50
+ {
51
+ "cell_type": "code",
52
+ "execution_count": null,
53
+ "id": "ff72589b",
54
+ "metadata": {},
55
+ "outputs": [],
56
+ "source": [
57
+ "def filt_level1(docs):\n",
58
+ " \n",
59
+ " firstlevel_docs = [ doc for doc in docs if doc[\"Level\"] == 1\n",
60
+ " \n",
61
+ " ]\n",
62
+ " return firstlevel_docs"
63
+ ]
64
+ },
65
+ {
66
+ "cell_type": "code",
67
+ "execution_count": null,
68
+ "id": "6f75e308",
69
+ "metadata": {},
70
+ "outputs": [
71
+ {
72
+ "ename": "ImportError",
73
+ "evalue": "cannot import name 'HuggingFaceEmbeddings' from 'langchain.embeddings' (c:\\Users\\ivanml\\AppData\\Local\\anaconda3\\envs\\venv_agent\\lib\\site-packages\\langchain\\embeddings\\__init__.py)",
74
+ "output_type": "error",
75
+ "traceback": [
76
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
77
+ "\u001b[1;31mImportError\u001b[0m Traceback (most recent call last)",
78
+ "Cell \u001b[1;32mIn[2], line 3\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mos\u001b[39;00m\n\u001b[0;32m 2\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mdotenv\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m load_dotenv\n\u001b[1;32m----> 3\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mlangchain\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01membeddings\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m HuggingFaceEmbeddings\n\u001b[0;32m 4\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mlangchain_community\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mvectorstores\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m SupabaseVectorStore\n\u001b[0;32m 5\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01msupabase\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mclient\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m create_client\n",
79
+ "\u001b[1;31mImportError\u001b[0m: cannot import name 'HuggingFaceEmbeddings' from 'langchain.embeddings' (c:\\Users\\ivanml\\AppData\\Local\\anaconda3\\envs\\venv_agent\\lib\\site-packages\\langchain\\embeddings\\__init__.py)"
80
+ ]
81
+ }
82
+ ],
83
+ "source": [
84
+ "import os\n",
85
+ "from dotenv import load_dotenv\n",
86
+ "from langchain_huggingface import HuggingFaceEmbeddings\n",
87
+ "from langchain_community.vectorstores import SupabaseVectorStore\n",
88
+ "from supabase.client import create_client\n",
89
+ "\n",
90
+ "load_dotenv()\n",
91
+ "\n",
92
+ "# Leer credenciales desde variables de entorno\n",
93
+ "SUPABASE_URL = os.getenv(\"SUPABASE_URL\")\n",
94
+ "SUPABASE_KEY = os.getenv(\"SUPABASE_KEY\")\n",
95
+ "\n",
96
+ "# Inicializar cliente Supabase\n",
97
+ "supabase = create_client(SUPABASE_URL, SUPABASE_KEY)\n",
98
+ "\n",
99
+ "# Inicializar embeddings\n",
100
+ "embeddings = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-mpnet-base-v2\")\n",
101
+ "\n",
102
+ "# Preparar registros para insertar\n",
103
+ "table_records = []\n",
104
+ "for doc in filt_level1(documents):\n",
105
+ " content = f\"Question: {doc['Question']}\\nFinal answer: {doc['Final answer']}\"\n",
106
+ " record = {\n",
107
+ " \"content\": content,\n",
108
+ " \"embedding\": embeddings.embed_query(content)\n",
109
+ " }\n",
110
+ " table_records.append(record)\n",
111
+ "\n",
112
+ "# Insertar registros en Supabase\n",
113
+ "response = supabase.table(\"documents\").insert(table_records).execute()\n",
114
+ "\n",
115
+ "# Inicializar vector store\n",
116
+ "vector_store = SupabaseVectorStore(\n",
117
+ " embedding=embeddings,\n",
118
+ " client=supabase,\n",
119
+ " table_name=\"documents\",\n",
120
+ " query_name=\"match_documents\"\n",
121
+ ")\n",
122
+ "\n",
123
+ "# Buscar documentos similares\n",
124
+ "query_text = (\"If Eliud Kipchoge could maintain his record-making marathon pace indefinitely, \"\n",
125
+ " \"how many thousand hours would it take him to run the distance between the Earth \"\n",
126
+ " \"and the Moon at its closest approach?\")\n",
127
+ "results = vector_store.similarity_search(query=query_text, k=1)\n",
128
+ "\n",
129
+ "# Obtener respuesta final\n",
130
+ "final_answer = results[0].page_content.split(\"Final answer:\")[-1].strip()\n",
131
+ "print(final_answer)\n"
132
+ ]
133
+ }
134
+ ],
135
+ "metadata": {
136
+ "kernelspec": {
137
+ "display_name": "venv_agent",
138
+ "language": "python",
139
+ "name": "python3"
140
+ },
141
+ "language_info": {
142
+ "codemirror_mode": {
143
+ "name": "ipython",
144
+ "version": 3
145
+ },
146
+ "file_extension": ".py",
147
+ "mimetype": "text/x-python",
148
+ "name": "python",
149
+ "nbconvert_exporter": "python",
150
+ "pygments_lexer": "ipython3",
151
+ "version": "3.10.19"
152
+ }
153
+ },
154
+ "nbformat": 4,
155
+ "nbformat_minor": 5
156
+ }
metadata.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ gradio
2
+ requests
3
+ pandas
4
+ numpy
5
+ langgraph
6
+ openai
7
+ tqdm
8
+ pydantic
9
+ langchain
10
+ langchain-openai
11
+ serpapi
12
+ python-dotenv
13
+ google-search-results
14
+ langchain-huggingface
15
+ langchain-community
16
+ supabase
17
+ sentence-transformers
18
+ langchain-google-genai
tools.py ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import gradio as gr
3
+ import requests
4
+ import inspect
5
+ import pandas as pd
6
+ from langgraph.prebuilt import ToolNode, tools_condition
7
+ from langchain_core.messages import HumanMessage
8
+ from tools import assistant, AgentState, tools, retriever
9
+ from langgraph.graph import StateGraph, START
10
+ # (Keep Constants as is)
11
+ # --- Constants ---
12
+ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
13
+
14
+ # --- Basic Agent Definition ---
15
+ # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
16
class BasicAgent:
    """LangGraph-backed agent: START -> retriever -> assistant, with a
    tool-calling loop between the assistant and tools nodes."""

    def __init__(self):
        """Assemble and compile the agent graph once, at construction time."""
        graph = StateGraph(AgentState)
        # Register the three processing nodes.
        for node_name, node in (
            ("retriever", retriever),
            ("assistant", assistant),
            ("tools", ToolNode(tools)),
        ):
            graph.add_node(node_name, node)
        # Fixed edges: entry point feeds the retriever, which feeds the assistant.
        graph.add_edge(START, "retriever")
        graph.add_edge("retriever", "assistant")
        # The assistant either requests tools (which loop back) or terminates.
        graph.add_conditional_edges("assistant", tools_condition)
        graph.add_edge("tools", "assistant")

        self.agent = graph.compile()

        print("BasicAgent initialized.")

    def __call__(self, question: str) -> str:
        """Run the compiled graph on *question* and return the final answer text,
        with any leading 'FINAL ANSWER: ' marker stripped off."""
        print(f"Agent received question (first 50 chars): {question[:50]}...")

        result = self.agent.invoke({"messages": [HumanMessage(content=question)]})

        print(f"Agent returning fixed answer: {result}")
        # The last message in the state carries the assistant's reply.
        final_text = result["messages"][-1].content
        return final_text.split("FINAL ANSWER: ")[-1]
41
+
42
+
43
def run_and_submit_all( profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the BasicAgent on them, submits all answers,
    and displays the results.

    Args:
        profile: The Hugging Face OAuth profile injected by Gradio's login
            state, or None when the user is not logged in.

    Returns:
        A (status_message, results_dataframe) tuple for the two Gradio
        outputs. The dataframe is None when the run aborts before any
        question is attempted.
    """
    # --- Determine HF Space Runtime URL and Repo URL ---
    space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code

    # Submission requires a username, so bail out early when not logged in.
    if profile:
        username= f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate Agent ( modify this part to create your agent)
    try:
        agent = BasicAgent()
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None
    # In the case of an app running as a Hugging Face space, this link points
    # toward your codebase (useful for others, so please keep it public).
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except requests.exceptions.JSONDecodeError as e:
        # raise_for_status() passed but the body was not valid JSON.
        print(f"Error decoding JSON response from questions endpoint: {e}")
        print(f"Response text: {response.text[:500]}")
        return f"Error decoding server response for questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None

    # 3. Run your Agent
    results_log = []       # every attempted question, including failures (for the UI table)
    answers_payload = []   # only successful answers (what gets submitted)
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            # A failure on one task is logged but does not stop the run.
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare Submission
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)

    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        # Server rejected the submission; try to surface its error detail.
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
162
+
163
+
164
# --- Build Gradio Interface using Blocks ---
# Two-output UI: a status textbox and a table of per-question answers.
# Gradio supplies the gr.OAuthProfile argument of run_and_submit_all
# automatically from the LoginButton's login state, so no explicit
# `inputs=` are wired on the click event.
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**

        1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
        2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

        ---
        **Disclaimers:**
        Once clicking on the "submit" button, it can take quite some time (this is the time for the agent to go through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, for the delay process of the submit button, a solution could be to cache the answers and submit in a separate action or even to answer the questions in async.
        """
    )

    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")

    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    # Removed max_rows=10 from DataFrame constructor
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )
192
+
193
if __name__ == "__main__":
    # Startup banner plus an informational report of the Space environment
    # variables, so the logs show where the app is running.
    banner = " App Starting "
    print("\n" + "-" * 30 + banner + "-" * 30)

    space_host = os.getenv("SPACE_HOST")
    space_id_env = os.getenv("SPACE_ID")

    if space_host:
        print(f"✅ SPACE_HOST found: {space_host}")
        print(f" Runtime URL should be: https://{space_host}.hf.space")
    else:
        print("ℹ️ SPACE_HOST environment variable not found (running locally?).")

    # Repo URLs can only be printed when SPACE_ID is available.
    if space_id_env:
        print(f"✅ SPACE_ID found: {space_id_env}")
        print(f" Repo URL: https://huggingface.co/spaces/{space_id_env}")
        print(f" Repo Tree URL: https://huggingface.co/spaces/{space_id_env}/tree/main")
    else:
        print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-" * (60 + len(banner)) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)