{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "a600d7fc", "metadata": { "id": "a600d7fc" }, "outputs": [], "source": [ "import json\n", "with open('metadata.jsonl', 'r') as f:\n", " json_list = list(f)\n", "\n", "json_QA = []\n", "for json_str in json_list:\n", " json_data = json.loads(json_str)\n", " json_QA.append(json_data)" ] }, { "cell_type": "code", "execution_count": 2, "id": "a2e6091a-9630-4f57-a980-3f7cad2126f4", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "165" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "len(json_QA)" ] }, { "cell_type": "code", "execution_count": 3, "id": "aca0d8ad-d4f6-4749-a979-4b7e38843c64", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "{'task_id': 'c61d22de-5f6c-4958-a7f6-5e9707bd3466',\n", " 'Question': 'A paper about AI regulation that was originally submitted to arXiv.org in June 2022 shows a figure with three axes, where each axis has a label word at both ends. Which of these words is used to describe a type of society in a Physics and Society article submitted to arXiv.org on August 11, 2016?',\n", " 'Level': 2,\n", " 'Final answer': 'egalitarian',\n", " 'file_name': '',\n", " 'Annotator Metadata': {'Steps': '1. Go to arxiv.org and navigate to the Advanced Search page.\\n2. Enter \"AI regulation\" in the search box and select \"All fields\" from the dropdown.\\n3. Enter 2022-06-01 and 2022-07-01 into the date inputs, select \"Submission date (original)\", and submit the search.\\n4. Go through the search results to find the article that has a figure with three axes and labels on each end of the axes, titled \"Fairness in Agreement With European Values: An Interdisciplinary Perspective on AI Regulation\".\\n5. Note the six words used as labels: deontological, egalitarian, localized, standardized, utilitarian, and consequential.\\n6. Go back to arxiv.org\\n7. Find \"Physics and Society\" and go to the page for the \"Physics and Society\" category.\\n8. Note that the tag for this category is \"physics.soc-ph\".\\n9. Go to the Advanced Search page.\\n10. Enter \"physics.soc-ph\" in the search box and select \"All fields\" from the dropdown.\\n11. Enter 2016-08-11 and 2016-08-12 into the date inputs, select \"Submission date (original)\", and submit the search.\\n12. Search for instances of the six words in the results to find the paper titled \"Phase transition from egalitarian to hierarchical societies driven by competition between cognitive and social constraints\", indicating that \"egalitarian\" is the correct answer.',\n", " 'Number of steps': '12',\n", " 'How long did this take?': '8 minutes',\n", " 'Tools': '1. Web browser\\n2. Image recognition tools (to identify and parse a figure with three axes)',\n", " 'Number of tools': '2'}}" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "json_QA[0]" ] }, { "cell_type": "code", "execution_count": 4, "id": "fa5d8eb8", "metadata": { "id": "fa5d8eb8", "outputId": "562cd6eb-7436-4563-cbbb-de2a38ae447e" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "==================================================\n", "Task ID: 65afbc8a-89ca-4ad5-8d62-355bb401f61d\n", "Question: You are given this Excel file as a map. You start on the START cell and move toward the END cell. You are allowed to move two cells per turn, and you may move up, down, left, or right. You may not move fewer than two cells, and you may not move backward. 
You must avoid moving onto any blue cells. On the eleventh turn, what is the 6-digit hex code (without prefix) of the color of the cell where you land after moving?\n", "Level: 1\n", "Final Answer: F478A7\n", "Annotator Metadata: \n", " ├── Steps: \n", " │ ├── 1. Opened Map.xlsx.\n", " │ ├── 2. Counted 11 turns of 2 spaces each (22 spaces) along the path of non-blue cells.\n", " │ ├── 3. Opened cell formatting for the cell.\n", " │ ├── 4. Clicked the \"Fill\" tab.\n", " │ ├── 5. Clicked \"More Colors...\"\n", " │ ├── 6. Noted the hex code of the color.\n", " ├── Number of steps: 6\n", " ├── How long did this take?: 5 minutes\n", " ├── Tools:\n", " │ ├── 1. Access to Excel files\n", " │ ├── 2. Color recognition\n", " │ ├── 3. Calculator (or ability to count)\n", " └── Number of tools: 3\n", "==================================================\n" ] } ], "source": [ "import random\n", "random_samples = random.sample(json_QA, 1)\n", "for sample in random_samples:\n", " print(\"=\" * 50)\n", " print(f\"Task ID: {sample['task_id']}\")\n", " print(f\"Question: {sample['Question']}\")\n", " print(f\"Level: {sample['Level']}\")\n", " print(f\"Final Answer: {sample['Final answer']}\")\n", " print(f\"Annotator Metadata: \")\n", " print(f\" ├── Steps: \")\n", " for step in sample['Annotator Metadata']['Steps'].split('\\n'):\n", " print(f\" │ ├── {step}\")\n", " print(f\" ├── Number of steps: {sample['Annotator Metadata']['Number of steps']}\")\n", " print(f\" ├── How long did this take?: {sample['Annotator Metadata']['How long did this take?']}\")\n", " print(f\" ├── Tools:\")\n", " for tool in sample['Annotator Metadata']['Tools'].split('\\n'):\n", " print(f\" │ ├── {tool}\")\n", " print(f\" └── Number of tools: {sample['Annotator Metadata']['Number of tools']}\")\n", "print(\"=\" * 50)" ] }, { "cell_type": "code", "execution_count": 5, "id": "05076516", "metadata": { "id": "05076516" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/Users/ron/Documents/github/agentcoursefinal/hf-agent/lib/python3.13/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", " from .autonotebook import tqdm as notebook_tqdm\n" ] } ], "source": [ "import os\n", "from dotenv import load_dotenv\n", "from langchain_huggingface import HuggingFaceEmbeddings\n", "from langchain_community.vectorstores import SupabaseVectorStore\n", "from supabase.client import Client, create_client\n", "\n", "\n", "load_dotenv()\n", "embeddings = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-mpnet-base-v2\") # dim=768\n", "\n", "supabase_url = os.environ.get(\"SUPABASE_URL\")\n", "supabase_key = os.environ.get(\"SUPABASE_SERVICE_KEY\")\n", "supabase: Client = create_client(supabase_url, supabase_key)" ] }, { "cell_type": "code", "execution_count": 9, "id": "aa1402e3", "metadata": { "id": "aa1402e3" }, "outputs": [], "source": [ "from langchain.schema import Document\n", "docs = []\n", "cnt = 0\n", "# build one record per QA pair: content, source task_id, and its embedding\n", "for sample in json_QA:\n", " content = f\"Question : {sample['Question']}\\n\\nFinal answer : {sample['Final answer']}\"\n", " doc = {\n", " \"id\" : cnt,\n", " \"content\" : content,\n", " \"metadata\" : {\n", " \"source\" : sample['task_id']\n", " },\n", " \"embedding\" : embeddings.embed_query(content),\n", " }\n", " docs.append(doc)\n", " cnt += 1\n", "\n", "# upload the documents to the vector database\n", "try:\n", " response = (\n", " supabase.table(\"documents\")\n", " .insert(docs)\n", " .execute()\n", " )\n", "except Exception as exception:\n", " print(\"Error inserting data into Supabase:\", exception)\n", "\n", "# # Save the documents (a list of dict) into a csv file, and manually upload it to Supabase\n", "# import pandas as pd\n", "# df = pd.DataFrame(docs)\n", "# df.to_csv('supabase_docs.csv',index=False)" ] }, { "cell_type": "code", "execution_count": 10, "id": "b2a197a4-c0a7-40ac-a773-c3a6239d696e", "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "df = pd.DataFrame(docs)\n", "df.to_csv('supabase_docs_22.csv',index=False)" ] }, { "cell_type": "code", "execution_count": 11, "id": "dd698b87-fbc7-4d30-b78d-a52b289b7812", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "(165, 4)" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "df.shape" ] }, { "cell_type": "code", "execution_count": 32, "id": "9aa7eb5e", "metadata": { "id": "9aa7eb5e" }, "outputs": [], "source": [ "# wrap the existing Supabase documents table in a vector store and expose it as a retriever\n", "vector_store = SupabaseVectorStore(\n", " client=supabase,\n", " embedding=embeddings,\n", " table_name=\"documents\",\n", " query_name=\"match_documents_langchain_2\",\n", ")\n", "retriever = vector_store.as_retriever()" ] }, { "cell_type": "code", "execution_count": 33, "id": "9eecafd1", "metadata": { "id": "9eecafd1" }, "outputs": [], "source": [ "query = \"On June 6, 2023, an article by Carolyn Collins Petersen was published in Universe Today. This article mentions a team that produced a paper about their observations, linked at the bottom of the article. Find this paper. Under what NASA award number was the work performed by R. G. 
Arendt supported by?\"\n", "# matched_docs = vector_store.similarity_search(query, k=2)\n", "docs = retriever.invoke(query)" ] }, { "cell_type": "code", "execution_count": 34, "id": "658e0400-a611-4ce0-abd3-d0e3c3976d1b", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "1" ] }, "execution_count": 34, "metadata": {}, "output_type": "execute_result" } ], "source": [ "len(docs)" ] }, { "cell_type": "code", "execution_count": 35, "id": "ff917840", "metadata": { "id": "ff917840", "outputId": "b802cc5b-3d17-405c-d6ed-0c51cfd77285" }, "outputs": [ { "data": { "text/plain": [ "Document(metadata={'source': '840bfca7-4f7b-481a-8794-c560c340185d'}, page_content='Question : On June 6, 2023, an article by Carolyn Collins Petersen was published in Universe Today. This article mentions a team that produced a paper about their observations, linked at the bottom of the article. Find this paper. Under what NASA award number was the work performed by R. G. Arendt supported by?\\n\\nFinal answer : 80GSFC21M0002')" ] }, "execution_count": 35, "metadata": {}, "output_type": "execute_result" } ], "source": [ "docs[0]" ] }, { "cell_type": "code", "execution_count": 36, "id": "5004471b-5124-4a18-bb0f-4860a370977c", "metadata": {}, "outputs": [], "source": [ "matched_docs = vector_store.similarity_search(query, k=4)" ] }, { "cell_type": "code", "execution_count": 37, "id": "27faef37-95c5-4cf4-a359-e1c53557d631", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "1" ] }, "execution_count": 37, "metadata": {}, "output_type": "execute_result" } ], "source": [ "len(matched_docs)" ] }, { "cell_type": "code", "execution_count": 38, "id": "18c100a4-bde8-4084-a4d2-fd45f8414671", "metadata": {}, "outputs": [], "source": [ "query = \"When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?\"" ] }, { "cell_type": "code", "execution_count": 39, "id": "67cfcf76-a890-4e37-a2fd-562153b21a35", "metadata": {}, "outputs": [], "source": [ "docs = retriever.invoke(query)" ] }, { "cell_type": "code", "execution_count": 40, "id": "209eb26e-5956-41f0-a599-38706831ed47", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[Document(metadata={'source': 'd5141ca5-e7a0-469f-bf3e-e773507c86e2'}, page_content='Question : When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect? 
Answer using the format DD/MM/YYYY.\\n\\nFinal answer : 19/02/2009')]" ] }, "execution_count": 40, "metadata": {}, "output_type": "execute_result" } ], "source": [ "docs" ] }, { "cell_type": "code", "execution_count": 30, "id": "21770f59-c6ca-4df6-b29e-3d7442e758d9", "metadata": {}, "outputs": [], "source": [ "matched_docs = vector_store.similarity_search(query, k=4)" ] }, { "cell_type": "code", "execution_count": 31, "id": "565af38a-ccdc-4dcd-b6d6-035da1c51b12", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[]" ] }, "execution_count": 31, "metadata": {}, "output_type": "execute_result" } ], "source": [ "matched_docs" ] }, { "cell_type": "code", "execution_count": 21, "id": "01c8f337", "metadata": { "id": "01c8f337", "outputId": "b2b05251-1ab1-4019-f272-129294f28708" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "List of tools used in all samples:\n", "Total number of tools used: 83\n", " ├── web browser: 107\n", " ├── image recognition tools (to identify and parse a figure with three axes): 1\n", " ├── search engine: 101\n", " ├── calculator: 34\n", " ├── unlambda compiler (optional): 1\n", " ├── a web browser.: 2\n", " ├── a search engine.: 2\n", " ├── a calculator.: 1\n", " ├── microsoft excel: 5\n", " ├── google search: 1\n", " ├── ne: 9\n", " ├── pdf access: 7\n", " ├── file handling: 2\n", " ├── python: 3\n", " ├── image recognition tools: 12\n", " ├── jsonld file access: 1\n", " ├── video parsing: 1\n", " ├── python compiler: 1\n", " ├── video recognition tools: 3\n", " ├── pdf viewer: 7\n", " ├── microsoft excel / google sheets: 3\n", " ├── word document access: 1\n", " ├── tool to extract text from images: 1\n", " ├── a word reversal tool / script: 1\n", " ├── counter: 1\n", " ├── excel: 3\n", " ├── image recognition: 5\n", " ├── color recognition: 3\n", " ├── excel file access: 3\n", " ├── xml file access: 1\n", " ├── access to the internet archive, web.archive.org: 1\n", " ├── text processing/diff tool: 1\n", " ├── gif parsing tools: 1\n", " ├── a web browser: 7\n", " ├── a search engine: 7\n", " ├── a speech-to-text tool: 2\n", " ├── code/data analysis tools: 1\n", " ├── audio capability: 2\n", " ├── pdf reader: 1\n", " ├── markdown: 1\n", " ├── a calculator: 5\n", " ├── access to wikipedia: 3\n", " ├── image recognition/ocr: 3\n", " ├── google translate access: 1\n", " ├── ocr: 4\n", " ├── bass note data: 1\n", " ├── text editor: 1\n", " ├── xlsx file access: 1\n", " ├── powerpoint viewer: 1\n", " ├── csv file access: 1\n", " ├── calculator (or use excel): 1\n", " ├── computer algebra system: 1\n", " ├── video processing software: 1\n", " ├── audio processing software: 1\n", " ├── computer vision: 1\n", " ├── google maps: 1\n", " ├── access to excel files: 1\n", " ├── calculator (or ability to count): 1\n", " ├── a file interface: 3\n", " ├── a python ide: 1\n", " ├── spreadsheet editor: 1\n", " ├── tools required: 1\n", " ├── b browser: 1\n", " ├── image recognition and processing tools: 1\n", " ├── computer vision or ocr: 1\n", " ├── c++ compiler: 1\n", " ├── access to google maps: 1\n", " ├── youtube player: 1\n", " ├── natural language processor: 1\n", " ├── graph interaction tools: 1\n", " ├── bablyonian cuniform -> arabic legend: 1\n", " ├── access to youtube: 1\n", " ├── image search tools: 1\n", " ├── calculator or counting function: 1\n", " ├── a speech-to-text audio processing tool: 1\n", " ├── access to academic journal websites: 1\n", " ├── pdf reader/extracter: 1\n", " ├── rubik's cube model: 1\n", " ├── wikipedia: 1\n", " ├── 
video capability: 1\n", " ├── image processing tools: 1\n", " ├── age recognition software: 1\n", " ├── youtube: 1\n" ] } ], "source": [ "# list of the tools used in all the samples\n", "from collections import Counter, OrderedDict\n", "\n", "tools = []\n", "for sample in json_QA:\n", " for tool in sample['Annotator Metadata']['Tools'].split('\\n'):\n", " tool = tool[2:].strip().lower() # drop the leading numbering such as '1. '\n", " if tool.startswith(\"(\"):\n", " tool = tool[11:].strip() # drop a leading parenthesized qualifier such as '(optional) '\n", " tools.append(tool)\n", "tools_counter = OrderedDict(Counter(tools)) # distinct tool names with their usage counts\n", "print(\"List of tools used in all samples:\")\n", "print(\"Total number of tools used:\", len(tools_counter))\n", "for tool, count in tools_counter.items():\n", " print(f\" ├── {tool}: {count}\")" ] }, { "cell_type": "code", "execution_count": 22, "id": "b056a03f-6183-4f8d-97b6-63168f745e51", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "83" ] }, "execution_count": 22, "metadata": {}, "output_type": "execute_result" } ], "source": [ "len(tools_counter)" ] }, { "cell_type": "code", "execution_count": null, "id": "1bfa81e5-c097-4b0f-be7f-760ef1355928", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "colab": { "provenance": [] }, "kernelspec": { "display_name": "hf-agent", "language": "python", "name": "hf-agent" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.13.5" } }, "nbformat": 4, "nbformat_minor": 5 }