Commit
·
c3b34bf
1
Parent(s):
81917a3
Initial update
Browse files- .gitattributes +6 -0
- app.py +24 -4
- questions.json +122 -0
- requirements.txt +29 -2
- tools/__pycache__/base_tools.cpython-310.pyc +0 -0
- tools/__pycache__/file_tools.cpython-310.pyc +0 -0
- tools/__pycache__/math_tools.cpython-310.pyc +0 -0
- tools/__pycache__/sub_agents.cpython-310.pyc +0 -0
- tools/__pycache__/web_tools.cpython-310.pyc +0 -0
- tools/base_tools.py +22 -0
- tools/file_tools.py +64 -0
- tools/main_agent.py +63 -0
- tools/math_tools.py +175 -0
- tools/sub_agents.py +134 -0
- tools/web_tools.py +138 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
data filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
data/1f975693-876d-457b-a649-393859e79bf3.mp3 filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
data/7bd855d8-463d-4ed5-93ca-5fe35145f733.xlsx filter=lfs diff=lfs merge=lfs -text
|
| 39 |
+
data/99c9cc74-fdc8-46c6-8f8d-3ce2d3bfeea3.mp3 filter=lfs diff=lfs merge=lfs -text
|
| 40 |
+
data/cca530fc-4052-43b2-b130-b30968d8aa44.png filter=lfs diff=lfs merge=lfs -text
|
| 41 |
+
data/f918266a-b3e0-4914-865d-4faa564f1aef.py filter=lfs diff=lfs merge=lfs -text
|
app.py
CHANGED
|
@@ -3,6 +3,9 @@ import gradio as gr
|
|
| 3 |
import requests
|
| 4 |
import inspect
|
| 5 |
import pandas as pd
|
|
|
|
|
|
|
|
|
|
| 6 |
|
| 7 |
# (Keep Constants as is)
|
| 8 |
# --- Constants ---
|
|
@@ -40,7 +43,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
| 40 |
|
| 41 |
# 1. Instantiate Agent ( modify this part to create your agent)
|
| 42 |
try:
|
| 43 |
-
agent =
|
| 44 |
except Exception as e:
|
| 45 |
print(f"Error instantiating agent: {e}")
|
| 46 |
return f"Error initializing agent: {e}", None
|
|
@@ -73,16 +76,33 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
| 73 |
results_log = []
|
| 74 |
answers_payload = []
|
| 75 |
print(f"Running agent on {len(questions_data)} questions...")
|
|
|
|
|
|
|
|
|
|
| 76 |
for item in questions_data:
|
| 77 |
task_id = item.get("task_id")
|
| 78 |
question_text = item.get("question")
|
| 79 |
if not task_id or question_text is None:
|
| 80 |
print(f"Skipping item with missing task_id or question: {item}")
|
|
|
|
| 81 |
continue
|
| 82 |
try:
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 86 |
except Exception as e:
|
| 87 |
print(f"Error running agent on task {task_id}: {e}")
|
| 88 |
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
|
|
|
|
| 3 |
import requests
|
| 4 |
import inspect
|
| 5 |
import pandas as pd
|
| 6 |
+
import asyncio
|
| 7 |
+
import tqdm
|
| 8 |
+
from tools.main_agent import MainAgent
|
| 9 |
|
| 10 |
# (Keep Constants as is)
|
| 11 |
# --- Constants ---
|
|
|
|
| 43 |
|
| 44 |
# 1. Instantiate Agent ( modify this part to create your agent)
|
| 45 |
try:
|
| 46 |
+
agent = MainAgent()
|
| 47 |
except Exception as e:
|
| 48 |
print(f"Error instantiating agent: {e}")
|
| 49 |
return f"Error initializing agent: {e}", None
|
|
|
|
| 76 |
results_log = []
|
| 77 |
answers_payload = []
|
| 78 |
print(f"Running agent on {len(questions_data)} questions...")
|
| 79 |
+
progress = tqdm.tqdm(
|
| 80 |
+
total=len(questions_data), desc="Running Agent", unit="question"
|
| 81 |
+
)
|
| 82 |
for item in questions_data:
|
| 83 |
task_id = item.get("task_id")
|
| 84 |
question_text = item.get("question")
|
| 85 |
if not task_id or question_text is None:
|
| 86 |
print(f"Skipping item with missing task_id or question: {item}")
|
| 87 |
+
progress.update(1)
|
| 88 |
continue
|
| 89 |
try:
|
| 90 |
+
progress.write(
|
| 91 |
+
f"Running agent on task {task_id} with question: {question_text}"
|
| 92 |
+
)
|
| 93 |
+
submitted_answer = asyncio.run(agent(question_text))
|
| 94 |
+
answers_payload.append(
|
| 95 |
+
{"task_id": task_id, "submitted_answer": submitted_answer}
|
| 96 |
+
)
|
| 97 |
+
|
| 98 |
+
progress.update(1)
|
| 99 |
+
results_log.append(
|
| 100 |
+
{
|
| 101 |
+
"Task ID": task_id,
|
| 102 |
+
"Question": question_text,
|
| 103 |
+
"Submitted Answer": submitted_answer,
|
| 104 |
+
}
|
| 105 |
+
)
|
| 106 |
except Exception as e:
|
| 107 |
print(f"Error running agent on task {task_id}: {e}")
|
| 108 |
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
|
questions.json
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"task_id": "8e867cd7-cff9-4e6c-867a-ff5ddc2550be",
|
| 4 |
+
"question": "How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of english wikipedia.",
|
| 5 |
+
"Level": "1",
|
| 6 |
+
"file_name": ""
|
| 7 |
+
},
|
| 8 |
+
{
|
| 9 |
+
"task_id": "a1e91b78-d3d8-4675-bb8d-62741b4b68a6",
|
| 10 |
+
"question": "In the video https://www.youtube.com/watch?v=L1vXCYZAYYM, what is the highest number of bird species to be on camera simultaneously?",
|
| 11 |
+
"Level": "1",
|
| 12 |
+
"file_name": ""
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"task_id": "2d83110e-a098-4ebb-9987-066c06fa42d0",
|
| 16 |
+
"question": ".rewsna eht sa \"tfel\" drow eht fo etisoppo eht etirw ,ecnetnes siht dnatsrednu uoy fI",
|
| 17 |
+
"Level": "1",
|
| 18 |
+
"file_name": ""
|
| 19 |
+
},
|
| 20 |
+
{
|
| 21 |
+
"task_id": "cca530fc-4052-43b2-b130-b30968d8aa44",
|
| 22 |
+
"question": "Review the chess position provided in the image. It is black's turn. Provide the correct next move for black which guarantees a win. Please provide your response in algebraic notation.",
|
| 23 |
+
"Level": "1",
|
| 24 |
+
"file_name": "cca530fc-4052-43b2-b130-b30968d8aa44.png"
|
| 25 |
+
},
|
| 26 |
+
{
|
| 27 |
+
"task_id": "4fc2f1ae-8625-45b5-ab34-ad4433bc21f8",
|
| 28 |
+
"question": "Who nominated the only Featured Article on English Wikipedia about a dinosaur that was promoted in November 2016?",
|
| 29 |
+
"Level": "1",
|
| 30 |
+
"file_name": ""
|
| 31 |
+
},
|
| 32 |
+
{
|
| 33 |
+
"task_id": "6f37996b-2ac7-44b0-8e68-6d28256631b4",
|
| 34 |
+
"question": "Given this table defining * on the set S = {a, b, c, d, e}\n\n|*|a|b|c|d|e|\n|---|---|---|---|---|---|\n|a|a|b|c|b|d|\n|b|b|c|a|e|c|\n|c|c|a|b|b|a|\n|d|b|e|b|e|d|\n|e|d|b|a|d|c|\n\nprovide the subset of S involved in any possible counter-examples that prove * is not commutative. Provide your answer as a comma separated list of the elements in the set in alphabetical order.",
|
| 35 |
+
"Level": "1",
|
| 36 |
+
"file_name": ""
|
| 37 |
+
},
|
| 38 |
+
{
|
| 39 |
+
"task_id": "9d191bce-651d-4746-be2d-7ef8ecadb9c2",
|
| 40 |
+
"question": "Examine the video at https://www.youtube.com/watch?v=1htKBjuUWec.\n\nWhat does Teal'c say in response to the question \"Isn't that hot?\"",
|
| 41 |
+
"Level": "1",
|
| 42 |
+
"file_name": ""
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"task_id": "cabe07ed-9eca-40ea-8ead-410ef5e83f91",
|
| 46 |
+
"question": "What is the surname of the equine veterinarian mentioned in 1.E Exercises from the chemistry materials licensed by Marisa Alviar-Agnew & Henry Agnew under the CK-12 license in LibreText's Introductory Chemistry materials as compiled 08/21/2023?",
|
| 47 |
+
"Level": "1",
|
| 48 |
+
"file_name": ""
|
| 49 |
+
},
|
| 50 |
+
{
|
| 51 |
+
"task_id": "3cef3a44-215e-4aed-8e3b-b1e3f08063b7",
|
| 52 |
+
"question": "I'm making a grocery list for my mom, but she's a professor of botany and she's a real stickler when it comes to categorizing things. I need to add different foods to different categories on the grocery list, but if I make a mistake, she won't buy anything inserted in the wrong category. Here's the list I have so far:\n\nmilk, eggs, flour, whole bean coffee, Oreos, sweet potatoes, fresh basil, plums, green beans, rice, corn, bell pepper, whole allspice, acorns, broccoli, celery, zucchini, lettuce, peanuts\n\nI need to make headings for the fruits and vegetables. Could you please create a list of just the vegetables from my list? If you could do that, then I can figure out how to categorize the rest of the list into the appropriate categories. But remember that my mom is a real stickler, so make sure that no botanical fruits end up on the vegetable list, or she won't get them when she's at the store. Please alphabetize the list of vegetables, and place each item in a comma separated list.",
|
| 53 |
+
"Level": "1",
|
| 54 |
+
"file_name": ""
|
| 55 |
+
},
|
| 56 |
+
{
|
| 57 |
+
"task_id": "99c9cc74-fdc8-46c6-8f8d-3ce2d3bfeea3",
|
| 58 |
+
"question": "Hi, I'm making a pie but I could use some help with my shopping list. I have everything I need for the crust, but I'm not sure about the filling. I got the recipe from my friend Aditi, but she left it as a voice memo and the speaker on my phone is buzzing so I can't quite make out what she's saying. Could you please listen to the recipe and list all of the ingredients that my friend described? I only want the ingredients for the filling, as I have everything I need to make my favorite pie crust. I've attached the recipe as Strawberry pie.mp3.\n\nIn your response, please only list the ingredients, not any measurements. So if the recipe calls for \"a pinch of salt\" or \"two cups of ripe strawberries\" the ingredients on the list would be \"salt\" and \"ripe strawberries\".\n\nPlease format your response as a comma separated list of ingredients. Also, please alphabetize the ingredients.",
|
| 59 |
+
"Level": "1",
|
| 60 |
+
"file_name": "99c9cc74-fdc8-46c6-8f8d-3ce2d3bfeea3.mp3"
|
| 61 |
+
},
|
| 62 |
+
{
|
| 63 |
+
"task_id": "305ac316-eef6-4446-960a-92d80d542f82",
|
| 64 |
+
"question": "Who did the actor who played Ray in the Polish-language version of Everybody Loves Raymond play in Magda M.? Give only the first name.",
|
| 65 |
+
"Level": "1",
|
| 66 |
+
"file_name": ""
|
| 67 |
+
},
|
| 68 |
+
{
|
| 69 |
+
"task_id": "f918266a-b3e0-4914-865d-4faa564f1aef",
|
| 70 |
+
"question": "What is the final numeric output from the attached Python code?",
|
| 71 |
+
"Level": "1",
|
| 72 |
+
"file_name": "f918266a-b3e0-4914-865d-4faa564f1aef.py"
|
| 73 |
+
},
|
| 74 |
+
{
|
| 75 |
+
"task_id": "3f57289b-8c60-48be-bd80-01f8099ca449",
|
| 76 |
+
"question": "How many at bats did the Yankee with the most walks in the 1977 regular season have that same season?",
|
| 77 |
+
"Level": "1",
|
| 78 |
+
"file_name": ""
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"task_id": "1f975693-876d-457b-a649-393859e79bf3",
|
| 82 |
+
"question": "Hi, I was out sick from my classes on Friday, so I'm trying to figure out what I need to study for my Calculus mid-term next week. My friend from class sent me an audio recording of Professor Willowbrook giving out the recommended reading for the test, but my headphones are broken :(\n\nCould you please listen to the recording for me and tell me the page numbers I'm supposed to go over? I've attached a file called Homework.mp3 that has the recording. Please provide just the page numbers as a comma-delimited list. And please provide the list in ascending order.",
|
| 83 |
+
"Level": "1",
|
| 84 |
+
"file_name": "1f975693-876d-457b-a649-393859e79bf3.mp3"
|
| 85 |
+
},
|
| 86 |
+
{
|
| 87 |
+
"task_id": "840bfca7-4f7b-481a-8794-c560c340185d",
|
| 88 |
+
"question": "On June 6, 2023, an article by Carolyn Collins Petersen was published in Universe Today. This article mentions a team that produced a paper about their observations, linked at the bottom of the article. Find this paper. Under what NASA award number was the work performed by R. G. Arendt supported by?",
|
| 89 |
+
"Level": "1",
|
| 90 |
+
"file_name": ""
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"task_id": "bda648d7-d618-4883-88f4-3466eabd860e",
|
| 94 |
+
"question": "Where were the Vietnamese specimens described by Kuznetzov in Nedoshivina's 2010 paper eventually deposited? Just give me the city name without abbreviations.",
|
| 95 |
+
"Level": "1",
|
| 96 |
+
"file_name": ""
|
| 97 |
+
},
|
| 98 |
+
{
|
| 99 |
+
"task_id": "cf106601-ab4f-4af9-b045-5295fe67b37d",
|
| 100 |
+
"question": "What country had the least number of athletes at the 1928 Summer Olympics? If there's a tie for a number of athletes, return the first in alphabetical order. Give the IOC country code as your answer.",
|
| 101 |
+
"Level": "1",
|
| 102 |
+
"file_name": ""
|
| 103 |
+
},
|
| 104 |
+
{
|
| 105 |
+
"task_id": "a0c07678-e491-4bbc-8f0b-07405144218f",
|
| 106 |
+
"question": "Who are the pitchers with the number before and after Taish\u014d Tamai's number as of July 2023? Give them to me in the form Pitcher Before, Pitcher After, use their last names only, in Roman characters.",
|
| 107 |
+
"Level": "1",
|
| 108 |
+
"file_name": ""
|
| 109 |
+
},
|
| 110 |
+
{
|
| 111 |
+
"task_id": "7bd855d8-463d-4ed5-93ca-5fe35145f733",
|
| 112 |
+
"question": "The attached Excel file contains the sales of menu items for a local fast-food chain. What were the total sales that the chain made from food (not including drinks)? Express your answer in USD with two decimal places.",
|
| 113 |
+
"Level": "1",
|
| 114 |
+
"file_name": "7bd855d8-463d-4ed5-93ca-5fe35145f733.xlsx"
|
| 115 |
+
},
|
| 116 |
+
{
|
| 117 |
+
"task_id": "5a0c1adf-205e-4841-a666-7c3ef95def9d",
|
| 118 |
+
"question": "What is the first name of the only Malko Competition recipient from the 20th Century (after 1977) whose nationality on record is a country that no longer exists?",
|
| 119 |
+
"Level": "1",
|
| 120 |
+
"file_name": ""
|
| 121 |
+
}
|
| 122 |
+
]
|
requirements.txt
CHANGED
|
@@ -1,2 +1,29 @@
|
|
| 1 |
-
|
| 2 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Core LlamaIndex dependencies
|
| 2 |
+
llama-index
|
| 3 |
+
llama-index-llms-openai
|
| 4 |
+
llama-index-embeddings-openai
|
| 5 |
+
llama-index-tools-tavily-research
|
| 6 |
+
llama-index-readers-youtube-transcript
|
| 7 |
+
llama-index-readers-json
|
| 8 |
+
|
| 9 |
+
# Web scraping and HTTP
|
| 10 |
+
aiohttp
|
| 11 |
+
html2text
|
| 12 |
+
requests
|
| 13 |
+
|
| 14 |
+
# Data processing
|
| 15 |
+
pandas
|
| 16 |
+
openpyxl
|
| 17 |
+
xlrd
|
| 18 |
+
|
| 19 |
+
# AI/ML
|
| 20 |
+
openai
|
| 21 |
+
whisper
|
| 22 |
+
|
| 23 |
+
# Wikipedia
|
| 24 |
+
wikipedia
|
| 25 |
+
|
| 26 |
+
# General utilities
|
| 27 |
+
python-dotenv
|
| 28 |
+
asyncio
|
| 29 |
+
gradio[oauth]
|
tools/__pycache__/base_tools.cpython-310.pyc
ADDED
|
Binary file (1.11 kB). View file
|
|
|
tools/__pycache__/file_tools.cpython-310.pyc
ADDED
|
Binary file (2.27 kB). View file
|
|
|
tools/__pycache__/math_tools.cpython-310.pyc
ADDED
|
Binary file (5.92 kB). View file
|
|
|
tools/__pycache__/sub_agents.cpython-310.pyc
ADDED
|
Binary file (3.71 kB). View file
|
|
|
tools/__pycache__/web_tools.cpython-310.pyc
ADDED
|
Binary file (5.45 kB). View file
|
|
|
tools/base_tools.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from llama_index.core.tools import FunctionTool
|
| 2 |
+
|
| 3 |
+
def get_thought(thought: str) -> str:
|
| 4 |
+
"""
|
| 5 |
+
Get the thought of the agent.
|
| 6 |
+
"""
|
| 7 |
+
return f"Thought: {thought}"
|
| 8 |
+
|
| 9 |
+
def get_answer(answer: str) -> str:
|
| 10 |
+
"""
|
| 11 |
+
Get the answer of the agent.
|
| 12 |
+
"""
|
| 13 |
+
return f"Answer: {answer}"
|
| 14 |
+
|
| 15 |
+
def get_base_tools():
|
| 16 |
+
"""
|
| 17 |
+
Get the base tools.
|
| 18 |
+
"""
|
| 19 |
+
return [
|
| 20 |
+
FunctionTool.from_defaults(get_thought),
|
| 21 |
+
FunctionTool.from_defaults(get_answer),
|
| 22 |
+
]
|
tools/file_tools.py
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import logging
|
| 3 |
+
import os
|
| 4 |
+
import tempfile
|
| 5 |
+
from typing import Any, Dict, List, Optional, Union
|
| 6 |
+
import pandas as pd
|
| 7 |
+
import ast
|
| 8 |
+
import inspect
|
| 9 |
+
|
| 10 |
+
import openai
|
| 11 |
+
from llama_index.core import Settings, VectorStoreIndex, SimpleDirectoryReader
|
| 12 |
+
from llama_index.readers.json import JSONReader
|
| 13 |
+
from llama_index.core.schema import Document
|
| 14 |
+
from llama_index.core.tools import FunctionTool
|
| 15 |
+
from llama_index.embeddings.openai import OpenAIEmbedding
|
| 16 |
+
from llama_index.llms.openai import OpenAI
|
| 17 |
+
|
| 18 |
+
# Configure logging
|
| 19 |
+
logging.basicConfig(level=logging.INFO)
|
| 20 |
+
logger = logging.getLogger(__name__)
|
| 21 |
+
|
| 22 |
+
# Initialize OpenAI settings
|
| 23 |
+
Settings.llm = OpenAI(model="gpt-4o", temperature=0.1)
|
| 24 |
+
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
|
| 25 |
+
|
| 26 |
+
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
|
| 27 |
+
|
| 28 |
+
async def get_task_id(question: str) -> str:
|
| 29 |
+
|
| 30 |
+
# db = chromadb.PersistentClient(path="./alfred_chroma_db")
|
| 31 |
+
# chroma_collection = db.get_or_create_collection("alfred")
|
| 32 |
+
# vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
|
| 33 |
+
logger.info(f"Getting task id for question: {question}")
|
| 34 |
+
question = question.replace('"', '')
|
| 35 |
+
|
| 36 |
+
reader = JSONReader(file_path="../questions.json")
|
| 37 |
+
documents = reader.load_data()
|
| 38 |
+
print(len(documents))
|
| 39 |
+
|
| 40 |
+
index = VectorStoreIndex.from_documents(documents=documents)
|
| 41 |
+
query_engine = index.as_query_engine(llm=OpenAI(model="gpt-4o-mini"))
|
| 42 |
+
response = query_engine.query(f"What is the task id of the question: {question}")
|
| 43 |
+
logger.info(f"Task id: {response.response}")
|
| 44 |
+
return response.response
|
| 45 |
+
|
| 46 |
+
def transcribe_mp3_file(task_id: str) -> str:
|
| 47 |
+
"""Transcribe an MP3 file."""
|
| 48 |
+
logger.info(f"Transcribing MP3 file for task id: {task_id}")
|
| 49 |
+
file_path = "../data/" + task_id + ".mp3"
|
| 50 |
+
# get the audio file
|
| 51 |
+
audio_file = open(file_path, "rb")
|
| 52 |
+
# transcribe the audio file
|
| 53 |
+
transcript = client.audio.transcriptions.create(
|
| 54 |
+
model="whisper-1",
|
| 55 |
+
file=audio_file
|
| 56 |
+
)
|
| 57 |
+
# return the transcript
|
| 58 |
+
return transcript.text
|
| 59 |
+
|
| 60 |
+
def get_file_tools():
|
| 61 |
+
"""Return all available file processing tools for the agent."""
|
| 62 |
+
return [
|
| 63 |
+
FunctionTool.from_defaults(transcribe_mp3_file),
|
| 64 |
+
]
|
tools/main_agent.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from llama_index.core.tools import FunctionTool
|
| 2 |
+
import asyncio
|
| 3 |
+
from sub_agents import MathAgent, WebAgent, BaseAgent
|
| 4 |
+
|
| 5 |
+
math_agent = MathAgent()
|
| 6 |
+
web_agent = WebAgent()
|
| 7 |
+
|
| 8 |
+
MAIN_AGENT_PROMPT = """
|
| 9 |
+
You are the Main Agent, responsible for solving complex tasks by coordinating a team of specialized agents.
|
| 10 |
+
When given a task, you must carefully and systematically follow this procedure:
|
| 11 |
+
|
| 12 |
+
Carefully read the entire task, paying close attention to any specific instructions provided (such as required output format, style, structure, or other constraints).
|
| 13 |
+
Break down the task into clear, manageable subproblems.
|
| 14 |
+
For each subproblem:
|
| 15 |
+
Clearly define what needs to be achieved.
|
| 16 |
+
Determine which specialized agent (e.g., Math Agent, Websearch Agent, etc.) is best suited to handle it.
|
| 17 |
+
Create a clear, specific instruction for the agent, ensuring it aligns with the overall task requirements.
|
| 18 |
+
Delegate each subproblem to the appropriate specialized agent.
|
| 19 |
+
Collect and review the agent outputs, checking that they meet the original task's instructions.
|
| 20 |
+
Integrate all partial results logically and cohesively.
|
| 21 |
+
Format the final answer precisely according to the tasks specified requirements.
|
| 22 |
+
Present the complete, correct, and clearly structured final output.
|
| 23 |
+
Important:
|
| 24 |
+
|
| 25 |
+
Always adhere strictly to any instructions related to formatting, style, or special requirements.
|
| 26 |
+
Think step-by-step, delegate intelligently, and assemble the solution carefully.
|
| 27 |
+
Never solve detailed subproblems yourself — always delegate to the appropriate agent.
|
| 28 |
+
Your answer should only be the solution; don't write "Answer:" and don't write a sentence when possible.
|
| 29 |
+
Accuracy, completeness, and strict compliance with the user's original instructions are your top priorities.
|
| 30 |
+
|
| 31 |
+
If the user says they have a file, you should use the file agent to get the information from the file. You dont have to access file directly,so it doesnt matter if you dont have the file or the capability to access it. Just pass the exact question along to the file agent.
|
| 32 |
+
"""
|
| 33 |
+
|
| 34 |
+
async def call_math_agent(question: str) -> str:
|
| 35 |
+
"""Call a math agent by delegating part of the question to it."""
|
| 36 |
+
return await math_agent(question=question)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
async def call_web_agent(question: str) -> str:
|
| 40 |
+
"""Call a search agent by delegating part of the question to it."""
|
| 41 |
+
return await web_agent(question=question)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def get_tools():
|
| 45 |
+
tools = [
|
| 46 |
+
FunctionTool.from_defaults(fn=call_math_agent),
|
| 47 |
+
FunctionTool.from_defaults(fn=call_web_agent),
|
| 48 |
+
]
|
| 49 |
+
return tools
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class MainAgent(BaseAgent):
|
| 53 |
+
def __init__(self):
|
| 54 |
+
super().__init__(
|
| 55 |
+
name="main_agent",
|
| 56 |
+
description="Main agent which creates subtasks and delegates them to other agents. You can use the other agents to look up information in the web, solve math problems, or transcribe audio files. ",
|
| 57 |
+
prompt=MAIN_AGENT_PROMPT,
|
| 58 |
+
tools=get_tools(),
|
| 59 |
+
)
|
| 60 |
+
|
| 61 |
+
if __name__ == "__main__":
|
| 62 |
+
main_agent = MainAgent()
|
| 63 |
+
asyncio.run(main_agent("I'm making a grocery list for my mom, but she's a professor of botany and she's a real stickler when it comes to categorizing things. I need to add different foods to different categories on the grocery list, but if I make a mistake, she won't buy anything inserted in the wrong category. Here's the list I have so far:\n\nmilk, eggs, flour, whole bean coffee, Oreos, sweet potatoes, fresh basil, plums, green beans, rice, corn, bell pepper, whole allspice, acorns, broccoli, celery, zucchini, lettuce, peanuts\n\nI need to make headings for the fruits and vegetables. Could you please create a list of just the vegetables from my list? If you could do that, then I can figure out how to categorize the rest of the list into the appropriate categories. But remember that my mom is a real stickler, so make sure that no botanical fruits end up on the vegetable list, or she won't get them when she's at the store. Please alphabetize the list of vegetables, and place each item in a comma separated list."))
|
tools/math_tools.py
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from math import sqrt
|
| 2 |
+
import logging
|
| 3 |
+
from llama_index.core.tools import FunctionTool
|
| 4 |
+
|
| 5 |
+
def add(a: float, b: float, context: str) -> str:
|
| 6 |
+
"""Add two numbers.
|
| 7 |
+
Args:
|
| 8 |
+
a (float): Addend 1
|
| 9 |
+
b (float): Addend 2
|
| 10 |
+
context (str): Context for the addition, this should be used to keep track of the operations.
|
| 11 |
+
Returns:
|
| 12 |
+
str: The result with the context included.
|
| 13 |
+
"""
|
| 14 |
+
return f"Result {str(a + b)} for {context}"
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def sub(a: float, b: float, context: str) -> str:
|
| 18 |
+
"""Subtract two numbers.
|
| 19 |
+
Args:
|
| 20 |
+
a (float): Minuend
|
| 21 |
+
b (float): Subtrahend
|
| 22 |
+
context (str): Context for the subtraction, this should be used to keep track of the operations.
|
| 23 |
+
Returns:
|
| 24 |
+
str: The result with the context included.
|
| 25 |
+
"""
|
| 26 |
+
return f"Result {str(a - b)} for {context}"
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def multiply(a: float, b: float, context: str) -> str:
|
| 30 |
+
"""Multiply two numbers.
|
| 31 |
+
Args:
|
| 32 |
+
a (float): Multiplicand
|
| 33 |
+
b (float): Multiplier
|
| 34 |
+
context (str): Context for the multiplication, this should be used to keep track of the operations.
|
| 35 |
+
Returns:
|
| 36 |
+
str: The result with the context included.
|
| 37 |
+
"""
|
| 38 |
+
return f"Result {str(a * b)} for {context}"
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def divide(a: float, b: float, context: str) -> str:
|
| 42 |
+
"""Divide two numbers.
|
| 43 |
+
Args:
|
| 44 |
+
a (float): Dividend
|
| 45 |
+
b (float): Divisor
|
| 46 |
+
context (str): Context for the division, this should be used to keep track of the operations.
|
| 47 |
+
Returns:
|
| 48 |
+
str: The result with the context included, or an error message if division by zero occurs.
|
| 49 |
+
"""
|
| 50 |
+
if b == 0:
|
| 51 |
+
return f"Error: Division by zero for {context}"
|
| 52 |
+
return f"Result {str(a / b)} for {context}"
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def floordiv(a: float, b: float, context: str) -> str:
|
| 56 |
+
"""Floor divide two numbers.
|
| 57 |
+
Args:
|
| 58 |
+
a (float): Dividend
|
| 59 |
+
b (float): Divisor
|
| 60 |
+
context (str): Context for the floor division, this should be used to keep track of the operations.
|
| 61 |
+
Returns:
|
| 62 |
+
str: The result with the context included, or an error message if division by zero occurs.
|
| 63 |
+
"""
|
| 64 |
+
if b == 0:
|
| 65 |
+
return f"Error: Division by zero for {context}"
|
| 66 |
+
return f"Result {str(a // b)} for {context}"
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def mod(a: float, b: float, context: str) -> str:
|
| 70 |
+
"""Get the modulus of two numbers.
|
| 71 |
+
Args:
|
| 72 |
+
a (float): Dividend
|
| 73 |
+
b (float): Divisor
|
| 74 |
+
context (str): Context for the modulus operation, this should be used to keep track of the operations.
|
| 75 |
+
Returns:
|
| 76 |
+
str: The result with the context included.
|
| 77 |
+
"""
|
| 78 |
+
return f"Result {str(a % b)} for {context}"
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def sqrt(a: float, context: str) -> str:
|
| 82 |
+
"""Get the square root of a number.
|
| 83 |
+
Args:
|
| 84 |
+
a (float): Number to find the square root of
|
| 85 |
+
context (str): Context for the square root operation, this should be used to keep track of the operations.
|
| 86 |
+
Returns:
|
| 87 |
+
str: The result with the context included.
|
| 88 |
+
"""
|
| 89 |
+
return f"Result {str(a**0.5)} for {context}"
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def equals(a: float, b: float, context: str) -> str:
|
| 93 |
+
"""Check if two numbers are equal.
|
| 94 |
+
Args:
|
| 95 |
+
a (float): First number
|
| 96 |
+
b (float): Second number
|
| 97 |
+
context (str): Context for the equality check, this should be used to keep track of the operations.
|
| 98 |
+
Returns:
|
| 99 |
+
str: The result with the context included.
|
| 100 |
+
"""
|
| 101 |
+
return f"Result {str(a == b)} for {context}"
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def less_than(a: float, b: float, context: str) -> str:
|
| 105 |
+
"""Check if the first number is less than the second number.
|
| 106 |
+
Args:
|
| 107 |
+
a (float): First number
|
| 108 |
+
b (float): Second number
|
| 109 |
+
context (str): Context for the less than check, this should be used to keep track of the operations.
|
| 110 |
+
Returns:
|
| 111 |
+
str: The result with the context included.
|
| 112 |
+
"""
|
| 113 |
+
return f"Result {str(a < b)} for {context}"
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def more_than(a: float, b: float, context: str) -> str:
|
| 117 |
+
"""Check if the first number is more than the second number.
|
| 118 |
+
Args:
|
| 119 |
+
a (float): First number
|
| 120 |
+
b (float): Second number
|
| 121 |
+
context (str): Context for the more than check, this should be used to keep track of the operations.
|
| 122 |
+
Returns:
|
| 123 |
+
str: The result with the context included.
|
| 124 |
+
"""
|
| 125 |
+
return f"Result {str(a > b)} for {context}"
|
| 126 |
+
|
| 127 |
+
def solve_math_problem(problem: str) -> str:
|
| 128 |
+
"""
|
| 129 |
+
Solve mathematical problems using GPT-4.
|
| 130 |
+
Args:
|
| 131 |
+
problem: The mathematical problem to solve
|
| 132 |
+
Returns:
|
| 133 |
+
str: Solution
|
| 134 |
+
"""
|
| 135 |
+
|
| 136 |
+
try:
|
| 137 |
+
from openai import OpenAI
|
| 138 |
+
client = OpenAI()
|
| 139 |
+
|
| 140 |
+
response = client.chat.completions.create(
|
| 141 |
+
model="gpt-4o",
|
| 142 |
+
messages=[
|
| 143 |
+
{
|
| 144 |
+
"role": "system",
|
| 145 |
+
"content": "You are a mathematics expert. Solve problems step by step with clear explanations."
|
| 146 |
+
},
|
| 147 |
+
{
|
| 148 |
+
"role": "user",
|
| 149 |
+
"content": f"Please solve this mathematical problem step by step:\n\n{problem}"
|
| 150 |
+
}
|
| 151 |
+
]
|
| 152 |
+
)
|
| 153 |
+
|
| 154 |
+
return response.choices[0].message.content
|
| 155 |
+
|
| 156 |
+
except Exception as e:
|
| 157 |
+
return f"Math problem solving failed: {str(e)}"
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def get_math_tools():
    """Return the arithmetic and comparison functions wrapped as FunctionTools.

    The tuple below preserves the original registration order. Note that
    ``sum`` here refers to the module-level function of that name, not the
    builtin.
    """
    operations = (
        sum,
        sub,
        add,
        sqrt,
        floordiv,
        mod,
        multiply,
        equals,
        less_than,
        more_than,
    )
    return [FunctionTool.from_defaults(fn=op) for op in operations]
|
tools/sub_agents.py
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import asyncio
|
| 3 |
+
import logging
|
| 4 |
+
from abc import ABC
|
| 5 |
+
|
| 6 |
+
from llama_index.core.agent.workflow import FunctionAgent
|
| 7 |
+
from llama_index.llms.openai import OpenAI
|
| 8 |
+
from llama_index.core.tools import FunctionTool
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
from base_tools import get_base_tools
|
| 12 |
+
from math_tools import get_math_tools
|
| 13 |
+
from web_tools import get_web_tools
|
| 14 |
+
from file_tools import get_file_tools
|
| 15 |
+
|
| 16 |
+
# System prompt for MathAgent: forces explicit step-by-step planning with
# one tool-backed operation per step and a self-verification pass.
MATH_AGENT_PROMPT = """
You are an AI agent highly skilled in solving math problems using basic operations: addition, subtraction, multiplication, and division. You have access to internal functions that perform these operations precisely.

Your task is to always find the correct final answer by following a clear and reliable solving process:

Carefully read and understand the user's problem.

Break the problem down into smaller, logical steps.

Plan only one step at a time.

For each step:

Clearly explain what operation needs to be performed and why.
Use the appropriate internal tools to carry out the operation.
State the result of each step explicitly before continuing.
Proceed methodically until the problem is fully solved.
Summarize your intermediate results and plan your next step based on the results.

Before answering the question, verify if your answer is correct, and if it is not correct, try to correct it.

At the end, present the final, correct answer clearly.

Accuracy is your highest priority, but your explanations should also be easy for the user to follow. Always solve step-by-step, without skipping any parts, even if the problem looks simple.
"""

# System prompt for WebAgent: search-first methodology — plan queries,
# gather sources, then answer only from verified retrieved information.
WEB_AGENT_PROMPT = """
You are an AI agent specialized in gathering accurate and up-to-date information by performing web searches. When given a user's prompt, you must follow this process:

Carefully read and fully understand the user's query.
Determine what specific information or context is needed to answer it accurately.
Think about the information you need, break it down into as many queries as you see necessary, and formulate your plan.
Plan and perform targeted web searches to gather relevant, reliable information.
Consider doing more web searches to get more information about specific topics within the gathered information as you see fit.
Summarize key findings clearly, identifying trusted sources where appropriate.
Use the gathered information to construct a complete, precise, and helpful final answer.
If multiple sources provide different information, note the differences and recommend the most trustworthy or consistent one.
Always prioritize accuracy, clarity, and usefulness in your final response.
Verify the current time before you check the up-to-date information.
Break the problem down into smaller steps.

Plan only one step at a time.

For each step:

Clearly explain what searches needs to be performed and why.
Use the appropriate internal tools to carry out the tasks.
State the result of each step explicitly before continuing.
Proceed methodically until the problem is fully solved.
Only rely on verified information retrieved from your searches. Be methodical: search first, then answer — never assume facts without confirming them.
"""

# System prompt for FileAgent: routes every question through get_task_id and
# transcribe_mp3_file. (Prompt text is runtime data — typos left as-is.)
FILE_AGENT_PROMPT = """
You are a file agent. You are given a question and you need to answer it. Always use the get_task_id tool to get the task id of the question. Then pass the task id to the transcribe_mp3_file tool to get the transcript of the audio file. Then use the transcribed text to answer the question. It doesnt matter if you dont have the file or the capability to access it. Just pass the exact question along to the transcribe_mp3_file tool to get the transcript which you can then use to answer the question.
"""
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class BaseAgent(ABC):
    """Shared scaffolding for the specialised sub-agents (math/web/file).

    Wires an OpenAI LLM plus a tool list into a llama_index
    ``FunctionAgent``; subclasses only supply their configuration via
    ``super().__init__``.
    """

    def __init__(
        self, name: str, description: str, prompt: str, tools: list, llm_model: str = "gpt-4.1"
    ):
        # NOTE(review): logged before construction actually completes.
        logging.info(f"{name} Agent initialized.")

        # LLM backing this agent; requires OPENAI_API_KEY in the environment.
        self.llm = OpenAI(
            model=llm_model,
            temperature=0.1,
            api_key=os.getenv("OPENAI_API_KEY"),
        )

        # Every agent receives the shared base tools plus its own speciality tools.
        agent_tools = get_base_tools()
        agent_tools.extend(tools)

        self.agent = FunctionAgent(
            llm=self.llm,
            tools=agent_tools,
            name=name,
            description=description,
            system_prompt=prompt,
        )

    async def chat(self, question: str, history: dict) -> str:
        """Answer *question*; *history* is accepted but currently ignored."""
        answer = await self(question)
        return answer

    async def __call__(self, question: str) -> str:
        """Run the underlying FunctionAgent workflow and return the reply text.

        The 60-second timeout applies to the whole workflow run; on expiry
        the awaited run raises rather than returning.
        """
        answer = await self.agent.run(user_msg=question, timeout=60)
        print(answer)  # debug dump of the raw workflow result to stdout
        return answer.response.content
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class MathAgent(BaseAgent):
    """Sub-agent configured for step-by-step arithmetic problem solving."""

    def __init__(self):
        super().__init__(
            llm_model="o4-mini",
            name="math agent",
            description="A agent specializing in solving math problems.",
            tools=get_math_tools(),
            prompt=MATH_AGENT_PROMPT,
        )
|
| 115 |
+
|
| 116 |
+
class WebAgent(BaseAgent):
    """Sub-agent configured for web research via search and summarization tools."""

    def __init__(self):
        super().__init__(
            llm_model="o4-mini",
            name="web agent",
            description="A agent specializing in searching the web for information.",
            tools=get_web_tools(),
            prompt=WEB_AGENT_PROMPT,
        )
|
| 125 |
+
|
| 126 |
+
class FileAgent(BaseAgent):
    """Sub-agent configured for answering questions about attached files."""

    def __init__(self):
        super().__init__(
            llm_model="o4-mini",
            name="file agent",
            description="A agent specializing in searching through attached files for information. Always use the get_task_id tool to get the task id of the question. Then pass the task id to the transcribe_mp3_file tool to get the transcript of the audio file. Then use the transcribed text to answer the question.",
            tools=get_file_tools(),
            prompt=FILE_AGENT_PROMPT,
        )
|
tools/web_tools.py
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import logging
|
| 3 |
+
import os
|
| 4 |
+
from typing import Any, Dict, List, Optional
|
| 5 |
+
|
| 6 |
+
import aiohttp
|
| 7 |
+
import html2text
|
| 8 |
+
from griffe import json_decoder
|
| 9 |
+
from llama_index.core import Settings, VectorStoreIndex
|
| 10 |
+
from llama_index.core.schema import Document
|
| 11 |
+
from llama_index.core.tools import FunctionTool
|
| 12 |
+
from llama_index.embeddings.openai import OpenAIEmbedding
|
| 13 |
+
from llama_index.llms.openai import OpenAI
|
| 14 |
+
from llama_index.tools.tavily_research import TavilyToolSpec
|
| 15 |
+
from llama_index.tools.wikipedia import WikipediaToolSpec
|
| 16 |
+
from llama_index.readers.youtube_transcript import YoutubeTranscriptReader
|
| 17 |
+
# Configure logging for this module (root-level basicConfig at import time).
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Initialize global llama_index defaults: gpt-4o for queries and the small
# OpenAI embedding model for indexing. Both require OPENAI_API_KEY.
# NOTE(review): summarize_websites later reassigns Settings.llm to
# gpt-4o-mini process-wide — confirm that override is intentional.
Settings.llm = OpenAI(model="gpt-4o", temperature=0.1)
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
async def fetch_and_process(
    urls: List[str], session: aiohttp.ClientSession, timeout: int = 10
) -> List[Document]:
    """Download *urls* concurrently and wrap each page as a Document.

    Pages that time out or fail to download are logged as warnings and
    dropped from the result; HTML is converted to markdown-ish plain text
    via html2text.
    """

    async def _download(url: str) -> Dict[str, str]:
        # Best-effort fetch: failures yield an empty payload instead of raising.
        try:
            async with session.get(url, timeout=timeout) as response:
                body = await response.text()
                return {"text": body, "url": str(response.url)}
        except (asyncio.TimeoutError, aiohttp.ClientError) as e:
            logging.warning(f"Could not fetch {url}: {repr(e)}")
            return {"text": "", "url": url}

    pages = await asyncio.gather(*(_download(url) for url in urls))

    documents = []
    for page in pages:
        if page["text"]:
            documents.append(
                Document(text=html2text.html2text(page["text"]), id_=page["url"])
            )
    return documents
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
async def summarize_websites(urls: List[str], query: str) -> List[str]:
    """Summarize a query from content across multiple websites. Even if there is only one website, it will still be used.

    Args:
        urls: A list of URLs to summarize.
        query: The query to summarize.
    Returns:
        A list of summaries, one per successfully fetched URL, each
        formatted as "Source: <url> \nContent: <summary>".
    """

    logging.info(f"Summarizing {len(urls)} websites for query: {query}")

    # NOTE(review): this mutates the module-global llama_index Settings,
    # replacing the gpt-4o default configured at import time for every
    # subsequent caller in the process — confirm this is intentional.
    Settings.llm = OpenAI(model="gpt-4o-mini")
    Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")

    summaries = []

    async with aiohttp.ClientSession() as session:
        # Failed fetches are dropped by fetch_and_process, so the output may
        # contain fewer entries than len(urls).
        documents = await fetch_and_process(urls, session)
        for doc in documents:
            # One single-document index per page, queried sequentially.
            index = VectorStoreIndex.from_documents([doc])
            result = index.as_query_engine().query(f"Summarize in very meticulous detail. {query}")
            summaries.append(f"Source: {doc.id_} \nContent: {result.response}")

    return summaries
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def tavily_search(query: str, max_results: int = 10) -> List[Dict]:
    """
    Tavily search with result formatting.

    Args:
        query: The query to search for.
        max_results: The maximum number of results to return. (Previously
            annotated ``Optional[int]``, but ``None`` is never a valid
            value here — it would fail inside TavilyToolSpec.)
    Returns:
        results: A list of dictionaries containing the results as URLS. Need to be used with the summarize_websites tool.
        On failure, a single-element list with an "error" key.
    """
    logger.info(f"Called tavily_search for: {query}")

    try:
        # Requires TAVILY_API_KEY in the environment.
        search_engine = TavilyToolSpec(api_key=os.getenv("TAVILY_API_KEY"))
        search_results = search_engine.search(query, max_results=max_results)

        # Each Tavily hit comes back as a Document; flatten to url + content.
        results = []
        for document in search_results:
            results.append({
                "url": document.metadata.get("url", ""),
                "content": f"Title: {document.metadata.get('title', '')}\nContent: {document.text}"
            })

        return results
    except Exception as e:
        logger.error(f"Tavily search failed: {str(e)}")
        return [{"error": f"Search failed: {str(e)}"}]
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def search_wikipedia(query: str, language: str = "en") -> str:
    """
    Search Wikipedia for specific information. This is a more efficient way to search Wikipedia than the tavily_search tool. Need to be used with the summarize_websites tool.
    Args:
        query: The search query
        language: Wikipedia language code (default: "en")
    Returns:
        str: Wikipedia content summary, or an error message on failure.
    """
    logger.info(f"Searching Wikipedia for: {query}")

    try:
        wikipedia_tool = WikipediaToolSpec()
        # Pass the language positionally: WikipediaToolSpec.search_data
        # declares its language parameter as ``lang``, so the keyword
        # ``language=`` would raise a TypeError that the except below
        # silently turns into a "search failed" string.
        search_results = wikipedia_tool.search_data(query, language)
        return search_results
    except Exception as e:
        logger.error(f"Wikipedia search failed: {str(e)}")
        return f"Wikipedia search failed: {str(e)}"
|
| 122 |
+
|
| 123 |
+
def transcribe_youtube_video(video_url: str) -> str:
    """Fetch the transcript of a YouTube video as plain text.

    Args:
        video_url: Full URL of the YouTube video.

    Returns:
        str: The transcript text (documents joined by newlines), matching
            the declared return type, or an error message on failure —
            consistent with the other tools in this module.
    """
    try:
        reader = YoutubeTranscriptReader()
        # load_data expects a *list* of links (``ytlinks``); passing the bare
        # string would be iterated element-by-element as separate links.
        documents = reader.load_data(ytlinks=[video_url])
        # Flatten the returned Document objects into the declared str result.
        return "\n".join(doc.text for doc in documents)
    except Exception as e:
        return f"YouTube transcription failed: {str(e)}"
|
| 130 |
+
|
| 131 |
+
def get_web_tools():
    """Return all available tools for the agent, in registration order."""
    web_functions = (
        summarize_websites,
        tavily_search,
        search_wikipedia,
        transcribe_youtube_video,
    )
    return [FunctionTool.from_defaults(fn) for fn in web_functions]
|