Spaces:
Sleeping
Sleeping
George Sergia
committed on
Commit
·
99fc878
1
Parent(s):
de17a0a
Add more tools to agent
Browse files
- agent.py +14 -14
- app.py +6 -5
- requirements.txt +3 -0
agent.py
CHANGED
|
@@ -1,20 +1,20 @@
|
|
| 1 |
from llama_index.core.agent.workflow import AgentWorkflow
|
| 2 |
-
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
|
| 3 |
from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec
|
| 4 |
-
from llama_index.
|
| 5 |
-
from llama_index.
|
| 6 |
-
import
|
| 7 |
-
import yaml
|
| 8 |
|
| 9 |
-
async def main(query: str,
|
| 10 |
-
hugging_face_llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct", token=
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
|
|
|
|
|
|
| 14 |
|
| 15 |
agent = AgentWorkflow.from_tools_or_functions(
|
| 16 |
-
|
| 17 |
-
llm=
|
| 18 |
system_prompt="""
|
| 19 |
You are a general AI assistant. I will ask you a question.
|
| 20 |
Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER].
|
|
@@ -25,6 +25,6 @@ async def main(query: str, hf_token: str) -> str:
|
|
| 25 |
"""
|
| 26 |
)
|
| 27 |
|
| 28 |
-
|
| 29 |
-
|
| 30 |
return response
|
|
|
|
| 1 |
from llama_index.core.agent.workflow import AgentWorkflow
|
|
|
|
| 2 |
from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec
|
| 3 |
+
from llama_index.tools.wikipedia import WikipediaToolSpec
|
| 4 |
+
from llama_index.tools.code_interpreter import CodeInterpreterToolSpec
|
| 5 |
+
from llama_index.llms.gemini import Gemini
|
|
|
|
| 6 |
|
| 7 |
+
async def main(query: str, api_key: str) -> str:
|
| 8 |
+
#hugging_face_llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct", token=api_key)
|
| 9 |
+
gemini_llm = Gemini(model="models/gemini-2.0-flash-lite", token=api_key)
|
| 10 |
+
|
| 11 |
+
tools = DuckDuckGoSearchToolSpec().to_tool_list()
|
| 12 |
+
tools += WikipediaToolSpec().to_tool_list()
|
| 13 |
+
tools += CodeInterpreterToolSpec().to_tool_list()
|
| 14 |
|
| 15 |
agent = AgentWorkflow.from_tools_or_functions(
|
| 16 |
+
tools_or_functions=tools,
|
| 17 |
+
llm=gemini_llm,
|
| 18 |
system_prompt="""
|
| 19 |
You are a general AI assistant. I will ask you a question.
|
| 20 |
Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER].
|
|
|
|
| 25 |
"""
|
| 26 |
)
|
| 27 |
|
| 28 |
+
response = await agent.run(query)
|
| 29 |
+
|
| 30 |
return response
|
app.py
CHANGED
|
@@ -19,11 +19,10 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
| 19 |
Fetches all questions, runs the BasicAgent on them, submits all answers,
|
| 20 |
and displays the results.
|
| 21 |
"""
|
| 22 |
-
for name, value in os.environ.items():
|
| 23 |
-
print("{0}: {1}".format(name, name))
|
| 24 |
|
| 25 |
load_dotenv()
|
| 26 |
-
HF_TOKEN = os.getenv("HF_TOKEN")
|
|
|
|
| 27 |
|
| 28 |
# --- Determine HF Space Runtime URL and Repo URL ---
|
| 29 |
space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
|
|
@@ -81,7 +80,8 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
| 81 |
print(f"Skipping item with missing task_id or question: {item}")
|
| 82 |
continue
|
| 83 |
try:
|
| 84 |
-
|
|
|
|
| 85 |
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
|
| 86 |
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
|
| 87 |
except Exception as e:
|
|
@@ -92,12 +92,13 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
| 92 |
print("Agent did not produce any answers to submit.")
|
| 93 |
return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
|
| 94 |
|
| 95 |
-
return
|
| 96 |
# 4. Prepare Submission
|
| 97 |
submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
|
| 98 |
status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
|
| 99 |
print(status_update)
|
| 100 |
|
|
|
|
|
|
|
| 101 |
# 5. Submit
|
| 102 |
print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
|
| 103 |
try:
|
|
|
|
| 19 |
Fetches all questions, runs the BasicAgent on them, submits all answers,
|
| 20 |
and displays the results.
|
| 21 |
"""
|
|
|
|
|
|
|
| 22 |
|
| 23 |
load_dotenv()
|
| 24 |
+
#HF_TOKEN = os.getenv("HF_TOKEN")
|
| 25 |
+
API_KEY = os.getenv("GEMINI_API_KEY")
|
| 26 |
|
| 27 |
# --- Determine HF Space Runtime URL and Repo URL ---
|
| 28 |
space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
|
|
|
|
| 80 |
print(f"Skipping item with missing task_id or question: {item}")
|
| 81 |
continue
|
| 82 |
try:
|
| 83 |
+
agent_output = asyncio.run(agent.main(question_text, API_KEY))
|
| 84 |
+
submitted_answer = str(agent_output)
|
| 85 |
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
|
| 86 |
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
|
| 87 |
except Exception as e:
|
|
|
|
| 92 |
print("Agent did not produce any answers to submit.")
|
| 93 |
return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
|
| 94 |
|
|
|
|
| 95 |
# 4. Prepare Submission
|
| 96 |
submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
|
| 97 |
status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
|
| 98 |
print(status_update)
|
| 99 |
|
| 100 |
+
return "Agent finished. Submitting answers...", pd.DataFrame(results_log)
|
| 101 |
+
|
| 102 |
# 5. Submit
|
| 103 |
print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
|
| 104 |
try:
|
requirements.txt
CHANGED
|
@@ -6,4 +6,7 @@ dotenv
|
|
| 6 |
llama-index
|
| 7 |
llama_index.llms.huggingface_api
|
| 8 |
llama_index.tools.duckduckgo
|
|
|
|
|
|
|
|
|
|
| 9 |
asyncio
|
|
|
|
| 6 |
llama-index
|
| 7 |
llama_index.llms.huggingface_api
|
| 8 |
llama_index.tools.duckduckgo
|
| 9 |
+
llama_index.tools.wikipedia
|
| 10 |
+
llama_index.tools.code_interpreter
|
| 11 |
+
llama-index-llms-gemini
|
| 12 |
asyncio
|