fixup! Add file handling support in `hf_submission_api`, include file save logic, and static file directory setup
Files changed:
- .gitattributes (+2, -0)
- src/gaia_solving_agent/agent.py (+107, -31)
- src/gaia_solving_agent/hf_submission_api.py (+42, -4)
.gitattributes (CHANGED)
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.mp3 filter=lfs diff=lfs merge=lfs -text
+*.psd filter=lfs diff=lfs merge=lfs -text
src/gaia_solving_agent/agent.py (CHANGED)
@@ -1,18 +1,24 @@
-import re
 from pathlib import Path
-from typing import …
+from typing import Literal
 
 from llama_index.core.agent.workflow import FunctionAgent, AgentWorkflow
 from llama_index.core.prompts import RichPromptTemplate
 from llama_index.llms.nebius import NebiusLLM
 from llama_index.tools.requests import RequestsToolSpec
 from llama_index.tools.wikipedia import WikipediaToolSpec
-from workflows import Workflow, step
+from workflows import Workflow, step, Context
 from workflows.events import StartEvent, Event, StopEvent
 
 from gaia_solving_agent import NEBIUS_API_KEY
 from gaia_solving_agent.prompts import PLANING_PROMPT, FORMAT_ANSWER
-from gaia_solving_agent.tools import …
+from gaia_solving_agent.tools import (
+    tavily_search_web,
+    load_and_search_tools_from_toolspec,
+    simple_web_page_reader_toolspec,
+    vllm_ask_image_tool,
+    youtube_transcript_reader_toolspec,
+)
+from gaia_solving_agent.utils import extract_pattern
 
 # Choice of the model
 cheap_model_name = "meta-llama/Meta-Llama-3.1-8B-Instruct"
@@ -31,12 +37,12 @@ def get_llm(model_name=cheap_model_name):
         max_retries=5,
     )
 
+class PlanEvent(Event):
+    to_do: Literal["Initialize", "Format", "Replan"] = "Initialize"
+    plan: str | None = None
 
 class QueryEvent(Event):
-    …
-    additional_file: Any | None
-    additional_file_path: str | Path | None = None
-    plan: str
+    pass
 
 class AnswerEvent(Event):
     plan: str
@@ -45,44 +51,99 @@ class AnswerEvent(Event):
 
 class GaiaWorkflow(Workflow):
     @step
-    async def setup(self, ev: StartEvent) -> …
+    async def setup(self, ctx: Context, ev: StartEvent) -> PlanEvent:
+        await ctx.set("user_msg", ev.user_msg)
+        await ctx.set("additional_file", ev.additional_file)
+        await ctx.set("additional_file_path", ev.additional_file_path)
+        return PlanEvent()
+
+    @step
+    async def make_plan(self, ctx: Context, ev: PlanEvent) -> PlanEvent | QueryEvent:
+        additional_file_path = await ctx.get("additional_file_path")
+        user_msg = await ctx.get("user_msg")
+
         llm = get_llm(reasoning_model_name)
         prompt_template = RichPromptTemplate(PLANING_PROMPT)
-        file_extension = Path(…
-        …
-            user_request=…
+        file_extension = Path(additional_file_path).suffix if additional_file_path else ""
+        prompt = prompt_template.format(
+            user_request=user_msg,
            additional_file_extension=file_extension,
-        ))
-        return QueryEvent(
-            query=ev.user_msg,
-            additional_file=ev.additional_file,
-            additional_file_path=ev.additional_file_path,
-            plan=plan.text,
        )
 
+        if ev.to_do == "Replan":
+            ...
+            # TODO : Placeholder for future update
+        elif ev.to_do == "Format":
+            prompt = f"""
+            The original plan is not in the correct format.
+
+            There is the query and constraints you must respect :
+            {prompt}
+
+            There is the original plan you must reformat :
+            {ev.plan}
+
+            Stick strictly to the formatting constraints !
+            """
+
+        plan = llm.complete(prompt)
+        await ctx.set("plan", plan.text)
+
+        question = extract_pattern(pattern=r"<Question> :\s*([\s\S]*?)\s*</Question>", text=plan.text)
+        known_facts = extract_pattern(pattern=r"<Known facts> :\s*([\s\S]*?)\s*</Known facts>", text=plan.text)
+        sub_tasks = extract_pattern(pattern=r"<Sub-tasks> :\s*([\s\S]*?)\s*<\/Sub-tasks>", text=plan.text)
+        if any(
+            extracted is None
+            for extracted in [question, known_facts, sub_tasks]
+        ):
+            return PlanEvent(to_do="Format", plan=plan.text)
+        else:
+            await ctx.set("question", question if question is not None else "")
+            await ctx.set("known_facts", known_facts if known_facts is not None else "")
+            await ctx.set("sub_tasks", sub_tasks if sub_tasks is not None else "")
+
+        return QueryEvent()
+
     @step()
-    async def multi_agent_process(self, ev: QueryEvent) -> AnswerEvent:
+    async def multi_agent_process(self, ctx: Context, ev: QueryEvent) -> AnswerEvent:
+        plan = await ctx.get("plan")
+        additional_file = await ctx.get("additional_file")
+        additional_file_path = await ctx.get("additional_file_path")
+
+        question = await ctx.get("question")
+        known_facts = await ctx.get("known_facts")
+        sub_tasks = await ctx.get("sub_tasks")
+        prompt = f"""
+        The question is : {question}
+
+        The known facts are :
+        {known_facts}
+
+        The sub-tasks are :
+        {sub_tasks}
+        """
+
         # Cheap trick to avoid Error 400 errors from OpenAPI
         from llama_index.core.memory import ChatMemoryBuffer
         memory = ChatMemoryBuffer.from_defaults(token_limit=100000)
 
         agent_output = await gaia_solving_agent.run(
-            user_msg=…
+            user_msg=plan,
             memory=memory,
-            additional_file=…
-            additional_file_path=…
+            additional_file=additional_file,
+            additional_file_path=additional_file_path,
         )
-        return AnswerEvent(plan=…
+        return AnswerEvent(plan=plan, answer=str(agent_output))
 
     @step
-    async def parse_answer(self, ev: AnswerEvent) -> StopEvent:
+    async def parse_answer(self, ctx: Context, ev: AnswerEvent) -> StopEvent:
         llm = get_llm(balanced_model_name)
         prompt_template = RichPromptTemplate(FORMAT_ANSWER)
-        …
-        …
-        …
-        …
-        return StopEvent(result=result)
+        question = await ctx.get("question")
+        prompt = prompt_template.format(question=question, answer=ev.answer)
+        result = llm.complete(prompt)
+
+        return StopEvent(result=result.text, reasoning=ev.plan)
 
 
 tavily_search_engine = FunctionAgent(
@@ -100,9 +161,10 @@ tavily_search_engine = FunctionAgent(
     description="Agent that makes web searches to answer questions."
 )
 
-visit_website = FunctionAgent(
+visit_web_page_agent = FunctionAgent(
     tools=[
         *RequestsToolSpec().to_tool_list(),
+        *simple_web_page_reader_toolspec.to_tool_list(),
     ],
     llm=get_llm(),
     system_prompt="""
@@ -119,7 +181,7 @@ visit_website = FunctionAgent(
 )
 
 wikipedia_agent = FunctionAgent(
-    tools=[*WikipediaToolSpec()…
+    tools=[*load_and_search_tools_from_toolspec(WikipediaToolSpec())],
     llm=get_llm(),
     system_prompt="""
 You are a helpful assistant that searches Wikipedia and visit Wikipedia pages.
@@ -139,16 +201,30 @@ You are a helpful assistant that searches Wikipedia and visit Wikipedia pages.
 # num_concurrent_runs=1,
 # )
 
+
 gaia_solving_agent = FunctionAgent(
     tools = [
+        vllm_ask_image_tool,
         tavily_search_web,
         *load_and_search_tools_from_toolspec(WikipediaToolSpec()),
         *simple_web_page_reader_toolspec.to_tool_list(),
         *RequestsToolSpec().to_tool_list(),
+        *youtube_transcript_reader_toolspec.to_tool_list(),
     ],
     llm=get_llm(balanced_model_name),
     system_prompt="""
 You are a helpful assistant that uses tools to browse additional information and resources on the web to answer questions.
+
+Tools you have are of three types:
+- External resources getter: get text, images, video, etc. from the internet
+- Resource querier and transformer: query, summarize or transform a resource into a more digestible format.
+- Analyse or compute : specialized tools to provide a specific analysis or computation.
+
+Try to get resources before querying them.
+If the analysis require a new external resource get it first. (e.g. a set of rules or a process)
+
+You will be provided a question, some known facts summarizing the user provided context and some sub-tasks to complete.
+You should follow the order of the sub-tasks.
 """,
     name="gaia_solving_agent",
     description="Agent that browse additional information and resources on the web.",
src/gaia_solving_agent/hf_submission_api.py (CHANGED)
@@ -1,5 +1,6 @@
 import re
 from functools import lru_cache
+from pathlib import Path
 
 import pandas as pd
 import requests
@@ -11,7 +12,7 @@ from gaia_solving_agent.agent import GaiaWorkflow
 
 
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
-…
+FILE_DIR = Path(__file__).parent / "static" / "files"
 
 def instantiate_agent(space_id: str, **agent_kwargs):
     try:
@@ -52,10 +53,41 @@ def fetching_questions(api_url: str = DEFAULT_API_URL):
     return questions_data
 
 
-async def run_agent(agent, questions_data):
-    …
-    …
+def get_or_save_file_associated_with_task(file_name: str, api_url: str = DEFAULT_API_URL) -> Path | None:
+    file_path = FILE_DIR / file_name
+    task_id = file_path.stem
+
+    if file_path.exists() and file_path.is_file():
+        print(f"File already exists: {file_path}")
+        return file_path
+
+    file_url = f"{api_url}/files/{task_id}"
+
+    print(f"Getting file associated to task: {task_id}")
+    try:
+        response = requests.get(file_url, timeout=15)
+        response.raise_for_status()
+        file_data = response.content
+        if not file_data:
+            print("Fetched file is empty.")
+        print(f"Fetched the file.")
+    except requests.exceptions.RequestException as e:
+        print(f"Error fetching file: {e}")
+    except requests.exceptions.JSONDecodeError as e:
+        print(f"Error decoding JSON response from file endpoint: {e}")
+        print(f"Response text: {response.text[:500]}")
+    except Exception as e:
+        print(f"An unexpected error occurred fetching file: {e}")
+
+    try:  # Save the file
+        with open(file_path, 'wb') as f:
+            f.write(response.content)
+        return file_path
+    except Exception as e:
+        print(f"Error saving file: {e}")
+        return None
 
+def ensure_files_are_loaded(questions_data):
     # First, ensure that all complementary files are in the FILE_DIR
     for item in questions_data:
         if item.get("file_name"):
@@ -65,6 +97,12 @@ async def run_agent(agent, questions_data):
             str(file_path): document
             for file_path, document in zip(file_reader.list_resources(), file_reader.load_data())
         }
+    return additional_files
+
+
+async def run_agent(agent, questions_data, additional_files):
+    results_log = []
+    answers_payload = []
 
     print(f"Running agent on {len(questions_data)} questions...")
     for item in questions_data: