lwant committed on
Commit
9c4e981
·
1 Parent(s): 869f110

Update `requirements.txt` to include new dependencies for enhanced functionality and compatibility

Browse files
requirements.txt CHANGED
Binary files a/requirements.txt and b/requirements.txt differ
 
src/gaia_solving_agent/agent.py CHANGED
@@ -52,15 +52,15 @@ class AnswerEvent(Event):
52
  class GaiaWorkflow(Workflow):
53
  @step
54
  async def setup(self, ctx: Context, ev: StartEvent) -> PlanEvent:
55
- await ctx.set("user_msg", ev.user_msg)
56
- await ctx.set("additional_file", ev.additional_file)
57
- await ctx.set("additional_file_path", ev.additional_file_path)
58
  return PlanEvent()
59
 
60
  @step
61
  async def make_plan(self, ctx: Context, ev: PlanEvent) -> PlanEvent | QueryEvent:
62
- additional_file_path = await ctx.get("additional_file_path")
63
- user_msg = await ctx.get("user_msg")
64
 
65
  llm = get_llm(reasoning_model_name)
66
  prompt_template = RichPromptTemplate(PLANING_PROMPT)
@@ -87,7 +87,7 @@ Stick strictly to the formatting constraints !
87
  """
88
 
89
  plan = llm.complete(prompt)
90
- await ctx.set("plan", plan.text)
91
 
92
  question = extract_pattern(pattern=r"<Question> :\s*([\s\S]*?)\s*</Question>", text=plan.text)
93
  known_facts = extract_pattern(pattern=r"<Known facts> :\s*([\s\S]*?)\s*</Known facts>", text=plan.text)
@@ -98,20 +98,20 @@ Stick strictly to the formatting constraints !
98
  ):
99
  return PlanEvent(to_do="Format", plan=plan.text)
100
  else:
101
- await ctx.set("question", question if question is not None else "")
102
- await ctx.set("known_facts", known_facts if known_facts is not None else "")
103
- await ctx.set("sub_tasks", sub_tasks if sub_tasks is not None else "")
104
 
105
  return QueryEvent()
106
 
107
  @step()
108
  async def multi_agent_process(self, ctx: Context, ev: QueryEvent) -> AnswerEvent:
109
- plan = await ctx.get("plan")
110
- additional_file = await ctx.get("additional_file")
111
 
112
- question = await ctx.get("question")
113
- known_facts = await ctx.get("known_facts")
114
- sub_tasks = await ctx.get("sub_tasks")
115
  prompt = f"""
116
  The question is : {question}
117
 
@@ -127,7 +127,7 @@ The sub-tasks are :
127
  memory = ChatMemoryBuffer.from_defaults(token_limit=100000)
128
 
129
  agent_ctx = Context(gaia_solving_agent)
130
- await agent_ctx.set("additional_file", additional_file)
131
  agent_output = await gaia_solving_agent.run(
132
  user_msg=prompt,
133
  memory=memory,
@@ -139,7 +139,7 @@ The sub-tasks are :
139
  async def parse_answer(self, ctx: Context, ev: AnswerEvent) -> StopEvent:
140
  llm = get_llm(balanced_model_name)
141
  prompt_template = RichPromptTemplate(FORMAT_ANSWER)
142
- question = await ctx.get("question")
143
  prompt = prompt_template.format(question=question, answer=ev.answer)
144
  result = llm.complete(prompt)
145
 
 
52
  class GaiaWorkflow(Workflow):
53
  @step
54
  async def setup(self, ctx: Context, ev: StartEvent) -> PlanEvent:
55
+ await ctx.store.set("user_msg", ev.user_msg)
56
+ await ctx.store.set("additional_file", ev.additional_file)
57
+ await ctx.store.set("additional_file_path", ev.additional_file_path)
58
  return PlanEvent()
59
 
60
  @step
61
  async def make_plan(self, ctx: Context, ev: PlanEvent) -> PlanEvent | QueryEvent:
62
+ additional_file_path = await ctx.store.get("additional_file_path")
63
+ user_msg = await ctx.store.get("user_msg")
64
 
65
  llm = get_llm(reasoning_model_name)
66
  prompt_template = RichPromptTemplate(PLANING_PROMPT)
 
87
  """
88
 
89
  plan = llm.complete(prompt)
90
+ await ctx.store.set("plan", plan.text)
91
 
92
  question = extract_pattern(pattern=r"<Question> :\s*([\s\S]*?)\s*</Question>", text=plan.text)
93
  known_facts = extract_pattern(pattern=r"<Known facts> :\s*([\s\S]*?)\s*</Known facts>", text=plan.text)
 
98
  ):
99
  return PlanEvent(to_do="Format", plan=plan.text)
100
  else:
101
+ await ctx.store.set("question", question if question is not None else "")
102
+ await ctx.store.set("known_facts", known_facts if known_facts is not None else "")
103
+ await ctx.store.set("sub_tasks", sub_tasks if sub_tasks is not None else "")
104
 
105
  return QueryEvent()
106
 
107
  @step()
108
  async def multi_agent_process(self, ctx: Context, ev: QueryEvent) -> AnswerEvent:
109
+ plan = await ctx.store.get("plan")
110
+ additional_file = await ctx.store.get("additional_file")
111
 
112
+ question = await ctx.store.get("question")
113
+ known_facts = await ctx.store.get("known_facts")
114
+ sub_tasks = await ctx.store.get("sub_tasks")
115
  prompt = f"""
116
  The question is : {question}
117
 
 
127
  memory = ChatMemoryBuffer.from_defaults(token_limit=100000)
128
 
129
  agent_ctx = Context(gaia_solving_agent)
130
+ await agent_ctx.store.set("additional_file", additional_file)
131
  agent_output = await gaia_solving_agent.run(
132
  user_msg=prompt,
133
  memory=memory,
 
139
  async def parse_answer(self, ctx: Context, ev: AnswerEvent) -> StopEvent:
140
  llm = get_llm(balanced_model_name)
141
  prompt_template = RichPromptTemplate(FORMAT_ANSWER)
142
+ question = await ctx.store.get("question")
143
  prompt = prompt_template.format(question=question, answer=ev.answer)
144
  result = llm.complete(prompt)
145
 
src/gaia_solving_agent/tools.py CHANGED
@@ -45,7 +45,7 @@ async def vllm_ask_image_tool(ctx: Context, query: str) -> str:
45
  The result or response to the provided query based on the processed
46
  image content.
47
  """
48
- images = await ctx.get("additional_file")
49
  return await vllm_ask_image(query=query, images=images)
50
 
51
 
 
45
  The result or response to the provided query based on the processed
46
  image content.
47
  """
48
+ images = await ctx.store.get("additional_file")
49
  return await vllm_ask_image(query=query, images=images)
50
 
51