Psiska committed on
Commit
24e87ef
·
1 Parent(s): 014235b

Make the models work in parallel

Browse files
Files changed (2) hide show
  1. __pycache__/crew.cpython-310.pyc +0 -0
  2. crew.py +41 -25
__pycache__/crew.cpython-310.pyc CHANGED
Binary files a/__pycache__/crew.cpython-310.pyc and b/__pycache__/crew.cpython-310.pyc differ
 
crew.py CHANGED
@@ -144,19 +144,16 @@ class GAIACrew():
144
  verbose=True
145
  )
146
 
 
147
 
148
- def run_crew(question, file_path):
149
  """
150
- Orchestrates the GAIA crew to answer a question, optionally with a file.
151
-
152
- Args:
153
- question (str): The user's question.
154
- file_path (str): Optional path to a data file to include in the prompt.
155
-
156
- Returns:
157
- str: The final answer from the manager agent.
158
  """
159
- # Build the final prompt, including file JSON if needed
160
  final_question = question
161
  if file_path:
162
  if is_ext(file_path, ".csv") or is_ext(file_path, ".xls") \
@@ -166,22 +163,41 @@ def run_crew(question, file_path):
166
  final_question = f"{question} JSON data:\n{json_data}."
167
  else:
168
  final_question = f"{question} File path: {file_path}."
169
-
170
- # Instantiate the crew and kick off the workflow
171
  crew_instance = GAIACrew()
172
- crew = crew_instance.get_crew()
173
- answer = crew.kickoff(inputs={"question": final_question})
174
-
175
- # Post-process through the final-answer model
176
- final_answer = get_final_answer(FINAL_ANSWER_MODEL, question, str(answer))
177
-
178
- # Debug logging
179
- print(f"=> Initial question: {question}")
180
- print(f"=> Final question: {final_question}")
181
- print(f"=> Initial answer: {answer}")
182
- print(f"=> Final answer: {final_answer}")
183
-
184
- return final_answer
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
185
 
186
 
187
  def get_final_answer(model, question, answer):
 
144
  verbose=True
145
  )
146
 
147
+ import concurrent.futures
148
 
149
+ def run_parallel_crew(question: str, file_path: str):
150
  """
151
+ 1) Prepares the prompt (including file data if any).
152
+ 2) Runs every non-manager agent in parallel on that prompt.
153
+ 3) Gathers their raw outputs.
154
+ 4) Sends a combined prompt to the manager_agent for the final answer.
 
 
 
 
155
  """
156
+ # 1) Build the final prompt
157
  final_question = question
158
  if file_path:
159
  if is_ext(file_path, ".csv") or is_ext(file_path, ".xls") \
 
163
  final_question = f"{question} JSON data:\n{json_data}."
164
  else:
165
  final_question = f"{question} File path: {file_path}."
166
+
167
+ # 2) Instantiate your crew and split manager vs workers
168
  crew_instance = GAIACrew()
169
+ all_agents = crew_instance.agents
170
+ workers = [a for a in all_agents if a.config.get("name") != "manager_agent"]
171
+ manager = next(a for a in all_agents if a.config.get("name") == "manager_agent")
172
+
173
+ # 3) Run workers in parallel
174
+ inputs = {"question": final_question}
175
+ results = {}
176
+ with concurrent.futures.ThreadPoolExecutor(max_workers=len(workers)) as pool:
177
+ futures = {
178
+ pool.submit(lambda ag: ag.kickoff(inputs), ag): ag.config["name"]
179
+ for ag in workers
180
+ }
181
+ for fut in concurrent.futures.as_completed(futures):
182
+ name = futures[fut]
183
+ try:
184
+ results[name] = fut.result()
185
+ except Exception as e:
186
+ results[name] = f"<error: {e}>"
187
+
188
+ # 4) Compose a manager prompt that includes all worker outputs
189
+ combined = "\n\n".join(f"--- {name} output ---\n{out}"
190
+ for name, out in results.items())
191
+ manager_prompt = (
192
+ f"You have received these reports from your coworkers:\n\n"
193
+ f"{combined}\n\n"
194
+ f"Now, based on the original question, provide the final answer.\n"
195
+ f"Original question: {question}"
196
+ )
197
+
198
+ # 5) Run the manager agent for the final answer
199
+ final = manager.kickoff(inputs={"question": manager_prompt})
200
+ return get_final_answer(FINAL_ANSWER_MODEL, question, str(final))
201
 
202
 
203
  def get_final_answer(model, question, answer):