Files changed:
- __pycache__/crew.cpython-310.pyc +0 -0
- app.py +3 -1
- crew.py +37 -1
__pycache__/crew.cpython-310.pyc
CHANGED

Binary files a/__pycache__/crew.cpython-310.pyc and b/__pycache__/crew.cpython-310.pyc differ
app.py
CHANGED

@@ -1,6 +1,7 @@
 import os, threading
 import gradio as gr
 from crew import run_parallel_crew
+from crew import run_crew
 from utils import get_questions


@@ -43,7 +44,8 @@ def ask(question, openai_api_key, gemini_api_key, anthropic_api_key, file_name =
         os.environ["GEMINI_API_KEY"] = gemini_api_key
         os.environ["MODEL_API_KEY"] = anthropic_api_key

-        answer = run_parallel_crew(question, file_name)
+        #answer = run_parallel_crew(question, file_name)
+        answer = run_crew(question, file_name)
     except Exception as e:
         raise gr.Error(e)
     finally:
crew.py
CHANGED

@@ -144,7 +144,43 @@ class GAIACrew():
             verbose=True
         )

-
+def run_crew(question, file_path):
+    """
+    Orchestrates the GAIA crew to answer a question, optionally with a file.
+
+    Args:
+        question (str): The user's question.
+        file_path (str): Optional path to a data file to include in the prompt.
+
+    Returns:
+        str: The final answer from the manager agent.
+    """
+    # Build the final prompt, including file JSON if needed
+    final_question = question
+    if file_path:
+        if is_ext(file_path, ".csv") or is_ext(file_path, ".xls") \
+            or is_ext(file_path, ".xlsx") or is_ext(file_path, ".json") \
+            or is_ext(file_path, ".jsonl"):
+            json_data = read_file_json(file_path)
+            final_question = f"{question} JSON data:\n{json_data}."
+        else:
+            final_question = f"{question} File path: {file_path}."
+
+    # Instantiate the crew and kick off the workflow
+    crew_instance = GAIACrew()
+    crew = crew_instance.get_crew()
+    answer = crew.kickoff(inputs={"question": final_question})
+
+    # Post-process through the final-answer model
+    final_answer = get_final_answer(FINAL_ANSWER_MODEL, question, str(answer))
+
+    # Debug logging
+    print(f"=> Initial question: {question}")
+    print(f"=> Final question: {final_question}")
+    print(f"=> Initial answer: {answer}")
+    print(f"=> Final answer: {final_answer}")
+
+    return final_answer

 import concurrent.futures
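The new run_crew relies on several names defined elsewhere in crew.py that are not part of this diff: is_ext, read_file_json, get_final_answer, and FINAL_ANSWER_MODEL. As a rough illustration only, here is one way the two file helpers could look; the actual implementations in the Space may differ, and the pandas dependency is an assumption.

import json
import os
import pandas as pd  # assumed dependency for the tabular formats

def is_ext(file_path, ext):
    # Assumed helper: case-insensitive check of the file extension.
    return os.path.splitext(file_path)[1].lower() == ext.lower()

def read_file_json(file_path):
    # Assumed helper: load CSV/XLS(X)/JSON/JSONL content and return it as a JSON
    # string so run_crew can inline it into the prompt.
    if is_ext(file_path, ".csv"):
        return pd.read_csv(file_path).to_json(orient="records")
    if is_ext(file_path, ".xls") or is_ext(file_path, ".xlsx"):
        return pd.read_excel(file_path).to_json(orient="records")
    if is_ext(file_path, ".jsonl"):
        with open(file_path) as f:
            return json.dumps([json.loads(line) for line in f if line.strip()])
    with open(file_path) as f:
        return json.dumps(json.load(f))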