Update app.py
app.py CHANGED
@@ -18,7 +18,7 @@ GROQ_KEY = os.environ['GROQ_KEY']
 class LLaMaAgent:
     def __init__(self):
         self.model = model = LiteLLMModel(
-            "
+            "llama-3.3-70b-versatile",
             api_base="https://api.groq.com/openai/v1",
             api_key=GROQ_KEY,
         )
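This hunk fills in the primary agent's model id. For reference, a minimal standalone sketch of the same construction, assuming the smolagents package and the GROQ_KEY environment variable used elsewhere in app.py:

import os

from smolagents import LiteLLMModel

# Mirror of the hunk above: LiteLLM pointed at Groq's
# OpenAI-compatible endpoint with the new model id.
model = LiteLLMModel(
    "llama-3.3-70b-versatile",
    api_base="https://api.groq.com/openai/v1",
    api_key=os.environ["GROQ_KEY"],
)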
@@ -37,58 +37,27 @@ class LLaMaAgent:
             print("First LLaMa Error!!!")
             raise
 
-class
+class LLaMaAgent2:
     def __init__(self):
-        self.
-
-
-
-
-
-            "content": question
-        }]
-        completion = self.client.chat.completions.create(
-            messages=message,
-            model="compound-beta",
-        )
-        answer = completion.choices[0].message.content
-        message=[
-            {
-                "role": "system",
-                "content": """
-You are an expert in identifying and extracting definitive answers. Your sole task is to analyze the provided text, which is an agent's response, and extract only the conclusive final answer to the original user's query.
-
-Output only this core answer. Do not include any explanations, pleasantries, introductory phrases, or any surrounding text.
-
-Here The Examples:
-
-Input: ... Final answer: 12 ...
-You should output: 12
-
-Input: $\\boxed{b,c,e}$
-Output: b, c, e
-
-Input: Jan
-Output: Jan
-
-Input: The Yankee with the most walks in the 1977 regular season was Reggie Jackson, with 58 walks. He had 357 at bats that season.
-Output: 357
+        self.model = model = LiteLLMModel(
+            "meta-llama/llama-4-scout-17b-16e-instruct",
+            api_base="https://api.groq.com/openai/v1",
+            api_key=GROQ_KEY,
+        )
+        self.model.flatten_messages_as_text = True
 
-
-
-            """
-            },
-            {
-                "role": "user",
-                "content": answer
-            }
-        ]
-        completion = self.client.chat.completions.create(
-            messages=message,
-            model="gemma2-9b-it",
+        self.agent = CodeAgent(
+            tools=[DuckDuckGoSearchTool(), FinalAnswerTool(), VisitWebpageTool(), PythonInterpreterTool()],
+            model=model,
         )
-
-
+
+    def __call__(self, question: str) -> str:
+        try:
+            response = self.agent.run(question)
+            return response
+        except Exception as e:
+            print("Third LLaMa Error!!!")
+            raise
 
 def run_and_submit_all( profile: gr.OAuthProfile | None):
     """
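The replacement LLaMaAgent2 delegates the fallback to a smolagents CodeAgent equipped with search, webpage, Python-interpreter, and final-answer tools; setting flatten_messages_as_text = True makes LiteLLMModel send plain-text messages rather than structured content lists, which text-only OpenAI-compatible backends tend to accept more reliably.

The class it removes called the Groq SDK directly, twice per question: compound-beta produced a draft answer, then gemma2-9b-it extracted just the final answer under a few-shot system prompt. A minimal sketch of that removed pattern, assuming the groq client package (the extractor prompt is abbreviated from the diff):

import os

from groq import Groq

client = Groq(api_key=os.environ["GROQ_KEY"])

# Abbreviated from the system prompt in the removed code.
EXTRACTOR_PROMPT = (
    "You are an expert in identifying and extracting definitive answers. "
    "Analyze the agent's response and output only the conclusive final "
    "answer, with no explanations or surrounding text."
)

def two_stage_answer(question: str) -> str:
    # Stage 1: compound-beta works through the question.
    draft = client.chat.completions.create(
        messages=[{"role": "user", "content": question}],
        model="compound-beta",
    ).choices[0].message.content
    # Stage 2: gemma2-9b-it strips the draft down to the bare answer.
    return client.chat.completions.create(
        messages=[
            {"role": "system", "content": EXTRACTOR_PROMPT},
            {"role": "user", "content": draft},
        ],
        model="gemma2-9b-it",
    ).choices[0].message.content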
@@ -112,7 +81,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
     # 1. Instantiate Agent ( modify this part to create your agent)
     try:
         llama = LLaMaAgent()
-
+        llama2 = LLaMaAgent2()
     except Exception as e:
         print(f"Error instantiating agent: {e}")
         return f"Error initializing agent: {e}", None
@@ -156,7 +125,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
             submitted_answer = llama(question_text)
         except Exception as ke:
             print("Second LLaMa Error!")
-            submitted_answer =
+            submitted_answer = llama2(question_text)
         print(f"\n\n### Answer{submitted_answer} ###\n\n")
         answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
         results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
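Together, the last two hunks wire the new class in as a per-question fallback. A sketch of the resulting control flow, using the names from the diff (the wrapper function is illustrative):

llama = LLaMaAgent()    # primary: llama-3.3-70b-versatile
llama2 = LLaMaAgent2()  # fallback: CodeAgent on llama-4-scout

def answer(question_text: str) -> str:
    try:
        return llama(question_text)
    except Exception:
        print("Second LLaMa Error!")
        return llama2(question_text)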