Update app.py
app.py CHANGED
@@ -17,35 +17,120 @@ GROQ_KEY = os.environ['GROQ_KEY']
 class BasicAgent:
     def __init__(self):
         print("BasicAgent initialized.")
+        self.client = Groq(api_key=GROQ_KEY)
 
     def __call__(self, question: str) -> str:
         print(f"Agent received question (first 50 chars): {question[:50]}...")
         try:
-
-
-
-
-            )
-
-
-
-
-
-
-
-            model = LiteLLMModel(
-                "meta-llama/llama-4-scout-17b-16e-instruct",
-                api_base="https://api.groq.com/openai/v1",
-                api_key=GROQ_KEY,
-            )
-            model.flatten_messages_as_text = True
-
-
-
-
-            )
-            response =
+            message = [
+                {
+                    "role": "user",
+                    "content": question
+                }]
+            completion = self.client.chat.completions.create(
+                messages=message,
+                model="compound-beta",
+            )
+            answer = completion.choices[0].message.content
+            print(f"First answer: {answer}")
+            message = [
+                {
+                    "role": "system",
+                    "content": """
+                    You are an expert in identifying and extracting definitive answers. Your sole task is to analyze the provided text, which is an agent's response, and extract only the conclusive final answer to the original user's query.
+
+                    Output only this core answer. Do not include any explanations, pleasantries, introductory phrases, or any surrounding text.
+
+                    Here are the examples:
+
+                    Input: ... Final answer: 12 ...
+                    You should output: 12
+
+                    Input: $\\boxed{b,c,e}$
+                    Output: b, c, e
+
+                    Input: Jan
+                    Output: Jan
+
+                    Input: The Yankee with the most walks in the 1977 regular season was Reggie Jackson, with 58 walks. He had 357 at bats that season.
+                    Output: 357
+
+                    Input: broccoli, bell pepper, celery, fresh basil, green beans, lettuce, sweet potatoes, zucchini
+                    Output: broccoli, bell pepper, celery, fresh basil, green beans, lettuce, sweet potatoes, zucchini
+                    """
+                },
+                {
+                    "role": "user",
+                    "content": answer
+                }
+            ]
+            completion = self.client.chat.completions.create(
+                messages=message,
+                model="llama-3.1-8b-instant",
+            )
+            response = completion.choices[0].message.content
+        except Exception:
+            try:
+                model = LiteLLMModel(
+                    "llama-3.3-70b-versatile",
+                    api_base="https://api.groq.com/openai/v1",
+                    api_key=GROQ_KEY,
+                )
+                model.flatten_messages_as_text = True
+
+                agent = CodeAgent(
+                    tools=[DuckDuckGoSearchTool(), FinalAnswerTool(), VisitWebpageTool(), PythonInterpreterTool()],
+                    model=model,
+                )
+                response = agent.run(question)
+            except Exception:
+                message = [
+                    {
+                        "role": "user",
+                        "content": question
+                    }]
+                completion = self.client.chat.completions.create(
+                    messages=message,
+                    model="compound-beta-mini",
+                )
+                answer = completion.choices[0].message.content
+                print(f"First answer: {answer}")
+                message = [
+                    {
+                        "role": "system",
+                        "content": """
+                        You are an expert in identifying and extracting definitive answers. Your sole task is to analyze the provided text, which is an agent's response, and extract only the conclusive final answer to the original user's query.
+
+                        Output only this core answer. Do not include any explanations, pleasantries, introductory phrases, or any surrounding text.
+
+                        Here are the examples:
+
+                        Input: ... Final answer: 12 ...
+                        You should output: 12
+
+                        Input: $\\boxed{b,c,e}$
+                        Output: b, c, e
+
+                        Input: Jan
+                        Output: Jan
+
+                        Input: The Yankee with the most walks in the 1977 regular season was Reggie Jackson, with 58 walks. He had 357 at bats that season.
+                        Output: 357
+
+                        Input: broccoli, bell pepper, celery, fresh basil, green beans, lettuce, sweet potatoes, zucchini
+                        Output: broccoli, bell pepper, celery, fresh basil, green beans, lettuce, sweet potatoes, zucchini
+                        """
+                    },
+                    {
+                        "role": "user",
+                        "content": answer
+                    }
+                ]
+                completion = self.client.chat.completions.create(
+                    messages=message,
+                    model="llama-3.1-8b-instant",
+                )
+                response = completion.choices[0].message.content
 
         return response
 
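For context when reading the hunk: the names it uses (Groq, CodeAgent, LiteLLMModel, the smolagents tools) are presumably imported near the top of app.py, outside this diff. A minimal sketch of the imports the new code assumes; the real file's import list may differ:

import os

# Groq SDK client used for the direct chat.completions calls.
from groq import Groq

# smolagents pieces used by the CodeAgent fallback branch.
from smolagents import (
    CodeAgent,
    LiteLLMModel,
    DuckDuckGoSearchTool,
    FinalAnswerTool,
    PythonInterpreterTool,
    VisitWebpageTool,
)

GROQ_KEY = os.environ['GROQ_KEY']  # shown in the hunk's context line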
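A quick way to exercise the class locally, assuming GROQ_KEY is set in the environment (the question below is just a placeholder):

if __name__ == "__main__":
    agent = BasicAgent()
    # __call__ returns the extracted final answer as a plain string.
    print(agent("Who wrote 'The Old Man and the Sea'?"))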
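One readability note: the extraction step (the long system prompt plus the llama-3.1-8b-instant call) appears verbatim in both the first branch and the last fallback. A possible follow-up commit could factor it into a helper; a sketch with hypothetical names EXTRACTION_PROMPT and _extract_final_answer:

# Hypothetical: the system prompt from the diff, defined once at module level.
EXTRACTION_PROMPT = """..."""

class BasicAgent:
    # ... __init__ and __call__ as in the diff ...

    def _extract_final_answer(self, answer: str) -> str:
        # Run the extraction prompt over a raw model answer and return
        # only the conclusive final answer.
        completion = self.client.chat.completions.create(
            messages=[
                {"role": "system", "content": EXTRACTION_PROMPT},
                {"role": "user", "content": answer},
            ],
            model="llama-3.1-8b-instant",
        )
        return completion.choices[0].message.content

Both the compound-beta branch and the compound-beta-mini fallback could then end with `response = self._extract_final_answer(answer)`.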