Update app.py
Browse files
app.py
CHANGED
|
@@ -3,7 +3,7 @@ import gradio as gr
|
|
| 3 |
import requests
|
| 4 |
import inspect
|
| 5 |
import pandas as pd
|
| 6 |
-
from smolagents import CodeAgent, DuckDuckGoSearchTool, FinalAnswerTool, InferenceClientModel, Tool, tool, VisitWebpageTool, HfApiModel
|
| 7 |
from groq import Groq
|
| 8 |
|
| 9 |
# (Keep Constants as is)
|
|
@@ -16,60 +16,24 @@ GROQ_KEY = os.environ['GROQ_KEY']
|
|
| 16 |
class BasicAgent:
|
| 17 |
def __init__(self):
|
| 18 |
print("BasicAgent initialized.")
|
| 19 |
-
self.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
|
| 21 |
def __call__(self, question: str) -> str:
|
| 22 |
print(f"Agent received question (first 50 chars): {question[:50]}...")
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
}]
|
| 29 |
-
completion = self.client.chat.completions.create(
|
| 30 |
-
messages=message,
|
| 31 |
-
model="compound-beta",
|
| 32 |
-
)
|
| 33 |
-
answer = completion.choices[0].message.content
|
| 34 |
-
print(f"First answer: {answer}")
|
| 35 |
-
message=[
|
| 36 |
-
{
|
| 37 |
-
"role": "system",
|
| 38 |
-
"content": """
|
| 39 |
-
You are an expert in identifying and extracting definitive answers. Your sole task is to analyze the provided text, which is an agent's response, and extract only the conclusive final answer to the original user's query.
|
| 40 |
-
|
| 41 |
-
Output only this core answer. Do not include any explanations, pleasantries, introductory phrases, or any surrounding text.
|
| 42 |
-
|
| 43 |
-
Here The Examples:
|
| 44 |
-
|
| 45 |
-
Input: ... Final answer: 12 ...
|
| 46 |
-
You should output: 12
|
| 47 |
-
|
| 48 |
-
Input: $\\boxed{b,c,e}$
|
| 49 |
-
Output: b, c, e
|
| 50 |
-
|
| 51 |
-
Input: Jan
|
| 52 |
-
Output: Jan
|
| 53 |
-
|
| 54 |
-
Input: The Yankee with the most walks in the 1977 regular season was Reggie Jackson, with 58 walks. He had 357 at bats that season.
|
| 55 |
-
Output: 357
|
| 56 |
-
|
| 57 |
-
Input: broccoli, bell pepper, celery, fresh basil, green beans, lettuce, sweet potatoes, zucchini
|
| 58 |
-
Output: broccoli, bell pepper, celery, fresh basil, green beans, lettuce, sweet potatoes, zucchini
|
| 59 |
-
"""
|
| 60 |
-
},
|
| 61 |
-
{
|
| 62 |
-
"role": "user",
|
| 63 |
-
"content": answer
|
| 64 |
-
}
|
| 65 |
-
]
|
| 66 |
-
completion = self.client.chat.completions.create(
|
| 67 |
-
messages=message,
|
| 68 |
-
# Change model to compound-beta to use agentic tooling
|
| 69 |
-
model="llama-3.3-70b-versatile",
|
| 70 |
)
|
| 71 |
-
|
| 72 |
-
|
|
|
|
| 73 |
return answer
|
| 74 |
|
| 75 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
|
|
| 3 |
import requests
|
| 4 |
import inspect
|
| 5 |
import pandas as pd
|
| 6 |
from smolagents import CodeAgent, DuckDuckGoSearchTool, FinalAnswerTool, InferenceClientModel, Tool, tool, VisitWebpageTool, HfApiModel, LiteLLMModel
|
| 7 |
from groq import Groq
|
| 8 |
|
| 9 |
# (Keep Constants as is)
|
|
|
|
| 16 |
class BasicAgent:
    """Minimal agent wrapper: answers each question by running a smolagents
    CodeAgent backed by a Groq-hosted model accessed through LiteLLM.
    """

    def __init__(self):
        print("BasicAgent initialized.")
        # Groq exposes an OpenAI-compatible endpoint, so the model is addressed
        # through LiteLLM's "openai/" provider prefix with Groq's api_base.
        self.model = LiteLLMModel(
            "openai/deepseek-r1-distill-llama-70b",
            api_base="https://api.groq.com/openai/v1",
            # BUG FIX: was `api_key=api_key` — `api_key` is undefined in this
            # module; the credential actually available is the module-level
            # constant GROQ_KEY (read from os.environ near the top of the file).
            api_key=GROQ_KEY,
        )
        # NOTE(review): presumably set because this model rejects structured
        # (list-of-parts) chat content; flattening sends plain text instead.
        self.model.flatten_messages_as_text = True

    def __call__(self, question: str) -> str:
        """Run the agent on a single question and return its final answer text.

        Args:
            question: The full question text; only the first 50 chars are logged.

        Returns:
            The string produced by ``agent.run(question)``.
        """
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        agent = CodeAgent(
            tools=[DuckDuckGoSearchTool(), FinalAnswerTool(), VisitWebpageTool()],
            # BUG FIX: was `model=model` (undefined name). The model built in
            # __init__ lives on the instance.
            model=self.model,
            add_base_tools=True,
            verbosity_level=2,
        )

        response = agent.run(question)
        print(response)
        # BUG FIX: was `return answer` — `answer` is a leftover variable from
        # the removed Groq-client implementation and is undefined here, which
        # would raise NameError on every call.
        return response
|
| 38 |
|
| 39 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|