JuyeopDang committed on
Commit
07be671
·
verified ·
1 Parent(s): 5508b31

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -33
app.py CHANGED
@@ -3,7 +3,7 @@ import gradio as gr
3
  import requests
4
  import inspect
5
  import pandas as pd
6
- from smolagents import CodeAgent, DuckDuckGoSearchTool, FinalAnswerTool, PythonInterpreterTool, VisitWebpageTool, LiteLLMModel, ToolCallingAgent
7
  from groq import Groq
8
  import time
9
 
@@ -17,7 +17,7 @@ GROQ_KEY = os.environ['GROQ_KEY']
17
 
18
  class LLaMaAgent:
19
  def __init__(self):
20
- self.model = model = LiteLLMModel(
21
  "meta-llama/llama-4-scout-17b-16e-instruct",
22
  api_base="https://api.groq.com/openai/v1",
23
  api_key=GROQ_KEY,
@@ -25,8 +25,9 @@ class LLaMaAgent:
25
  self.model.flatten_messages_as_text = True
26
 
27
  self.agent = CodeAgent(
28
- tools=[DuckDuckGoSearchTool(), FinalAnswerTool(), VisitWebpageTool(), PythonInterpreterTool()],
29
- model=model,
 
30
  )
31
 
32
  def __call__(self, question: str) -> str:
@@ -37,28 +38,6 @@ class LLaMaAgent:
37
  print("First LLaMa Error!!!")
38
  raise
39
 
40
- class LLaMaAgent2:
41
- def __init__(self):
42
- self.model = model = LiteLLMModel(
43
- "meta-llama/llama-4-scout-17b-16e-instruct",
44
- api_base="https://api.groq.com/openai/v1",
45
- api_key=GROQ_KEY,
46
- )
47
- self.model.flatten_messages_as_text = True
48
-
49
- self.agent = ToolCallingAgent(
50
- tools=[DuckDuckGoSearchTool(), FinalAnswerTool(), VisitWebpageTool(), PythonInterpreterTool()],
51
- model=model,
52
- )
53
-
54
- def __call__(self, question: str) -> str:
55
- try:
56
- response = self.agent.run(question)
57
- return response
58
- except Exception as e:
59
- print("Third LLaMa Error!!!")
60
- raise
61
-
62
  def run_and_submit_all( profile: gr.OAuthProfile | None):
63
  """
64
  Fetches all questions, runs the BasicAgent on them, submits all answers,
@@ -81,7 +60,6 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
81
  # 1. Instantiate Agent ( modify this part to create your agent)
82
  try:
83
  llama = LLaMaAgent()
84
- llama2 = LLaMaAgent2()
85
  except Exception as e:
86
  print(f"Error instantiating agent: {e}")
87
  return f"Error initializing agent: {e}", None
@@ -121,12 +99,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
121
  print(f"Skipping item with missing task_id or question: {item}")
122
  continue
123
  try:
124
- try:
125
- submitted_answer = llama(question_text)
126
- except Exception as ke:
127
- print("Second LLaMa Error!")
128
- time.sleep(60)
129
- submitted_answer = llama2(question_text)
130
  print(f"\n\n### Answer{submitted_answer} ###\n\n")
131
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
132
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
 
3
  import requests
4
  import inspect
5
  import pandas as pd
6
+ from smolagents import CodeAgent, DuckDuckGoSearchTool, FinalAnswerTool, PythonInterpreterTool, VisitWebpageTool, LiteLLMModel, ToolCallingAgent, WebSearchTool
7
  from groq import Groq
8
  import time
9
 
 
17
 
18
  class LLaMaAgent:
19
  def __init__(self):
20
+ self.model = LiteLLMModel(
21
  "meta-llama/llama-4-scout-17b-16e-instruct",
22
  api_base="https://api.groq.com/openai/v1",
23
  api_key=GROQ_KEY,
 
25
  self.model.flatten_messages_as_text = True
26
 
27
  self.agent = CodeAgent(
28
+ tools=[DuckDuckGoSearchTool(), FinalAnswerTool(), VisitWebpageTool(), PythonInterpreterTool(), WebSearchTool()],
29
+ add_base_tools=True,
30
+ model=self.model,
31
  )
32
 
33
  def __call__(self, question: str) -> str:
 
38
  print("First LLaMa Error!!!")
39
  raise
40
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
  def run_and_submit_all( profile: gr.OAuthProfile | None):
42
  """
43
  Fetches all questions, runs the BasicAgent on them, submits all answers,
 
60
  # 1. Instantiate Agent ( modify this part to create your agent)
61
  try:
62
  llama = LLaMaAgent()
 
63
  except Exception as e:
64
  print(f"Error instantiating agent: {e}")
65
  return f"Error initializing agent: {e}", None
 
99
  print(f"Skipping item with missing task_id or question: {item}")
100
  continue
101
  try:
102
+ submitted_answer = llama(question_text)
 
 
 
 
 
103
  print(f"\n\n### Answer{submitted_answer} ###\n\n")
104
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
105
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})