JuyeopDang committed on
Commit
90b76d3
·
verified ·
1 Parent(s): 54a8589

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +69 -114
app.py CHANGED
@@ -14,124 +14,76 @@ GROQ_KEY = os.environ['GROQ_KEY']
14
 
15
  # --- Basic Agent Definition ---
16
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
17
- class BasicAgent:
 
18
  def __init__(self):
19
- print("BasicAgent initialized.")
20
- self.client = Groq(api_key=GROQ_KEY)
21
-
22
- def __call__(self, question: str) -> str:
23
- print(f"Agent received question (first 50 chars): {question[:50]}...")
24
- try:
25
- model = LiteLLMModel(
26
  "llama-3.3-70b-versatile",
27
  api_base="https://api.groq.com/openai/v1",
28
  api_key=GROQ_KEY,
29
  )
30
- model.flatten_messages_as_text = True
31
-
32
- agent = CodeAgent(
33
- tools=[DuckDuckGoSearchTool(), FinalAnswerTool(), VisitWebpageTool(), PythonInterpreterTool()],
34
- model=model,
35
- )
36
- response = agent.run(question)
37
-
38
- except Exception:
39
- try:
40
- message = [
41
- {
42
- "role": "user",
43
- "content": question
44
- }]
45
- completion = self.client.chat.completions.create(
46
- messages=message,
47
- model="compound-beta",
48
- )
49
- answer = completion.choices[0].message.content
50
- print(f"First answer: {answer}")
51
- message=[
52
- {
53
- "role": "system",
54
- "content": """
55
- You are an expert in identifying and extracting definitive answers. Your sole task is to analyze the provided text, which is an agent's response, and extract only the conclusive final answer to the original user's query.
56
-
57
- Output only this core answer. Do not include any explanations, pleasantries, introductory phrases, or any surrounding text.
58
-
59
- Here The Examples:
60
-
61
- Input: ... Final answer: 12 ...
62
- You should output: 12
63
-
64
- Input: $\\boxed{b,c,e}$
65
- Output: b, c, e
66
-
67
- Input: Jan
68
- Output: Jan
69
-
70
- Input: The Yankee with the most walks in the 1977 regular season was Reggie Jackson, with 58 walks. He had 357 at bats that season.
71
- Output: 357
72
-
73
- Input: broccoli, bell pepper, celery, fresh basil, green beans, lettuce, sweet potatoes, zucchini
74
- Output: broccoli, bell pepper, celery, fresh basil, green beans, lettuce, sweet potatoes, zucchini
75
- """
76
- },
77
- {
78
- "role": "user",
79
- "content": answer
80
- }
81
- ]
82
- completion = self.client.chat.completions.create(
83
- messages=message,
84
- model="llama-3.1-8b-instant",
85
- )
86
- response = completion.choices[0].message.content
87
- except Exception:
88
- message = [
89
- {
90
- "role": "user",
91
- "content": question
92
- }]
93
- completion = self.client.chat.completions.create(
94
- messages=message,
95
- model="compound-beta-mini",
96
- )
97
- answer = completion.choices[0].message.content
98
- print(f"First answer: {answer}")
99
- message=[
100
- {
101
- "role": "system",
102
- "content": """
103
- You are an expert in identifying and extracting definitive answers. Your sole task is to analyze the provided text, which is an agent's response, and extract only the conclusive final answer to the original user's query.
104
-
105
- Output only this core answer. Do not include any explanations, pleasantries, introductory phrases, or any surrounding text.
106
-
107
- Here The Examples:
108
-
109
- Input: ... Final answer: 12 ...
110
- You should output: 12
111
-
112
- Input: $\\boxed{b,c,e}$
113
- Output: b, c, e
114
-
115
- Input: Jan
116
- Output: Jan
117
 
118
- Input: The Yankee with the most walks in the 1977 regular season was Reggie Jackson, with 58 walks. He had 357 at bats that season.
119
- Output: 357
 
 
 
 
 
120
 
121
- Input: broccoli, bell pepper, celery, fresh basil, green beans, lettuce, sweet potatoes, zucchini
122
- Output: broccoli, bell pepper, celery, fresh basil, green beans, lettuce, sweet potatoes, zucchini
123
- """
124
- },
125
- {
126
- "role": "user",
127
- "content": answer
128
- }
129
- ]
130
- completion = self.client.chat.completions.create(
131
- messages=message,
132
- model="llama-3.1-8b-instant",
133
- )
134
- response = completion.choices[0].message.content
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
135
  return response
136
 
137
  def run_and_submit_all( profile: gr.OAuthProfile | None):
@@ -155,7 +107,8 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
155
 
156
  # 1. Instantiate Agent ( modify this part to create your agent)
157
  try:
158
- agent = BasicAgent()
 
159
  except Exception as e:
160
  print(f"Error instantiating agent: {e}")
161
  return f"Error initializing agent: {e}", None
@@ -191,12 +144,14 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
191
  for item in questions_data:
192
  task_id = item.get("task_id")
193
  question_text = item.get("question")
194
- time.sleep(45)
195
  if not task_id or question_text is None:
196
  print(f"Skipping item with missing task_id or question: {item}")
197
  continue
198
  try:
199
- submitted_answer = agent(question_text)
 
 
200
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
201
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
202
  except Exception as e:
 
14
 
15
  # --- Basic Agent Definition ---
16
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
17
+
18
class LLaMaAgent:
    """Tool-using agent backed by Groq's ``llama-3.3-70b-versatile`` model.

    Builds a smolagents ``CodeAgent`` once at construction time (model and
    tool set are reused across calls) and runs it on each question passed
    to ``__call__``.
    """

    def __init__(self):
        # Construct the LLM once; reaching Groq through its
        # OpenAI-compatible endpoint via LiteLLM.
        # (Fixed: the original bound the model to BOTH `self.model` and a
        # throwaway local `model` in a chained assignment — the alias added
        # nothing and invited confusion about which name is authoritative.)
        self.model = LiteLLMModel(
            "llama-3.3-70b-versatile",
            api_base="https://api.groq.com/openai/v1",
            api_key=GROQ_KEY,
        )
        # NOTE(review): flattening structured messages to plain text is
        # presumably required by this endpoint — confirm against the
        # LiteLLM/Groq docs before removing.
        self.model.flatten_messages_as_text = True

        # Agent with web search, page visiting, a Python interpreter, and
        # an explicit final-answer tool.
        self.agent = CodeAgent(
            tools=[DuckDuckGoSearchTool(), FinalAnswerTool(), VisitWebpageTool(), PythonInterpreterTool()],
            model=self.model,
        )

    def __call__(self, question: str) -> str:
        """Run the agent on *question* and return its final answer string."""
        return self.agent.run(question)
35
+
36
class CompoundAgent:
    """Direct-chat fallback agent using Groq's ``compound-beta`` model.

    Pipeline per question:
      1. Ask ``compound-beta`` the raw question (its replies tend to be
         verbose).
      2. Feed that reply to ``llama-3.1-8b-instant`` with an extraction
         system prompt so only the bare final answer is returned.
    """

    # System prompt for the extraction pass — preserved verbatim from the
    # original inline literal (it is runtime-visible model input).
    EXTRACTION_SYSTEM_PROMPT = """
You are an expert in identifying and extracting definitive answers. Your sole task is to analyze the provided text, which is an agent's response, and extract only the conclusive final answer to the original user's query.

Output only this core answer. Do not include any explanations, pleasantries, introductory phrases, or any surrounding text.

Here The Examples:

Input: ... Final answer: 12 ...
You should output: 12

Input: $\\boxed{b,c,e}$
Output: b, c, e

Input: Jan
Output: Jan

Input: The Yankee with the most walks in the 1977 regular season was Reggie Jackson, with 58 walks. He had 357 at bats that season.
Output: 357

Input: broccoli, bell pepper, celery, fresh basil, green beans, lettuce, sweet potatoes, zucchini
Output: broccoli, bell pepper, celery, fresh basil, green beans, lettuce, sweet potatoes, zucchini
"""

    def __init__(self):
        # One Groq client reused for both pipeline stages.
        self.client = Groq(api_key=GROQ_KEY)

    def _extract_final_answer(self, answer: str) -> str:
        """Strip a verbose model reply down to its conclusive answer.

        Uses the lightweight ``llama-3.1-8b-instant`` model with the
        extraction system prompt; returns the extractor's raw text.
        """
        completion = self.client.chat.completions.create(
            messages=[
                {
                    "role": "system",
                    "content": self.EXTRACTION_SYSTEM_PROMPT,
                },
                {
                    "role": "user",
                    "content": answer,
                },
            ],
            model="llama-3.1-8b-instant",
        )
        return completion.choices[0].message.content

    def __call__(self, question: str) -> str:
        """Answer *question* and return only the extracted final answer."""
        # Stage 1: get a (possibly verbose) answer from compound-beta.
        # (The original reused one `message` local for two unrelated
        # payloads; each request now builds its own messages list.)
        completion = self.client.chat.completions.create(
            messages=[
                {
                    "role": "user",
                    "content": question,
                },
            ],
            model="compound-beta",
        )
        answer = completion.choices[0].message.content
        # Stage 2: condense to the bare final answer.
        return self._extract_final_answer(answer)
88
 
89
  def run_and_submit_all( profile: gr.OAuthProfile | None):
 
107
 
108
  # 1. Instantiate Agent ( modify this part to create your agent)
109
  try:
110
+ llama = LLaMaAgent()
111
+ compound = CompoundAgent()
112
  except Exception as e:
113
  print(f"Error instantiating agent: {e}")
114
  return f"Error initializing agent: {e}", None
 
144
  for item in questions_data:
145
  task_id = item.get("task_id")
146
  question_text = item.get("question")
147
+ time.sleep(30)
148
  if not task_id or question_text is None:
149
  print(f"Skipping item with missing task_id or question: {item}")
150
  continue
151
  try:
152
+ submitted_answer = llama(question_text)
153
+ if submitted_answer.contains('token'):
154
+ submitted_answer = compound(question_text)
155
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
156
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
157
  except Exception as e: