jbisal committed on
Commit
4e33a26
·
1 Parent(s): 7829849

Changed model

Browse files
Files changed (7) hide show
  1. .gitignore +2 -0
  2. .python-version +1 -0
  3. Gradio_UI.py +67 -34
  4. Gradio_UI3.py +381 -0
  5. app.py +14 -5
  6. prompts.yaml +8 -0
  7. requirements.txt +4 -2
.gitignore CHANGED
@@ -1,2 +1,4 @@
1
  # Ignore Python cache directories
2
  **/__pycache__/
 
 
 
1
  # Ignore Python cache directories
2
  **/__pycache__/
3
+ venv/
4
+ .DS_Store
.python-version ADDED
@@ -0,0 +1 @@
 
 
1
+ 3.11.9
Gradio_UI.py CHANGED
@@ -26,6 +26,8 @@ import gradio as gr
26
  from gradio.components import Markdown
27
  from gradio.components import Chatbot, Textbox, State, Button
28
 
 
 
29
  def pull_messages_from_step(
30
  step_log: MemoryStep,
31
  ):
@@ -109,16 +111,19 @@ def pull_messages_from_step(
109
  elif hasattr(step_log, "error") and step_log.error is not None:
110
  yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata={"title": "💥 Error"})
111
 
 
112
  # Calculate duration and token information
113
  step_footnote = f"{step_number}"
114
  if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
115
- token_str = (
116
- f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
117
- )
118
  step_footnote += token_str
119
  if hasattr(step_log, "duration"):
120
  step_duration = f" | Duration: {round(float(step_log.duration), 2)}" if step_log.duration else None
121
- step_footnote += step_duration
 
 
122
  step_footnote = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
123
  yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
124
  yield gr.ChatMessage(role="assistant", content="-----")
@@ -143,8 +148,19 @@ def stream_to_gradio(
143
  for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
144
  # Track tokens if model provides them
145
  if hasattr(agent.model, "last_input_token_count"):
146
- total_input_tokens += agent.model.last_input_token_count
147
- total_output_tokens += agent.model.last_output_token_count
 
 
 
 
 
 
 
 
 
 
 
148
  if isinstance(step_log, ActionStep):
149
  step_log.input_token_count = agent.model.last_input_token_count
150
  step_log.output_token_count = agent.model.last_output_token_count
@@ -155,6 +171,7 @@ def stream_to_gradio(
155
  yield message
156
 
157
  final_answer = step_log # Last log is the run's final_answer
 
158
  final_answer = handle_agent_output_types(final_answer)
159
 
160
  if isinstance(final_answer, AgentText):
@@ -173,7 +190,8 @@ def stream_to_gradio(
173
  content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
174
  )
175
  else:
176
- yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer)}")
 
177
 
178
 
179
  class GradioUI:
@@ -260,56 +278,71 @@ class GradioUI:
260
  )
261
 
262
  def launch(self, **kwargs): # <-- Moved inside the class and added self
263
- with gr.Blocks("base") as demo:
 
 
 
 
 
 
 
 
 
 
 
264
  # State to store chat messages and prompt strings
265
  stored_messages = State([])
266
  prompt_state = State("")
267
 
268
- with gr.Row():
269
 
270
  # Markdown Introduction
271
  gr.Markdown("""
272
- # NBAi - NBA Stats Chatbot 🏀
 
273
 
274
- Welcome to **NBAi**, your personal NBA statistics assistant! This app fetches and presents NBA box scores from last night's games, giving you insights on player performance, team stats, and more.
275
-
 
 
276
  ## Features
277
  - Ask questions like:
278
  - Who had the most points last night?
279
  - Who grabbed the most rebounds?
280
  - Who had the highest assist-to-turnover ratio?
281
-
282
  ## Tools Used
283
  - **[smolagents](https://github.com/huggingface/smolagents)** for building multi-step agents.
284
  - **[Gradio](https://www.gradio.app/docs)** for the user interface.
285
  - **[BeautifulSoup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/)** and **[Pandas](https://pandas.pydata.org/docs/index.html)** for web scraping and data processing.
286
-
287
  ## How to Use
288
  - Click one of the quick prompt buttons below or type your own question.
289
  - The chatbot will respond with detailed NBA statistics from last night's games.
290
 
291
  ---
292
- """)
293
- with gr.Column():
294
- # Chatbot Interface
295
- chatbot = Chatbot(
296
- label="NBAi Chatbot",
297
- type="messages",
298
- avatar_images=(
299
- None,
300
- "https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/Alfred.png",
301
- ),
302
- resizeable=True,
303
- scale=1,
304
- )
 
 
 
 
 
305
 
306
- # Textbox for Custom User Input
307
- text_input = Textbox(lines=1, label="Your Question")
308
- # Quick Prompt Buttons
309
- with gr.Row():
310
- btn_points = Button(value="Most Points", variant="primary")
311
- btn_rebounds = Button(value="Most Rebounds", variant="primary")
312
- btn_assist_to_turnover = Button(value="Best Assist-to-Turnover Ratio", variant="primary")
313
 
314
 
315
 
 
26
  from gradio.components import Markdown
27
  from gradio.components import Chatbot, Textbox, State, Button
28
 
29
+ from smolagents.memory import TokenUsage
30
+
31
  def pull_messages_from_step(
32
  step_log: MemoryStep,
33
  ):
 
111
  elif hasattr(step_log, "error") and step_log.error is not None:
112
  yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata={"title": "💥 Error"})
113
 
114
+ # Calculate duration and token information
115
  # Calculate duration and token information
116
  step_footnote = f"{step_number}"
117
  if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
118
+ input_tokens = step_log.input_token_count if step_log.input_token_count is not None else 0
119
+ output_tokens = step_log.output_token_count if step_log.output_token_count is not None else 0
120
+ token_str = f" | Input-tokens:{input_tokens:,} | Output-tokens:{output_tokens:,}"
121
  step_footnote += token_str
122
  if hasattr(step_log, "duration"):
123
  step_duration = f" | Duration: {round(float(step_log.duration), 2)}" if step_log.duration else None
124
+ if step_duration:
125
+ step_footnote += step_duration
126
+
127
  step_footnote = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
128
  yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
129
  yield gr.ChatMessage(role="assistant", content="-----")
 
148
  for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
149
  # Track tokens if model provides them
150
  if hasattr(agent.model, "last_input_token_count"):
151
+
152
+ # total_output_tokens += agent.model.last_output_token_count
153
+
154
+ if agent.memory.steps:
155
+ last_step = agent.memory.steps[-1]
156
+ # import pdb; pdb.set_trace()
157
+ if isinstance(last_step, ActionStep) and hasattr(last_step, "token_usage"):
158
+ token_usage = last_step.token_usage
159
+ if token_usage and isinstance(token_usage, TokenUsage):
160
+ total_input_tokens += token_usage.input_tokens
161
+
162
+
163
+
164
  if isinstance(step_log, ActionStep):
165
  step_log.input_token_count = agent.model.last_input_token_count
166
  step_log.output_token_count = agent.model.last_output_token_count
 
171
  yield message
172
 
173
  final_answer = step_log # Last log is the run's final_answer
174
+ # import pdb; pdb.set_trace()
175
  final_answer = handle_agent_output_types(final_answer)
176
 
177
  if isinstance(final_answer, AgentText):
 
190
  content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
191
  )
192
  else:
193
+ # import pdb; pdb.set_trace()
194
+ yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer.output)}")
195
 
196
 
197
  class GradioUI:
 
278
  )
279
 
280
  def launch(self, **kwargs): # <-- Moved inside the class and added self
281
+ import gradio as gr
282
+
283
+ css = """
284
+ .gradio-container { height: 100vh !important; } /* make the whole app viewport‑tall */
285
+ #chatbot {
286
+ flex-grow: 1 !important; /* fill extra vertical space */
287
+ overflow: auto !important; /* scroll when content overflows */
288
+ }
289
+ """
290
+
291
+ with gr.Blocks(theme="base", fill_height=True, fill_width=True, css=css) as demo:
292
+
293
  # State to store chat messages and prompt strings
294
  stored_messages = State([])
295
  prompt_state = State("")
296
 
297
+ with gr.Sidebar():
298
 
299
  # Markdown Introduction
300
  gr.Markdown("""
301
+ # 🏀 NBAi 🏀
302
+ ## NBA Stats Chatbot
303
 
304
+ Welcome to **NBAi**, your personal NBA statistics assistant!
305
+
306
+ This app fetches and presents NBA box scores from last night's games, giving you insights on player performance, team stats, and more.
307
+ <br/><br/>
308
  ## Features
309
  - Ask questions like:
310
  - Who had the most points last night?
311
  - Who grabbed the most rebounds?
312
  - Who had the highest assist-to-turnover ratio?
313
+ <br/><br/>
314
  ## Tools Used
315
  - **[smolagents](https://github.com/huggingface/smolagents)** for building multi-step agents.
316
  - **[Gradio](https://www.gradio.app/docs)** for the user interface.
317
  - **[BeautifulSoup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/)** and **[Pandas](https://pandas.pydata.org/docs/index.html)** for web scraping and data processing.
318
+ <br/><br/>
319
  ## How to Use
320
  - Click one of the quick prompt buttons below or type your own question.
321
  - The chatbot will respond with detailed NBA statistics from last night's games.
322
 
323
  ---
324
+ """, elem_id="sidebar-markdown")
325
+ # Quick Prompt Buttons
326
+ with gr.Column():
327
+ btn_points = Button(value="Most Points", variant="primary")
328
+ btn_rebounds = Button(value="Most Rebounds", variant="primary")
329
+ btn_assist_to_turnover = Button(value="Best Assist-to-Turnover Ratio", variant="primary")
330
+ with gr.Column():
331
+ # Chatbot Interface
332
+ chatbot = Chatbot(
333
+ label="NBAi Chatbot",
334
+ type="messages",
335
+ avatar_images=(
336
+ None,
337
+ "https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/Alfred.png",
338
+ ),
339
+ resizeable=True,
340
+ elem_id="chatbot"
341
+ )
342
 
343
+ # Textbox for Custom User Input
344
+ text_input = Textbox(lines=1, label="Your Question")
345
+
 
 
 
 
346
 
347
 
348
 
Gradio_UI3.py ADDED
@@ -0,0 +1,381 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import mimetypes
17
+ import os
18
+ import re
19
+ import shutil
20
+ from typing import Optional
21
+ from smolagents.agent_types import AgentAudio, AgentImage, AgentText, handle_agent_output_types
22
+ from smolagents.agents import ActionStep, MultiStepAgent
23
+ from smolagents.memory import MemoryStep
24
+ from smolagents.utils import _is_package_available
25
+ import gradio as gr
26
+ from gradio.components import Markdown
27
+ from gradio.components import Chatbot, Textbox, State, Button
28
+
29
+ from smolagents.memory import TokenUsage
30
+
31
def pull_messages_from_step(
    step_log: MemoryStep,
):
    """Extract gr.ChatMessage objects from a single agent memory step.

    Yields messages in display order for: the step number, the model's
    reasoning text, any tool call (with execution logs / errors nested under
    it), standalone errors, and a footnote with token/duration information.

    Args:
        step_log: One MemoryStep from the agent run. Only ActionStep
            instances produce any output; other step types yield nothing.
    """
    import gradio as gr

    if isinstance(step_log, ActionStep):
        # Output the step number
        step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else ""
        yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")

        # First yield the thought/reasoning from the LLM
        if hasattr(step_log, "model_output") and step_log.model_output is not None:
            # Clean up the LLM output: strip trailing <end_code> markers in the
            # several orders/spacings the model may emit them.
            model_output = step_log.model_output.strip()
            model_output = re.sub(r"```\s*<end_code>", "```", model_output)  # handles ```<end_code>
            model_output = re.sub(r"<end_code>\s*```", "```", model_output)  # handles <end_code>```
            model_output = re.sub(r"```\s*\n\s*<end_code>", "```", model_output)  # handles ```\n<end_code>
            model_output = model_output.strip()
            yield gr.ChatMessage(role="assistant", content=model_output)

        # For tool calls, create a parent message
        if hasattr(step_log, "tool_calls") and step_log.tool_calls is not None:
            first_tool_call = step_log.tool_calls[0]
            used_code = first_tool_call.name == "python_interpreter"
            parent_id = f"call_{len(step_log.tool_calls)}"

            # Tool-call arguments may arrive as a dict or a raw string.
            args = first_tool_call.arguments
            if isinstance(args, dict):
                content = str(args.get("answer", str(args)))
            else:
                content = str(args).strip()

            if used_code:
                # Normalize code snippets into a single ```python fence.
                content = re.sub(r"```.*?\n", "", content)  # Remove existing code blocks
                content = re.sub(r"\s*<end_code>\s*", "", content)  # Remove end_code tags
                content = content.strip()
                if not content.startswith("```python"):
                    content = f"```python\n{content}\n```"

            parent_message_tool = gr.ChatMessage(
                role="assistant",
                content=content,
                metadata={
                    "title": f"🛠️ Used tool {first_tool_call.name}",
                    "id": parent_id,
                    "status": "pending",
                },
            )
            yield parent_message_tool

            # Nest execution logs under the tool call, only when there is
            # actual content to show.
            if hasattr(step_log, "observations") and (
                step_log.observations is not None and step_log.observations.strip()
            ):
                log_content = step_log.observations.strip()
                if log_content:
                    log_content = re.sub(r"^Execution logs:\s*", "", log_content)
                    yield gr.ChatMessage(
                        role="assistant",
                        content=f"{log_content}",
                        metadata={"title": "📝 Execution Logs", "parent_id": parent_id, "status": "done"},
                    )

            # Nest any errors under the tool call.
            if hasattr(step_log, "error") and step_log.error is not None:
                yield gr.ChatMessage(
                    role="assistant",
                    content=str(step_log.error),
                    metadata={"title": "💥 Error", "parent_id": parent_id, "status": "done"},
                )

            # Flip the parent message to done in place, without yielding a new
            # message (the UI updates the already-yielded object).
            parent_message_tool.metadata["status"] = "done"

        # Handle standalone errors (not attached to a tool call).
        elif hasattr(step_log, "error") and step_log.error is not None:
            yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata={"title": "💥 Error"})

        # Calculate duration and token information for the footnote.
        step_footnote = f"{step_number}"
        if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
            # Counts can be None when the model did not report usage.
            input_tokens = step_log.input_token_count if step_log.input_token_count is not None else 0
            output_tokens = step_log.output_token_count if step_log.output_token_count is not None else 0
            step_footnote += f" | Input-tokens:{input_tokens:,} | Output-tokens:{output_tokens:,}"
        if hasattr(step_log, "duration"):
            step_duration = f" | Duration: {round(float(step_log.duration), 2)}" if step_log.duration else None
            if step_duration:
                step_footnote += step_duration

        step_footnote = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
        yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
        yield gr.ChatMessage(role="assistant", content="-----")
130
+
131
+
132
def stream_to_gradio(
    agent,
    task: str,
    reset_agent_memory: bool = False,
    additional_args: Optional[dict] = None,
):
    """Run *agent* on *task* and stream the steps as gradio ChatMessages.

    Args:
        agent: A smolagents agent exposing ``run()``, ``model`` and ``memory``.
        task: The user prompt to execute.
        reset_agent_memory: Whether to clear agent memory before running.
        additional_args: Extra kwargs forwarded to ``agent.run()``.

    Yields:
        gr.ChatMessage objects for every step, then a final-answer message.
    """
    if not _is_package_available("gradio"):
        raise ModuleNotFoundError(
            "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
        )
    import gradio as gr

    total_input_tokens = 0
    total_output_tokens = 0

    step_log = None  # guard: agent.run() yielding nothing would otherwise leave this unbound
    for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
        # Track tokens if the model provides them.
        if hasattr(agent.model, "last_input_token_count"):
            # Newer smolagents versions attach a TokenUsage object to the
            # memory step rather than exposing counts on the model directly.
            if agent.memory.steps:
                last_step = agent.memory.steps[-1]
                if isinstance(last_step, ActionStep) and hasattr(last_step, "token_usage"):
                    token_usage = last_step.token_usage
                    if token_usage and isinstance(token_usage, TokenUsage):
                        total_input_tokens += token_usage.input_tokens
                        # Fix: output tokens were never accumulated (the line
                        # was left commented out in the original).
                        total_output_tokens += token_usage.output_tokens

            if isinstance(step_log, ActionStep):
                step_log.input_token_count = agent.model.last_input_token_count
                step_log.output_token_count = agent.model.last_output_token_count

        for message in pull_messages_from_step(
            step_log,
        ):
            yield message

    final_answer = step_log  # Last log is the run's final_answer
    final_answer = handle_agent_output_types(final_answer)

    if isinstance(final_answer, AgentText):
        yield gr.ChatMessage(
            role="assistant",
            content=f"**Final answer:**\n{final_answer.to_string()}\n",
        )
    elif isinstance(final_answer, AgentImage):
        yield gr.ChatMessage(
            role="assistant",
            content={"path": final_answer.to_string(), "mime_type": "image/png"},
        )
    elif isinstance(final_answer, AgentAudio):
        yield gr.ChatMessage(
            role="assistant",
            content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
        )
    else:
        # Some final steps carry the value in `.output` (FinalAnswerStep-like
        # objects); fall back to the object itself so plain values don't raise
        # AttributeError (the original accessed `.output` unconditionally).
        final_output = getattr(final_answer, "output", final_answer)
        yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_output)}")
195
+
196
+
197
class GradioUI:
    """A one-line interface to launch your agent in Gradio."""

    def __init__(self, agent: MultiStepAgent, file_upload_folder: str | None = None):
        """Wrap *agent*; optionally create *file_upload_folder* for uploads."""
        if not _is_package_available("gradio"):
            raise ModuleNotFoundError(
                "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
            )
        self.agent = agent
        self.file_upload_folder = file_upload_folder
        if self.file_upload_folder is not None:
            if not os.path.exists(file_upload_folder):
                os.mkdir(file_upload_folder)

    def interact_with_agent(self, prompt, messages):
        """Append the user prompt, then stream agent messages into the chat.

        Yields the growing *messages* list after each new ChatMessage so the
        Chatbot component updates incrementally.
        """
        import gradio as gr

        messages.append(gr.ChatMessage(role="user", content=prompt))
        yield messages
        for msg in stream_to_gradio(self.agent, task=prompt, reset_agent_memory=False):
            messages.append(msg)
            yield messages
        yield messages

    def upload_file(
        self,
        file,
        file_uploads_log,
        allowed_file_types=[
            "application/pdf",
            "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
            "text/plain",
        ],
    ):
        """
        Handle file uploads; default allowed types are .pdf, .docx, and .txt.

        Returns a (status Textbox, updated upload log) pair.
        """
        import gradio as gr

        if file is None:
            return gr.Textbox("No file uploaded", visible=True), file_uploads_log

        try:
            mime_type, _ = mimetypes.guess_type(file.name)
        except Exception as e:
            return gr.Textbox(f"Error: {e}", visible=True), file_uploads_log

        if mime_type not in allowed_file_types:
            return gr.Textbox("File type disallowed", visible=True), file_uploads_log

        # Sanitize file name: replace any non-alphanumeric, non-dash, or
        # non-dot characters with underscores.
        original_name = os.path.basename(file.name)
        sanitized_name = re.sub(r"[^\w\-.]", "_", original_name)

        # Build a reverse map from mime type to a canonical extension.
        type_to_ext = {}
        for ext, t in mimetypes.types_map.items():
            if t not in type_to_ext:
                type_to_ext[t] = ext

        # Ensure the extension correlates to the mime type.
        # NOTE(review): a name without a dot collapses to just the extension
        # here (`"name".split(".")[:-1]` is empty) — confirm intended.
        sanitized_name = sanitized_name.split(".")[:-1]
        sanitized_name.append("" + type_to_ext[mime_type])
        sanitized_name = "".join(sanitized_name)

        # Save the uploaded file to the specified folder.
        file_path = os.path.join(self.file_upload_folder, os.path.basename(sanitized_name))
        shutil.copy(file.name, file_path)

        return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path]

    def log_user_message(self, text_input, file_uploads_log):
        """Augment the user's text with the upload log; clear the textbox."""
        return (
            text_input
            + (
                f"\nYou have been provided with these files, which might be helpful or not: {file_uploads_log}"
                if len(file_uploads_log) > 0
                else ""
            ),
            "",
        )

    def launch(self, **kwargs):
        """Build the Blocks UI and launch it; extra kwargs go to demo.launch()."""
        import gradio as gr

        css = """
        .gradio-container { height: 100vh !important; } /* make the whole app viewport-tall */
        #chatbot {
            flex-grow: 1 !important;   /* fill extra vertical space */
            overflow: auto !important; /* scroll when content overflows */
        }
        """

        with gr.Blocks(theme="base", fill_height=True, fill_width=True, css=css) as demo:

            # State to store chat messages and prompt strings
            stored_messages = State([])
            prompt_state = State("")

            with gr.Sidebar():

                # Markdown Introduction
                gr.Markdown("""
                # 🏀 NBAi 🏀
                ## NBA Stats Chatbot

                Welcome to **NBAi**, your personal NBA statistics assistant!

                This app fetches and presents NBA box scores from last night's games, giving you insights on player performance, team stats, and more.
                <br/><br/>
                ## Features
                - Ask questions like:
                - Who had the most points last night?
                - Who grabbed the most rebounds?
                - Who had the highest assist-to-turnover ratio?
                <br/><br/>
                ## Tools Used
                - **[smolagents](https://github.com/huggingface/smolagents)** for building multi-step agents.
                - **[Gradio](https://www.gradio.app/docs)** for the user interface.
                - **[BeautifulSoup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/)** and **[Pandas](https://pandas.pydata.org/docs/index.html)** for web scraping and data processing.
                <br/><br/>
                ## How to Use
                - Click one of the quick prompt buttons below or type your own question.
                - The chatbot will respond with detailed NBA statistics from last night's games.

                ---
                """, elem_id="sidebar-markdown")
                # Quick Prompt Buttons
                with gr.Column():
                    btn_points = Button(value="Most Points", variant="primary")
                    btn_rebounds = Button(value="Most Rebounds", variant="primary")
                    btn_assist_to_turnover = Button(value="Best Assist-to-Turnover Ratio", variant="primary")
            with gr.Column():
                # Chatbot Interface
                chatbot = Chatbot(
                    label="NBAi Chatbot",
                    type="messages",
                    avatar_images=(
                        None,
                        "https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/Alfred.png",
                    ),
                    # NOTE(review): recent gradio releases spell this
                    # "resizable" — confirm against the installed version.
                    resizeable=True,
                    elem_id="chatbot"
                )

                # Textbox for Custom User Input
                text_input = Textbox(lines=1, label="Your Question")

            # Bindings for Buttons using prompt_state: first set the canned
            # prompt into state, then feed it to the agent.
            btn_points.click(
                lambda: "Who had the most points in last night's NBA games?",
                [],
                prompt_state
            ).then(self.interact_with_agent, [prompt_state, stored_messages], chatbot)

            btn_rebounds.click(
                lambda: "Who had the most rebounds in last night's NBA games?",
                [],
                prompt_state
            ).then(self.interact_with_agent, [prompt_state, stored_messages], chatbot)

            btn_assist_to_turnover.click(
                lambda: "Who had the highest ratio of assists to turnovers in last night's NBA games?",
                [],
                prompt_state
            ).then(self.interact_with_agent, [prompt_state, stored_messages], chatbot)

            # Custom Input Submission
            text_input.submit(
                self.interact_with_agent,
                [text_input, stored_messages],
                chatbot
            )

        # Fix: passing debug/share positionally alongside **kwargs raised
        # "got multiple values for keyword argument" whenever a caller supplied
        # either one. setdefault keeps the old defaults but allows overrides.
        kwargs.setdefault("debug", True)
        kwargs.setdefault("share", True)
        demo.launch(**kwargs)
379
+
380
+
381
+ __all__ = ["stream_to_gradio", "GradioUI"]
app.py CHANGED
@@ -12,6 +12,10 @@ from bs4 import BeautifulSoup # Fixed Import
12
  from tools.final_answer import FinalAnswerTool
13
  from Gradio_UI import GradioUI
14
 
 
 
 
 
15
  logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
16
 
17
  @tool
@@ -135,13 +139,16 @@ search_tool = DuckDuckGoSearchTool()
135
  visit_webpage_tool = VisitWebpageTool()
136
  user_input_tool = UserInputTool()
137
 
138
- model = HfApiModel(
139
- max_tokens=2096,
140
- temperature=0.5,
141
- model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
142
- custom_role_conversions=None,
 
 
143
  )
144
 
 
145
  # Import tool from Hub
146
  image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
147
 
@@ -149,6 +156,8 @@ image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_co
149
  with open("prompts.yaml", 'r') as stream:
150
  prompt_templates = yaml.safe_load(stream)
151
 
 
 
152
  # Setup the Agent
153
  agent = CodeAgent(
154
  model=model,
 
12
  from tools.final_answer import FinalAnswerTool
13
  from Gradio_UI import GradioUI
14
 
15
+ import pdb
16
+
17
+
18
+
19
  logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
20
 
21
  @tool
 
139
  visit_webpage_tool = VisitWebpageTool()
140
  user_input_tool = UserInputTool()
141
 
142
import os

from smolagents import OpenAIServerModel

# Fail fast with an actionable message if the key is missing; the original
# os.environ["OPENAI_API_KEY"] lookup raised a bare KeyError at import time.
_openai_api_key = os.environ.get("OPENAI_API_KEY")
if not _openai_api_key:
    raise RuntimeError(
        "OPENAI_API_KEY environment variable is not set; it is required to use the gpt-4o model."
    )

# Chat model served through the OpenAI API (replaces the previous HfApiModel).
model = OpenAIServerModel(
    model_id="gpt-4o",
    api_base="https://api.openai.com/v1",
    api_key=_openai_api_key
)
150
 
151
+
152
  # Import tool from Hub
153
  image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
154
 
 
156
  with open("prompts.yaml", 'r') as stream:
157
  prompt_templates = yaml.safe_load(stream)
158
 
159
+ # pdb.set_trace()
160
+
161
  # Setup the Agent
162
  agent = CodeAgent(
163
  model=model,
prompts.yaml CHANGED
@@ -319,3 +319,11 @@
319
  "report": |-
320
  Here is the final answer from your managed agent '{{name}}':
321
  {{final_answer}}
 
 
 
 
 
 
 
 
 
319
  "report": |-
320
  Here is the final answer from your managed agent '{{name}}':
321
  {{final_answer}}
322
+
323
+ final_answer:
324
+ pre_messages: |-
325
+ You are about to provide a final answer based on the task and steps performed.
326
+ post_messages: |-
327
+ Now write your final answer below using everything you have learned. End with a short summary of your reasoning.
328
+
329
+ Thank you for using the NBA Box Scores Agent. Please contact justin@viz-explainer if you have any questions.
requirements.txt CHANGED
@@ -1,7 +1,9 @@
1
  markdownify
2
- smolagents==1.13.0
3
  requests
4
  duckduckgo_search
5
  pandas
6
  matplotlib
7
- bs4
 
 
 
1
  markdownify
2
+ git+https://github.com/huggingface/smolagents.git
3
  requests
4
  duckduckgo_search
5
  pandas
6
  matplotlib
7
+ bs4
8
+ gradio
9
+ smolagents[openai]  # NOTE(review): duplicates the git+https smolagents line above — pin a single smolagents source