chore: ruff it up and update imports
- Gradio_UI.py +64 -21
- app.py +20 -16
- tools/final_answer.py +6 -2
- tools/visit_webpage.py +6 -5
- tools/web_search.py +9 -5
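Note: the diff below is almost entirely mechanical. Assuming ruff's default settings (88-column line length, double quotes, two blank lines around top-level definitions), running `ruff check --select I --fix .` (import sorting and unused-import removal) followed by `ruff format .` would reproduce most of it; the remaining edits, such as the moved imports in app.py and the `max_results: int = 10` annotation in tools/web_search.py, look hand-made.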
Gradio_UI.py
CHANGED
```diff
@@ -19,7 +19,12 @@ import re
 import shutil
 from typing import Optional
 
-from smolagents.agent_types import AgentAudio, AgentImage, AgentText, handle_agent_output_types
+from smolagents.agent_types import (
+    AgentAudio,
+    AgentImage,
+    AgentText,
+    handle_agent_output_types,
+)
 from smolagents.agents import ActionStep, MultiStepAgent
 from smolagents.memory import MemoryStep
 from smolagents.utils import _is_package_available
@@ -33,7 +38,9 @@ def pull_messages_from_step(
 
     if isinstance(step_log, ActionStep):
         # Output the step number
-        step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else ""
+        step_number = (
+            f"Step {step_log.step_number}" if step_log.step_number is not None else ""
+        )
         yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")
 
         # First yield the thought/reasoning from the LLM
@@ -41,9 +48,15 @@ def pull_messages_from_step(
             # Clean up the LLM output
             model_output = step_log.model_output.strip()
             # Remove any trailing <end_code> and extra backticks, handling multiple possible formats
-            model_output = re.sub(r"```\s*<end_code>", "```", model_output)  # handles ```<end_code>
-            model_output = re.sub(r"<end_code>\s*```", "```", model_output)  # handles <end_code>```
-            model_output = re.sub(r"```\s*\n\s*<end_code>", "```", model_output)  # handles ```\n<end_code>
+            model_output = re.sub(
+                r"```\s*<end_code>", "```", model_output
+            )  # handles ```<end_code>
+            model_output = re.sub(
+                r"<end_code>\s*```", "```", model_output
+            )  # handles <end_code>```
+            model_output = re.sub(
+                r"```\s*\n\s*<end_code>", "```", model_output
+            )  # handles ```\n<end_code>
             model_output = model_output.strip()
             yield gr.ChatMessage(role="assistant", content=model_output)
 
@@ -63,8 +76,12 @@ def pull_messages_from_step(
 
             if used_code:
                 # Clean up the content by removing any end code tags
-                content = re.sub(r"```.*?\n", "", content)  # Remove existing code blocks
-                content = re.sub(r"\s*<end_code>\s*", "", content)  # Remove end_code tags
+                content = re.sub(
+                    r"```.*?\n", "", content
+                )  # Remove existing code blocks
+                content = re.sub(
+                    r"\s*<end_code>\s*", "", content
+                )  # Remove end_code tags
                 content = content.strip()
                 if not content.startswith("```python"):
                     content = f"```python\n{content}\n```"
@@ -90,7 +107,11 @@ def pull_messages_from_step(
                 yield gr.ChatMessage(
                     role="assistant",
                     content=f"{log_content}",
-                    metadata={"title": "📝 Execution Logs", "parent_id": parent_id, "status": "done"},
+                    metadata={
+                        "title": "📝 Execution Logs",
+                        "parent_id": parent_id,
+                        "status": "done",
+                    },
                 )
 
         # Nesting any errors under the tool call
@@ -98,7 +119,11 @@ def pull_messages_from_step(
             yield gr.ChatMessage(
                 role="assistant",
                 content=str(step_log.error),
-                metadata={"title": "💥 Error", "parent_id": parent_id, "status": "done"},
+                metadata={
+                    "title": "💥 Error",
+                    "parent_id": parent_id,
+                    "status": "done",
+                },
             )
 
         # Update parent message metadata to done status without yielding a new message
@@ -106,17 +131,25 @@ def pull_messages_from_step(
 
         # Handle standalone errors but not from tool calls
         elif hasattr(step_log, "error") and step_log.error is not None:
-            yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata={"title": "💥 Error"})
+            yield gr.ChatMessage(
+                role="assistant",
+                content=str(step_log.error),
+                metadata={"title": "💥 Error"},
+            )
 
         # Calculate duration and token information
         step_footnote = f"{step_number}"
-        if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
-            token_str = (
-                f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
-            )
+        if hasattr(step_log, "input_token_count") and hasattr(
+            step_log, "output_token_count"
+        ):
+            token_str = f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
            step_footnote += token_str
         if hasattr(step_log, "duration"):
-            step_duration = f" | Duration: {round(float(step_log.duration), 2)}" if step_log.duration else None
+            step_duration = (
+                f" | Duration: {round(float(step_log.duration), 2)}"
+                if step_log.duration
+                else None
+            )
             step_footnote += step_duration
         step_footnote = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
         yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
@@ -139,7 +172,9 @@ def stream_to_gradio(
     total_input_tokens = 0
     total_output_tokens = 0
 
-    for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
+    for step_log in agent.run(
+        task, stream=True, reset=reset_agent_memory, additional_args=additional_args
+    ):
         # Track tokens if model provides them
         if hasattr(agent.model, "last_input_token_count"):
             total_input_tokens += agent.model.last_input_token_count
@@ -172,7 +207,9 @@ def stream_to_gradio(
             content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
         )
     else:
-        yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer)}")
+        yield gr.ChatMessage(
+            role="assistant", content=f"**Final answer:** {str(final_answer)}"
+        )
 
 
 class GradioUI:
@@ -242,10 +279,14 @@ class GradioUI:
         sanitized_name = "".join(sanitized_name)
 
         # Save the uploaded file to the specified folder
-        file_path = os.path.join(self.file_upload_folder, os.path.basename(sanitized_name))
+        file_path = os.path.join(
+            self.file_upload_folder, os.path.basename(sanitized_name)
+        )
         shutil.copy(file.name, file_path)
 
-        return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path]
+        return gr.Textbox(
+            f"File uploaded: {file_path}", visible=True
+        ), file_uploads_log + [file_path]
 
     def log_user_message(self, text_input, file_uploads_log):
         return (
@@ -277,7 +318,9 @@ class GradioUI:
             # If an upload folder is provided, enable the upload feature
             if self.file_upload_folder is not None:
                 upload_file = gr.File(label="Upload a file")
-                upload_status = gr.Textbox(label="Upload Status", interactive=False, visible=False)
+                upload_status = gr.Textbox(
+                    label="Upload Status", interactive=False, visible=False
+                )
                 upload_file.change(
                     self.upload_file,
                     [upload_file, file_uploads_log],
@@ -293,4 +336,4 @@ class GradioUI:
         demo.launch(debug=True, share=True, **kwargs)
 
 
-__all__ = ["stream_to_gradio", "GradioUI"]
\ No newline at end of file
+__all__ = ["stream_to_gradio", "GradioUI"]
```
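A quick way to sanity-check the reformatted `<end_code>` cleanup is to run the same three substitutions standalone. A minimal sketch; the sample string is invented for illustration:

```python
import re

# Invented sample: model output ending with a fenced code block plus an <end_code> marker
sample = "Thought: add the numbers.\n```python\nresult = 2 + 2\n```<end_code>"

# The same three substitutions pull_messages_from_step applies, one format variant each
sample = re.sub(r"```\s*<end_code>", "```", sample)  # handles ```<end_code>
sample = re.sub(r"<end_code>\s*```", "```", sample)  # handles <end_code>```
sample = re.sub(r"```\s*\n\s*<end_code>", "```", sample)  # handles ```\n<end_code>

print(sample.strip())  # the code fence survives; the <end_code> marker is gone
```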
app.py
CHANGED
```diff
@@ -1,23 +1,27 @@
-from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool
 import datetime
-import requests
 import pytz
 import yaml
-from tools.final_answer import FinalAnswerTool
+
+from smolagents import CodeAgent, HfApiModel, load_tool, tool
 
 from Gradio_UI import GradioUI
+from tools.final_answer import FinalAnswerTool
+
 
 # Below is an example of a tool that does nothing. Amaze us with your creativity !
 @tool
-def my_custom_tool(arg1:str, arg2:int)-> str: #it's import to specify the return type
-    #Keep this format for the description / args / args description but feel free to modify the tool
-    """A tool that does nothing yet
+def my_custom_tool(
+    arg1: str, arg2: int
+) -> str:  # it's import to specify the return type
+    # Keep this format for the description / args / args description but feel free to modify the tool
+    """A tool that does nothing yet
     Args:
         arg1: the first argument
         arg2: the second argument
     """
     return "What magic will you build ?"
 
+
 @tool
 def get_current_time_in_timezone(timezone: str) -> str:
     """A tool that fetches the current local time in a specified timezone.
@@ -37,33 +41,33 @@ def get_current_time_in_timezone(timezone: str) -> str:
 final_answer = FinalAnswerTool()
 
 # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
-# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
+# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
 
 model = HfApiModel(
-max_tokens=2096,
-temperature=0.5,
-model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
-custom_role_conversions=None,
+    max_tokens=2096,
+    temperature=0.5,
+    model_id="Qwen/Qwen2.5-Coder-32B-Instruct",  # it is possible that this model may be overloaded
+    custom_role_conversions=None,
 )
 
 
 # Import tool from Hub
 image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
 
-with open("prompts.yaml", 'r') as stream:
+with open("prompts.yaml", "r") as stream:
     prompt_templates = yaml.safe_load(stream)
-    
+
 agent = CodeAgent(
     model=model,
-    tools=[final_answer], ## add your tools here (don't remove final answer)
+    tools=[final_answer],  ## add your tools here (don't remove final answer)
     max_steps=6,
     verbosity_level=1,
     grammar=None,
     planning_interval=None,
     name=None,
     description=None,
-    prompt_templates=prompt_templates
+    prompt_templates=prompt_templates,
 )
 
 
-GradioUI(agent).launch()
\ No newline at end of file
+GradioUI(agent).launch()
```
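The `# Keep this format for the description / args / args description` comment matters: smolagents parses the `@tool` function's docstring to build the tool schema the model sees. A minimal sketch of a tool following that format (`reverse_string` is a hypothetical example, not part of this commit):

```python
from smolagents import tool


@tool
def reverse_string(text: str) -> str:  # hypothetical example tool, not in this repo
    """Returns the input text reversed.
    Args:
        text: the string to reverse
    """
    return text[::-1]
```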
tools/final_answer.py
CHANGED
```diff
@@ -1,10 +1,14 @@
-from typing import Any
+from typing import Any
+
 from smolagents.tools import Tool
 
+
 class FinalAnswerTool(Tool):
     name = "final_answer"
     description = "Provides a final answer to the given problem."
-    inputs = {"answer": {"type": "any", "description": "The final answer to the problem"}}
+    inputs = {
+        "answer": {"type": "any", "description": "The final answer to the problem"}
+    }
     output_type = "any"
 
     def forward(self, answer: Any) -> Any:
```
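The expanded `inputs` dict is the schema the agent sees; calling the tool directly is a one-liner. A minimal sketch, assuming the template's `forward` body simply returns its argument:

```python
from tools.final_answer import FinalAnswerTool

final_answer = FinalAnswerTool()
# The agent invokes this tool to terminate a run; called directly,
# forward() should hand back whatever answer it is given.
print(final_answer.forward("42"))  # expected: 42
```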
tools/visit_webpage.py
CHANGED
```diff
@@ -1,13 +1,14 @@
-from typing import Any, Optional
+import re
+
 from smolagents.tools import Tool
-import requests
-import markdownify
-import smolagents
+
 
 class VisitWebpageTool(Tool):
     name = "visit_webpage"
     description = "Visits a webpage at the given url and reads its content as a markdown string. Use this to browse webpages."
-    inputs = {"url": {"type": "string", "description": "The url of the webpage to visit."}}
+    inputs = {
+        "url": {"type": "string", "description": "The url of the webpage to visit."}
+    }
     output_type = "string"
 
     def forward(self, url: str) -> str:
```
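A minimal usage sketch; it assumes the template's `forward` body, which imports `requests` and `markdownify` lazily, so both packages must be installed at call time:

```python
from tools.visit_webpage import VisitWebpageTool

page = VisitWebpageTool()
# Fetches the page and returns its content converted to markdown
markdown = page.forward(url="https://example.com")
print(markdown[:200])
```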
tools/web_search.py
CHANGED
```diff
@@ -1,14 +1,15 @@
-from typing import Any, Optional
 from smolagents.tools import Tool
-import duckduckgo_search
+
 
 class DuckDuckGoSearchTool(Tool):
     name = "web_search"
     description = "Performs a duckduckgo web search based on your query (think a Google search) then returns the top search results."
-    inputs = {"query": {"type": "string", "description": "The search query to perform."}}
+    inputs = {
+        "query": {"type": "string", "description": "The search query to perform."}
+    }
     output_type = "string"
 
-    def __init__(self, max_results=10, **kwargs):
+    def __init__(self, max_results: int = 10, **kwargs):
         super().__init__()
         self.max_results = max_results
         try:
@@ -23,5 +24,8 @@ class DuckDuckGoSearchTool(Tool):
         results = self.ddgs.text(query, max_results=self.max_results)
         if len(results) == 0:
             raise Exception("No results found! Try a less restrictive/shorter query.")
-        postprocessed_results = [f"[{result['title']}]({result['href']})\n{result['body']}" for result in results]
+        postprocessed_results = [
+            f"[{result['title']}]({result['href']})\n{result['body']}"
+            for result in results
+        ]
         return "## Search Results\n\n" + "\n\n".join(postprocessed_results)
```
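And the search tool, whose `__init__` now carries a type annotation. A minimal sketch, assuming `duckduckgo-search` is installed (the `try:` in `__init__` guards that import and raises otherwise):

```python
from tools.web_search import DuckDuckGoSearchTool

search = DuckDuckGoSearchTool(max_results=5)
# Returns a "## Search Results" markdown string with one
# [title](link) + snippet entry per result
print(search.forward(query="smolagents GradioUI streaming"))
```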