Spaces:
Runtime error
Runtime error
Upload 5 files
Browse files- .env +12 -0
- .gitignore +16 -0
- app.py +132 -0
- requirements.txt +8 -0
- smolagent_studio_ide.py +132 -0
.env
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#.env
|
| 2 |
+
|
| 3 |
+
# 🔐 Secure your model provider tokens here
|
| 4 |
+
HUGGINGFACEHUB_API_TOKEN=your_huggingface_token_here
|
| 5 |
+
OPENAI_API_KEY=your_openai_token_here
|
| 6 |
+
ANTHROPIC_API_KEY=your_anthropic_token_here
|
| 7 |
+
OLLAMA_API_KEY=your_ollama_token_here
|
| 8 |
+
TOGETHER_API_KEY=your_together_token_here
|
| 9 |
+
OPENROUTER_API_KEY=your_openrouter_token_here
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# 💡 You can use the python-dotenv package to auto-load these in Python via os.getenv().
|
.gitignore
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
text
|
| 2 |
+
# Python
|
| 3 |
+
__pycache__/
|
| 4 |
+
*.py[cod]
|
| 5 |
+
*.env
|
| 6 |
+
env/
|
| 7 |
+
venv/
|
| 8 |
+
|
| 9 |
+
# Gradio Cache
|
| 10 |
+
gradio_cached_examples/
|
| 11 |
+
|
| 12 |
+
# VS Code
|
| 13 |
+
.vscode/
|
| 14 |
+
|
| 15 |
+
# Docker
|
| 16 |
+
*.container
|
app.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import os
|
| 3 |
+
import json
|
| 4 |
+
import yaml
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
from smolagents import (
|
| 7 |
+
CodeAgent, ToolCallingAgent, ManagedAgent,
|
| 8 |
+
InferenceClientModel, TransformersModel, LiteLLMModel,
|
| 9 |
+
OllamaModel, OpenAIModel, AnthropicModel,
|
| 10 |
+
TogetherModel, OpenRouterModel,
|
| 11 |
+
WebSearchTool, PythonTool, FileTool, ImageTool,
|
| 12 |
+
TerminalTool, FinalAnswerTool, UserInputTool,
|
| 13 |
+
LangChainTool, HFModelDownloadsTool
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
# π― Tool and Model Mappings
# NOTE(review): the description strings below contain mojibake (e.g. "π",
# "β") where emoji were mis-decoded during upload — restore from the original
# source if available.
# TOOL_MAP: UI display name -> (tool class, short description shown in the UI).
# NOTE(review): PythonTool, FileTool, ImageTool, TerminalTool, LangChainTool
# and HFModelDownloadsTool are imported from smolagents above but may not
# exist in the installed smolagents release — a failed import aborts the app
# at startup and would explain the Space's "Runtime error". Verify against
# the pinned smolagents version.
TOOL_MAP = {
    "WebSearchTool": (WebSearchTool, "π Search the internet"),
    "PythonTool": (PythonTool, "π Execute Python code"),
    "FileTool": (FileTool, "π Read/write local files"),
    "ImageTool": (ImageTool, "πΌοΈ Handle image input/output"),
    "TerminalTool": (TerminalTool, "π» Run shell commands"),
    "FinalAnswerTool": (FinalAnswerTool, "β Finalize agent response"),
    "UserInputTool": (UserInputTool, "π€ Interactive user input"),
    "LangChainTool": (LangChainTool, "π Wrap LangChain tools"),
    "HFModelDownloadsTool": (HFModelDownloadsTool, "π Hugging Face stats")
}

# MODEL_MAP: provider display name -> (model class, short description).
# NOTE(review): OllamaModel, OpenAIModel, AnthropicModel, TogetherModel and
# OpenRouterModel may not exist in smolagents — confirm against the pinned
# version (smolagents provides OpenAI/Anthropic/Ollama access via
# LiteLLMModel rather than dedicated classes).
MODEL_MAP = {
    "Hugging Face API": (InferenceClientModel, "Use hosted HF models"),
    "Transformers (local)": (TransformersModel, "Run local models"),
    "LiteLLM": (LiteLLMModel, "Connect to LiteLLM"),
    "OpenAI": (OpenAIModel, "Use OpenAI GPT models"),
    "Anthropic": (AnthropicModel, "Access Claude via Anthropic"),
    "Ollama": (OllamaModel, "Use local Ollama models"),
    "Together.ai": (TogetherModel, "Use Together-hosted models"),
    "OpenRouter": (OpenRouterModel, "Universal model router")
}

# AGENT_TYPES: agent display name -> one-line description (UI dropdown choices).
AGENT_TYPES = {
    "CodeAgent": "Generates and runs Python code",
    "ToolCallingAgent": "Uses structured tool calls",
    "ManagedAgent": "Manages tool routing and sub-agents"
}
|
| 45 |
+
|
| 46 |
+
# ⚙️ Helper: Generate Agent Code Preview
def generate_code_preview(model_choice, tools_selected, agent_type, prompt):
    """Return a runnable Python snippet mirroring the current UI selection.

    Args:
        model_choice: Display name of the model provider (a MODEL_MAP key).
        tools_selected: List of tool class names (TOOL_MAP keys).
        agent_type: One of the AGENT_TYPES keys.
        prompt: User prompt to embed in the generated ``agent.run(...)`` call.

    Returns:
        Newline-joined Python source code as a single string.
    """
    # Map UI display names to actual class names. Naively stripping spaces
    # ("Hugging Face API" -> "HuggingFaceAPI") generated imports for classes
    # that do not exist; unknown providers still fall back to the old rule.
    class_names = {
        "Hugging Face API": "InferenceClientModel",
        "Transformers (local)": "TransformersModel",
        "LiteLLM": "LiteLLMModel",
        "OpenAI": "OpenAIModel",
        "Anthropic": "AnthropicModel",
        "Ollama": "OllamaModel",
        "Together.ai": "TogetherModel",
        "OpenRouter": "OpenRouterModel",
    }
    model_cls_name = class_names.get(model_choice, model_choice.replace(" ", ""))
    imports = (
        f"from smolagents import {agent_type}, {model_cls_name}, "
        f"{', '.join(tools_selected)}\nimport os"
    )
    setup = 'os.environ["HUGGINGFACEHUB_API_TOKEN"] = "your_token_here"'
    model = (
        f'model = {model_cls_name}(provider="{model_choice}", '
        f'token=os.getenv("HUGGINGFACEHUB_API_TOKEN"))'
    )
    tools = f'tools = [{", ".join(tool + "()" for tool in tools_selected)}]'
    agent = f'agent = {agent_type}(tools=tools, model=model, stream_outputs=True)'
    # repr() escapes quotes/newlines so a prompt cannot break the generated code.
    run = f'result = agent.run({prompt!r})\nprint(result)'
    return "\n".join([imports, setup, model, tools, agent, run])
|
| 56 |
+
|
| 57 |
+
# 🚀 Main Execution Logic
def run_agent(model_choice, token, tools_selected, agent_type, prompt):
    """Build the selected agent, run `prompt`, and return (result, code).

    Args:
        model_choice: MODEL_MAP key chosen in the UI.
        token: API token entered by the user.
        tools_selected: List of TOOL_MAP keys to instantiate.
        agent_type: "CodeAgent", "ToolCallingAgent", or anything else for
            ManagedAgent (the UI only offers these three).
        prompt: User prompt to run.

    Returns:
        Tuple of the agent's answer (or an error string) and the generated
        code preview for the same configuration.
    """
    # NOTE(review): the token is always exported under the HF variable even
    # for OpenAI/Anthropic/etc. providers — confirm each backend reads it.
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = token

    try:
        # Model/tool/agent construction belongs inside the try-block: a bad
        # token, unknown provider, or tool __init__ failure previously crashed
        # the Gradio callback instead of being reported in the output box.
        model_cls = MODEL_MAP[model_choice][0]
        model = model_cls(provider=model_choice, token=token)
        tools = [TOOL_MAP[name][0]() for name in tools_selected]

        if agent_type == "CodeAgent":
            agent = CodeAgent(tools=tools, model=model, stream_outputs=True)
        elif agent_type == "ToolCallingAgent":
            agent = ToolCallingAgent(tools=tools, model=model)
        else:
            agent = ManagedAgent(tools=tools, model=model)

        result = agent.run(prompt)
    except Exception as e:
        result = f"⚠️ Agent Error: {e}"

    code = generate_code_preview(model_choice, tools_selected, agent_type, prompt)
    return result, code
|
| 78 |
+
|
| 79 |
+
# 💾 Export Agent Setup
def export_agent_config(model_choice, tools_selected, agent_type, prompt, format):
    """Write the current agent setup to ``agent_config.<format>`` in the CWD.

    Args:
        model_choice: MODEL_MAP key chosen in the UI.
        tools_selected: List of TOOL_MAP keys.
        agent_type: AGENT_TYPES key.
        prompt: User prompt to embed in the config.
        format: "yaml", "py" (runnable script), or anything else for JSON.
            (Kept despite shadowing the builtin `format`: renaming it would
            change the callback signature wired into Gradio.)

    Returns:
        A status string naming the written file.
    """
    config = {
        "model": model_choice,
        "tools": tools_selected,
        "agent_type": agent_type,
        "prompt": prompt,
    }
    path = Path(f"agent_config.{format}")
    # Explicit UTF-8 so emoji / non-ASCII prompts survive on platforms whose
    # locale encoding is not UTF-8 (write_text previously used the default).
    if format == "yaml":
        path.write_text(yaml.dump(config, sort_keys=False), encoding="utf-8")
    elif format == "py":
        code = generate_code_preview(model_choice, tools_selected, agent_type, prompt)
        path.write_text(code, encoding="utf-8")
    else:
        path.write_text(json.dumps(config, indent=2), encoding="utf-8")
    return f"✅ Exported to {path}"
|
| 96 |
+
|
| 97 |
+
# 🧪 Gradio Interface
with gr.Blocks(title="SmolAgent Studio IDE") as demo:
    gr.Markdown("# 🛠️ SmolAgent Studio")
    gr.Markdown(
        "Build and deploy smart agents visually. Select tools, models, and "
        "prompts — with live code preview and export."
    )

    with gr.Row():
        model_dropdown = gr.Dropdown(list(MODEL_MAP.keys()), label="Model Provider", info="Choose LLM backend")
        token_input = gr.Textbox(label="API Token", type="password", placeholder="Paste your token securely")

    with gr.Row():
        tool_selector = gr.CheckboxGroup(list(TOOL_MAP.keys()), label="Tools", info="Select tools for your agent")
        agent_dropdown = gr.Dropdown(list(AGENT_TYPES.keys()), label="Agent Type", info="Select agent logic")

    prompt_box = gr.Textbox(label="Prompt", lines=4, placeholder="Ask your agent something...")

    code_view = gr.Code(label="Generated Python Code", language="python")
    output_box = gr.Textbox(label="Agent Output")
    # Fix: the export status must be created inside the layout. A component
    # instantiated inline in `outputs=` is never rendered on the page, so the
    # export result was invisible to the user.
    export_status = gr.Textbox(label="Export Status")

    with gr.Row():
        run_button = gr.Button("🚀 Run Agent")
        format_dropdown = gr.Dropdown(["json", "yaml", "py"], label="Export Format", value="json")
        export_button = gr.Button("💾 Export Config")

    run_button.click(
        fn=run_agent,
        inputs=[model_dropdown, token_input, tool_selector, agent_dropdown, prompt_box],
        outputs=[output_box, code_view],
    )

    export_button.click(
        fn=export_agent_config,
        inputs=[model_dropdown, tool_selector, agent_dropdown, prompt_box, format_dropdown],
        outputs=export_status,
    )

demo.launch()
|
requirements.txt
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#requirements.txt
|
| 2 |
+
|
| 3 |
+
# π§© SmolAgent Studio dependencies
|
| 4 |
+
smolagents>=0.1.21
|
| 5 |
+
gradio>=4.0.0
|
| 6 |
+
python-dotenv>=1.0.0
|
| 7 |
+
|
| 8 |
+
# 🧪 You can install with: pip install -r requirements.txt
|
smolagent_studio_ide.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import os
|
| 3 |
+
import json
|
| 4 |
+
import yaml
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
from smolagents import (
|
| 7 |
+
CodeAgent, ToolCallingAgent, ManagedAgent,
|
| 8 |
+
InferenceClientModel, TransformersModel, LiteLLMModel,
|
| 9 |
+
OllamaModel, OpenAIModel, AnthropicModel,
|
| 10 |
+
TogetherModel, OpenRouterModel,
|
| 11 |
+
WebSearchTool, PythonTool, FileTool, ImageTool,
|
| 12 |
+
TerminalTool, FinalAnswerTool, UserInputTool,
|
| 13 |
+
LangChainTool, HFModelDownloadsTool
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
# π― Tool and Model Mappings
# NOTE(review): the description strings below contain mojibake (e.g. "π",
# "β") where emoji were mis-decoded during upload — restore from the original
# source if available. This file duplicates app.py; consider keeping only one.
# TOOL_MAP: UI display name -> (tool class, short description shown in the UI).
# NOTE(review): PythonTool, FileTool, ImageTool, TerminalTool, LangChainTool
# and HFModelDownloadsTool are imported from smolagents above but may not
# exist in the installed smolagents release — a failed import aborts the app
# at startup and would explain the Space's "Runtime error". Verify against
# the pinned smolagents version.
TOOL_MAP = {
    "WebSearchTool": (WebSearchTool, "π Search the internet"),
    "PythonTool": (PythonTool, "π Execute Python code"),
    "FileTool": (FileTool, "π Read/write local files"),
    "ImageTool": (ImageTool, "πΌοΈ Handle image input/output"),
    "TerminalTool": (TerminalTool, "π» Run shell commands"),
    "FinalAnswerTool": (FinalAnswerTool, "β Finalize agent response"),
    "UserInputTool": (UserInputTool, "π€ Interactive user input"),
    "LangChainTool": (LangChainTool, "π Wrap LangChain tools"),
    "HFModelDownloadsTool": (HFModelDownloadsTool, "π Hugging Face stats")
}

# MODEL_MAP: provider display name -> (model class, short description).
# NOTE(review): OllamaModel, OpenAIModel, AnthropicModel, TogetherModel and
# OpenRouterModel may not exist in smolagents — confirm against the pinned
# version (smolagents provides OpenAI/Anthropic/Ollama access via
# LiteLLMModel rather than dedicated classes).
MODEL_MAP = {
    "Hugging Face API": (InferenceClientModel, "Use hosted HF models"),
    "Transformers (local)": (TransformersModel, "Run local models"),
    "LiteLLM": (LiteLLMModel, "Connect to LiteLLM"),
    "OpenAI": (OpenAIModel, "Use OpenAI GPT models"),
    "Anthropic": (AnthropicModel, "Access Claude via Anthropic"),
    "Ollama": (OllamaModel, "Use local Ollama models"),
    "Together.ai": (TogetherModel, "Use Together-hosted models"),
    "OpenRouter": (OpenRouterModel, "Universal model router")
}

# AGENT_TYPES: agent display name -> one-line description (UI dropdown choices).
AGENT_TYPES = {
    "CodeAgent": "Generates and runs Python code",
    "ToolCallingAgent": "Uses structured tool calls",
    "ManagedAgent": "Manages tool routing and sub-agents"
}
|
| 45 |
+
|
| 46 |
+
# ⚙️ Helper: Generate Agent Code Preview
def generate_code_preview(model_choice, tools_selected, agent_type, prompt):
    """Return a runnable Python snippet mirroring the current UI selection.

    Args:
        model_choice: Display name of the model provider (a MODEL_MAP key).
        tools_selected: List of tool class names (TOOL_MAP keys).
        agent_type: One of the AGENT_TYPES keys.
        prompt: User prompt to embed in the generated ``agent.run(...)`` call.

    Returns:
        Newline-joined Python source code as a single string.
    """
    # Map UI display names to actual class names. Naively stripping spaces
    # ("Hugging Face API" -> "HuggingFaceAPI") generated imports for classes
    # that do not exist; unknown providers still fall back to the old rule.
    class_names = {
        "Hugging Face API": "InferenceClientModel",
        "Transformers (local)": "TransformersModel",
        "LiteLLM": "LiteLLMModel",
        "OpenAI": "OpenAIModel",
        "Anthropic": "AnthropicModel",
        "Ollama": "OllamaModel",
        "Together.ai": "TogetherModel",
        "OpenRouter": "OpenRouterModel",
    }
    model_cls_name = class_names.get(model_choice, model_choice.replace(" ", ""))
    imports = (
        f"from smolagents import {agent_type}, {model_cls_name}, "
        f"{', '.join(tools_selected)}\nimport os"
    )
    setup = 'os.environ["HUGGINGFACEHUB_API_TOKEN"] = "your_token_here"'
    model = (
        f'model = {model_cls_name}(provider="{model_choice}", '
        f'token=os.getenv("HUGGINGFACEHUB_API_TOKEN"))'
    )
    tools = f'tools = [{", ".join(tool + "()" for tool in tools_selected)}]'
    agent = f'agent = {agent_type}(tools=tools, model=model, stream_outputs=True)'
    # repr() escapes quotes/newlines so a prompt cannot break the generated code.
    run = f'result = agent.run({prompt!r})\nprint(result)'
    return "\n".join([imports, setup, model, tools, agent, run])
|
| 56 |
+
|
| 57 |
+
# 🚀 Main Execution Logic
def run_agent(model_choice, token, tools_selected, agent_type, prompt):
    """Build the selected agent, run `prompt`, and return (result, code).

    Args:
        model_choice: MODEL_MAP key chosen in the UI.
        token: API token entered by the user.
        tools_selected: List of TOOL_MAP keys to instantiate.
        agent_type: "CodeAgent", "ToolCallingAgent", or anything else for
            ManagedAgent (the UI only offers these three).
        prompt: User prompt to run.

    Returns:
        Tuple of the agent's answer (or an error string) and the generated
        code preview for the same configuration.
    """
    # NOTE(review): the token is always exported under the HF variable even
    # for OpenAI/Anthropic/etc. providers — confirm each backend reads it.
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = token

    try:
        # Model/tool/agent construction belongs inside the try-block: a bad
        # token, unknown provider, or tool __init__ failure previously crashed
        # the Gradio callback instead of being reported in the output box.
        model_cls = MODEL_MAP[model_choice][0]
        model = model_cls(provider=model_choice, token=token)
        tools = [TOOL_MAP[name][0]() for name in tools_selected]

        if agent_type == "CodeAgent":
            agent = CodeAgent(tools=tools, model=model, stream_outputs=True)
        elif agent_type == "ToolCallingAgent":
            agent = ToolCallingAgent(tools=tools, model=model)
        else:
            agent = ManagedAgent(tools=tools, model=model)

        result = agent.run(prompt)
    except Exception as e:
        result = f"⚠️ Agent Error: {e}"

    code = generate_code_preview(model_choice, tools_selected, agent_type, prompt)
    return result, code
|
| 78 |
+
|
| 79 |
+
# 💾 Export Agent Setup
def export_agent_config(model_choice, tools_selected, agent_type, prompt, format):
    """Write the current agent setup to ``agent_config.<format>`` in the CWD.

    Args:
        model_choice: MODEL_MAP key chosen in the UI.
        tools_selected: List of TOOL_MAP keys.
        agent_type: AGENT_TYPES key.
        prompt: User prompt to embed in the config.
        format: "yaml", "py" (runnable script), or anything else for JSON.
            (Kept despite shadowing the builtin `format`: renaming it would
            change the callback signature wired into Gradio.)

    Returns:
        A status string naming the written file.
    """
    config = {
        "model": model_choice,
        "tools": tools_selected,
        "agent_type": agent_type,
        "prompt": prompt,
    }
    path = Path(f"agent_config.{format}")
    # Explicit UTF-8 so emoji / non-ASCII prompts survive on platforms whose
    # locale encoding is not UTF-8 (write_text previously used the default).
    if format == "yaml":
        path.write_text(yaml.dump(config, sort_keys=False), encoding="utf-8")
    elif format == "py":
        code = generate_code_preview(model_choice, tools_selected, agent_type, prompt)
        path.write_text(code, encoding="utf-8")
    else:
        path.write_text(json.dumps(config, indent=2), encoding="utf-8")
    return f"✅ Exported to {path}"
|
| 96 |
+
|
| 97 |
+
# 🧪 Gradio Interface
with gr.Blocks(title="SmolAgent Studio IDE") as demo:
    gr.Markdown("# 🛠️ SmolAgent Studio")
    gr.Markdown(
        "Build and deploy smart agents visually. Select tools, models, and "
        "prompts — with live code preview and export."
    )

    with gr.Row():
        model_dropdown = gr.Dropdown(list(MODEL_MAP.keys()), label="Model Provider", info="Choose LLM backend")
        token_input = gr.Textbox(label="API Token", type="password", placeholder="Paste your token securely")

    with gr.Row():
        tool_selector = gr.CheckboxGroup(list(TOOL_MAP.keys()), label="Tools", info="Select tools for your agent")
        agent_dropdown = gr.Dropdown(list(AGENT_TYPES.keys()), label="Agent Type", info="Select agent logic")

    prompt_box = gr.Textbox(label="Prompt", lines=4, placeholder="Ask your agent something...")

    code_view = gr.Code(label="Generated Python Code", language="python")
    output_box = gr.Textbox(label="Agent Output")
    # Fix: the export status must be created inside the layout. A component
    # instantiated inline in `outputs=` is never rendered on the page, so the
    # export result was invisible to the user.
    export_status = gr.Textbox(label="Export Status")

    with gr.Row():
        run_button = gr.Button("🚀 Run Agent")
        format_dropdown = gr.Dropdown(["json", "yaml", "py"], label="Export Format", value="json")
        export_button = gr.Button("💾 Export Config")

    run_button.click(
        fn=run_agent,
        inputs=[model_dropdown, token_input, tool_selector, agent_dropdown, prompt_box],
        outputs=[output_box, code_view],
    )

    export_button.click(
        fn=export_agent_config,
        inputs=[model_dropdown, tool_selector, agent_dropdown, prompt_box, format_dropdown],
        outputs=export_status,
    )

demo.launch()
|