# Agentic_Pod / app.py
# Author: Shourya Angrish
# (commit 60119b3: Revert "test")
import gradio as gr
from smolagents import CodeAgent, tool
from smolagents.models import InferenceClientModel
import os
import requests
import json
# Set your HuggingFace token here or via environment variable
HF_TOKEN = os.getenv("HF_TOKEN")
SERPER_API_KEY = os.getenv("SERPER_API_KEY") # Get free API key from serper.dev
WEATHER_API_KEY = os.getenv("WEATHER_API_KEY") # Get free API key from openweathermap.org
# Define tools using @tool decorator
@tool
def search_google(query: str) -> str:
    """Search Google for current information about any topic.
    Args:
        query: The search query string
    Returns:
        Search results with titles and snippets
    """
    if not SERPER_API_KEY:
        return "Error: SERPER_API_KEY not set. Get one from https://serper.dev"
    url = "https://google.serper.dev/search"
    headers = {
        'X-API-KEY': SERPER_API_KEY,
        'Content-Type': 'application/json'
    }
    try:
        # json= serializes the payload and sets Content-Type for us.
        response = requests.post(url, headers=headers, json={"q": query}, timeout=10)
        # Surface HTTP errors (401 bad key, 429 rate limit, ...) instead of
        # silently parsing an error body as if it were search results.
        response.raise_for_status()
        results = response.json()
        # Extract the top 3 organic results as numbered title/snippet entries.
        snippets = [
            f"{i}. {item.get('title', '')}\n {item.get('snippet', '')}"
            for i, item in enumerate(results.get('organic', [])[:3], 1)
        ]
        return "\n\n".join(snippets) if snippets else "No results found"
    except Exception as e:
        # Tool errors are returned as text so the agent can react to them.
        return f"Search error: {str(e)}"
@tool
def get_weather(location: str) -> str:
    """Get current weather information for any location.
    Args:
        location: City name or location (e.g., 'Paris', 'New York', 'Tokyo')
    Returns:
        Weather information including temperature, conditions, humidity, and wind speed
    """
    if not WEATHER_API_KEY:
        return "Error: WEATHER_API_KEY not set. Get one from https://openweathermap.org/api"
    # Use HTTPS (the API key travels in the query string) and let requests
    # URL-encode the location instead of interpolating it into the URL —
    # city names with spaces, commas, or non-ASCII characters would
    # otherwise produce a malformed request.
    url = "https://api.openweathermap.org/data/2.5/weather"
    params = {"q": location, "appid": WEATHER_API_KEY, "units": "metric"}
    try:
        response = requests.get(url, params=params, timeout=10)
        data = response.json()
        if response.status_code == 200:
            temp = data['main']['temp']
            feels_like = data['main']['feels_like']
            description = data['weather'][0]['description']
            humidity = data['main']['humidity']
            wind_speed = data['wind']['speed']
            return f"Weather in {location}:\n- Temperature: {temp}°C (feels like {feels_like}°C)\n- Condition: {description}\n- Humidity: {humidity}%\n- Wind Speed: {wind_speed} m/s"
        else:
            # OpenWeatherMap returns a JSON body with a 'message' on errors.
            return f"Weather error: {data.get('message', 'Unknown error')}"
    except Exception as e:
        # Tool errors are returned as text so the agent can react to them.
        return f"Weather error: {str(e)}"
# Initialize the agent with tools
agent = None  # most recently initialized agent (kept for backward compatibility)

# One agent per reasoning strategy, built lazily. The original code rebuilt
# the InferenceClientModel and CodeAgent on every chat message, which is
# slow and needlessly re-creates the HTTP client each turn.
_AGENT_CACHE = {}

# Reasoning-strategy system prompts. The "Simple" strategy (and any unknown
# value) falls back to _DEFAULT_PROMPT.
_SYSTEM_PROMPTS = {
    "Chain of Thought (CoT)": """You are a helpful assistant that breaks down reasoning step-by-step.
When answering, use this format:
'Let me think through this step by step:
1. [First step]
2. [Second step]
3. [Final conclusion]'
Use the available tools when you need current information.""",
    "ReAct": """You are a helpful assistant using the ReAct framework.
For each query, follow this pattern:
Thought: [your reasoning about what to do]
Action: [use a tool if needed]
Observation: [what you learned]
Answer: [final response]
Use the available tools when you need external information.""",
    "Tree of Thoughts": """You are a helpful assistant that explores multiple reasoning paths.
Use this format:
Path 1: [first approach]
Path 2: [alternative approach]
Path 3: [another perspective]
Best Solution: [synthesized answer]
Use the available tools when needed.""",
}
_DEFAULT_PROMPT = "You are a helpful assistant. Provide clear, concise answers. Use the available tools when you need current or external information."

def initialize_agent(reasoning_type):
    """Return a SmolAgent configured for *reasoning_type*, creating and
    caching it on first use.

    Args:
        reasoning_type: One of the UI radio choices ("Simple",
            "Chain of Thought (CoT)", "ReAct", "Tree of Thoughts").
            Unknown values get the default ("Simple") prompt.
    Returns:
        A CodeAgent wired with the search and weather tools.
    """
    global agent
    cached = _AGENT_CACHE.get(reasoning_type)
    if cached is not None:
        agent = cached
        return cached

    system_prompt = _SYSTEM_PROMPTS.get(reasoning_type, _DEFAULT_PROMPT)
    model = InferenceClientModel(
        model_id="meta-llama/Llama-3.2-3B-Instruct",
        token=HF_TOKEN
    )
    new_agent = CodeAgent(
        tools=[search_google, get_weather],
        model=model,
        additional_authorized_imports=["requests", "json"],
        max_steps=5
    )
    # Set system prompt via agent's prompt_templates dictionary.
    # NOTE(review): this replaces CodeAgent's entire default system prompt,
    # which normally embeds the tool descriptions/usage instructions —
    # confirm the model still receives the tool signatures.
    new_agent.prompt_templates["system_prompt"] = system_prompt

    _AGENT_CACHE[reasoning_type] = new_agent
    agent = new_agent
    return new_agent
def chat(msg, reasoning_type):
    """Run *msg* through a SmolAgent set up for the selected reasoning
    strategy and return the agent's answer as plain text.

    Any failure (missing token, unreachable model, agent error) is caught
    at this UI boundary and rendered as an error string rather than raised.
    """
    try:
        active_agent = initialize_agent(reasoning_type)
        answer = active_agent.run(msg)
    except Exception as err:
        return f"Error: {str(err)}\n\nMake sure HF_TOKEN is set and the model is accessible."
    return str(answer)
# --- Gradio UI ---------------------------------------------------------------
# Component creation order inside the Blocks context determines on-screen
# layout: left column holds the strategy picker, message box, and examples;
# right column shows the agent's full (reasoning-inclusive) response.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤖 SmolAgents Reasoning Assistant")
    gr.Markdown("Powered by Hugging Face SmolAgents with automatic tool orchestration")
    gr.Markdown("""
### 🛠️ Available Tools (Auto-detected by Agent):
- **Google Search**: Current events, latest information
- **Weather**: Real-time weather for any location
### 🔑 Setup API Keys (Environment Variables):
- `HF_TOKEN` - Required: Your Hugging Face token
- `SERPER_API_KEY` - Optional: Get free at [serper.dev](https://serper.dev)
- `WEATHER_API_KEY` - Optional: Get free at [openweathermap.org](https://openweathermap.org/api)
### ✨ Features:
- Automatic tool selection and execution
- Multi-step reasoning with ReAct loop
- No manual tool parsing needed!
""")
    with gr.Row():
        with gr.Column(scale=3):
            # Strategy choice is passed to chat() on every submit; the
            # selected value picks the agent's system prompt.
            reasoning = gr.Radio(
                choices=["Simple", "Chain of Thought (CoT)", "ReAct", "Tree of Thoughts"],
                value="ReAct",
                label="Reasoning Strategy"
            )
            msg_input = gr.Textbox(
                label="Your Message",
                placeholder="Try: 'What's the weather in Paris?' or 'Search for latest AI news'",
                lines=3
            )
            submit_btn = gr.Button("Send", variant="primary")
            # Clicking an example fills msg_input; it does not auto-submit.
            gr.Examples(
                examples=[
                    "What's the weather in Tokyo?",
                    "Search for the latest news about AI",
                    "What's the weather like in London and should I bring an umbrella?",
                    "Find information about the Mars rover",
                    "Compare weather in Paris and London"
                ],
                inputs=msg_input
            )
        with gr.Column(scale=4):
            output = gr.Textbox(
                label="Agent Response (with reasoning steps)",
                lines=25,
                max_lines=35
            )
    # Both the Send button and pressing Enter in the textbox trigger chat().
    submit_btn.click(fn=chat, inputs=[msg_input, reasoning], outputs=output)
    msg_input.submit(fn=chat, inputs=[msg_input, reasoning], outputs=output)
demo.launch()