Update app.py
app.py CHANGED
@@ -1,117 +1,56 @@
 import os
-import datetime
-import pytz
-import math
-import requests
 import gradio as gr
-from deep_translator import GoogleTranslator
-
-# --- LLAMAINDEX IMPORTS ---
 from llama_index.core.agent import ReActAgent
 from llama_index.core.tools import FunctionTool
 from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
+from smolagents import CodeAgent, HfApiModel, DuckDuckGoSearchTool
 
-#
-from smolagents import CodeAgent, DuckDuckGoSearchTool, tool, InferenceClientModel
-
-# 1. SHARED SETUP & TOKENS
-# Ensure you set these in your Hugging Face Space Settings -> Secrets
+# 1. SETUP LLM
 HF_TOKEN = os.getenv("HF_TOKEN")
 
-#
-# PART 1: LLAMAINDEX AGENT SETUP
-# ==========================================
+# --- LLAMAINDEX SETUP ---
 li_llm = HuggingFaceInferenceAPI(
     model_name="Qwen/Qwen2.5-7B-Instruct",
     token=HF_TOKEN,
-    task="conversational"
-    provider="together"
+    task="conversational"
 )
 
-
-def get_tokyo_time_li():
-    tz = pytz.timezone("Asia/Tokyo")
-    return f"The current time in Tokyo is {datetime.datetime.now(tz).strftime('%H:%M:%S')}"
-
-def multiply_li(a: float, b: float) -> float:
-    """Multiplies two numbers (a and b) and returns the result."""
+# Define simple tools for LlamaIndex
+def multiply(a: float, b: float) -> float:
+    """Multiplies two numbers."""
     return a * b
 
-li_tools = [
-    FunctionTool.from_defaults(fn=get_tokyo_time_li),
-    FunctionTool.from_defaults(fn=multiply_li)
-]
-
-RE_ACT_PROMPT = """You are a helpful assistant.
-For every query, you MUST follow this sequence:
-Thought: <your reasoning>
-Action: <tool_name>
-Action Input: {"arg1": value}
-Observation: <result from tool>
-... (repeat if needed)
-Thought: I have the final answer.
-Answer: <your final response to the user>
-"""
+li_tools = [FunctionTool.from_defaults(fn=multiply)]
 
+# Fix: Ensure correct initialization
 li_agent = ReActAgent.from_tools(
-    li_tools,
+    tools=li_tools,
     llm=li_llm,
-    verbose=True
-    context=RE_ACT_PROMPT
-)
-
-def li_chat_fn(message, history):
-    try:
-        return str(li_agent.chat(message))
-    except Exception as e:
-        return f"LlamaIndex Error: {str(e)}"
-
-li_interface = gr.ChatInterface(li_chat_fn, title="LlamaIndex Agent")
-
-# ==========================================
-# PART 2: SMOLAGENTS SETUP
-# ==========================================
-smol_model = InferenceClientModel(
-    model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
-    token=HF_TOKEN
-)
-
-@tool
-def weather_tool(location: str) -> str:
-    """Get current weather for any location.
-    Args:
-        location: The name of the city.
-    """
-    geo_url = f"https://geocoding-api.open-meteo.com/v1/search?name={location}&count=1"
-    geo_res = requests.get(geo_url).json()
-    if not geo_res.get('results'): return "Location not found."
-    data = geo_res['results'][0]
-    w_url = f"https://api.open-meteo.com/v1/forecast?latitude={data['latitude']}&longitude={data['longitude']}&current=temperature_2m"
-    w_res = requests.get(w_url).json()
-    return f"Temp in {location}: {w_res['current']['temperature_2m']}°C"
-
-smol_agent = CodeAgent(
-    model=smol_model,
-    tools=[weather_tool, DuckDuckGoSearchTool()],
-    additional_authorized_imports=['requests', 'math', 'pytz']
+    verbose=True
 )
 
[... 16 removed lines (old app.py lines 99-114) are not rendered in this view ...]
+# --- SMOLAGENTS SETUP ---
+smol_model = HfApiModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct", token=HF_TOKEN)
+smol_agent = CodeAgent(tools=[DuckDuckGoSearchTool()], model=smol_model)
+
+# 2. DEFINE WRAPPER FUNCTIONS FOR GRADIO
+def chat_llama(message, history):
+    # LlamaIndex .chat() keeps its own history if using the same agent object
+    response = li_agent.chat(message)
+    return str(response)
+
+def chat_smol(message, history):
+    # smolagents .run()
+    response = smol_agent.run(message)
+    return str(response)
+
+# 3. COMBINE INTO TABS
+with gr.Blocks() as demo:
+    gr.Markdown("# Dual Agent Interface: LlamaIndex vs Smolagents")
+    with gr.Tab("LlamaIndex (ReAct)"):
+        gr.ChatInterface(chat_llama)
+    with gr.Tab("Smolagents (Code)"):
+        gr.ChatInterface(chat_smol)
 
 if __name__ == "__main__":
     demo.launch()
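For reference, a minimal smoke test for the two agents the updated app.py defines. This is a hypothetical helper, not part of the commit: it assumes app.py sits in the same directory and is importable, that HF_TOKEN is set in the environment, and that the Space's dependencies (gradio, llama-index, smolagents and the Hugging Face inference extras) are installed; the prompts are illustrative only.

import os

assert os.getenv("HF_TOKEN"), "Set HF_TOKEN before importing app.py"

# Importing app.py builds both agents and the Gradio UI, but does not launch the
# server, because demo.launch() is guarded by the __main__ check above.
from app import li_agent, smol_agent

# LlamaIndex ReAct agent: should route the arithmetic through the `multiply` FunctionTool.
print(li_agent.chat("What is 6 times 7?"))

# smolagents CodeAgent: should fall back to DuckDuckGoSearchTool for the web lookup.
print(smol_agent.run("Who maintains the Gradio library?"))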