import os
import gradio as gr
import datetime
import pytz
import asyncio
import requests
# Framework 1: LlamaIndex
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.core.tools import FunctionTool
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
# Framework 2: smolagents
from smolagents import CodeAgent, DuckDuckGoSearchTool, tool, InferenceClientModel
# 0. SHARED CONFIG
# HF_TOKEN may be None when the secret is not set; downstream clients would
# then make unauthenticated calls.
HF_TOKEN = os.getenv("HF_TOKEN")
# Chat model served via the Together provider; shared by both agent frameworks.
MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
def get_coordinates(location: str):
    """Resolve a city name to (latitude, longitude) via Open-Meteo geocoding.

    Args:
        location: City name to look up; the first search result is used.

    Returns:
        A (latitude, longitude) pair, or (None, None) when the location is
        unknown or the request fails. Callers test ``lat is None``.
    """
    geo_url = (
        "https://geocoding-api.open-meteo.com/v1/search"
        f"?name={location}&count=1&language=en&format=json"
    )
    try:
        # Timeout so a stalled geocoding call cannot hang the chat handler.
        geo_res = requests.get(geo_url, timeout=10).json()
    except requests.RequestException:
        # Treat network failures like "not found" — callers already handle it.
        return None, None
    if not geo_res.get("results"):
        return None, None
    res = geo_res["results"][0]
    return res["latitude"], res["longitude"]
# ==========================================
# PART 1: LLAMAINDEX AGENT
# ==========================================
# HF Inference API LLM routed through the Together provider; consumed by the
# LlamaIndex AgentWorkflow below.
li_llm = HuggingFaceInferenceAPI(model_name=MODEL_ID, token=HF_TOKEN, provider="together")
def get_tokyo_time() -> str:
    """Returns the current time in Tokyo, Japan (24-hour HH:MM:SS)."""
    # zoneinfo is stdlib since Python 3.9; replaces the third-party pytz
    # dependency for this simple lookup. Imported locally so this function
    # is self-contained.
    from zoneinfo import ZoneInfo
    now = datetime.datetime.now(ZoneInfo('Asia/Tokyo'))
    return f"The current time in Tokyo is {now.strftime('%H:%M:%S')}"
def get_weather(location: str) -> str:
    """
    Get the real-time weather for a specific location.

    Args:
        location: The city name to check weather for.

    Returns:
        A temperature/humidity summary string, or an error message when the
        location cannot be resolved.
    """
    lat, lon = get_coordinates(location)
    if lat is None:
        return f"Could not find coordinates for {location}."
    # BUGFIX: the query string previously contained mojibake '¤t='
    # (HTML-entity corruption of '&current'); it must be '&current=' for the
    # Open-Meteo API to return the requested fields.
    weather_url = (
        "https://api.open-meteo.com/v1/forecast"
        f"?latitude={lat}&longitude={lon}"
        "&current=temperature_2m,relative_humidity_2m,weather_code"
    )
    # Timeout so a stalled API call cannot hang the chat handler.
    res = requests.get(weather_url, timeout=10).json()
    current = res.get("current", {})
    temp = current.get("temperature_2m")
    hum = current.get("relative_humidity_2m")
    return f"LlamaIndex: In {location}, it is currently {temp}°C with {hum}% humidity."
# Tools exposed to the LlamaIndex agent, wrapped as FunctionTool objects
# (name/description are derived from each function's signature and docstring).
li_tools = [
    FunctionTool.from_defaults(fn=get_tokyo_time),
    FunctionTool.from_defaults(fn=get_weather) # Added weather tool here
]
# Single-agent workflow that lets the LLM select and invoke the tools above.
li_agent = AgentWorkflow.from_tools_or_functions(li_tools, llm=li_llm)
async def chat_llama(message, history):
    """Gradio chat handler for the LlamaIndex tab.

    Args:
        message: Latest user message.
        history: Prior chat turns (ignored; the workflow holds no state here).

    Returns:
        The agent's reply as a string, or a formatted error message.
    """
    try:
        reply = await li_agent.run(user_msg=message)
    except Exception as e:
        return f"❌ LlamaIndex Error: {str(e)}"
    return str(reply)
# ==========================================
# PART 2: SMOLAGENTS
# ==========================================
# Same Qwen model and Together provider, via smolagents' client wrapper.
smol_model = InferenceClientModel(model_id=MODEL_ID, token=HF_TOKEN, provider="together")
@tool
def weather_tool(location: str) -> str:
    """
    Get the current real-time weather for a location.

    Args:
        location: The city and country, e.g., 'London, UK'.
    """
    lat, lon = get_coordinates(location)
    if lat is None:
        return f"Sorry, I couldn't find the location: {location}"
    # BUGFIX: the query string previously contained mojibake '¤t='
    # (HTML-entity corruption of '&current'); it must be '&current=' for the
    # Open-Meteo API to return the requested fields.
    weather_url = (
        "https://api.open-meteo.com/v1/forecast"
        f"?latitude={lat}&longitude={lon}"
        "&current=temperature_2m,wind_speed_10m"
    )
    # Timeout so a stalled API call cannot hang the agent run.
    res = requests.get(weather_url, timeout=10).json()
    current = res.get("current", {})
    temp = current.get("temperature_2m")
    wind = current.get("wind_speed_10m")
    return f"smolagents: The current temperature in {location} is {temp}°C with a wind speed of {wind} km/h."
# CodeAgent plans by writing/executing Python that calls its tools; base
# tools are added alongside the custom weather tool and web search.
smol_agent = CodeAgent(
    model=smol_model,
    tools=[weather_tool, DuckDuckGoSearchTool()],
    add_base_tools=True
)
def chat_smol(message, history):
    """Gradio chat handler for the smolagents tab.

    Args:
        message: Latest user message, passed straight to the CodeAgent.
        history: Prior chat turns (ignored; the agent run is stateless here).

    Returns:
        The agent's answer as a string, or a formatted error message.
    """
    try:
        answer = smol_agent.run(message)
    except Exception as e:
        return f"❌ Smolagents Error: {str(e)}"
    return str(answer)
# ==========================================
# PART 3: GRADIO 6.0 UI
# ==========================================
# Two-tab UI: one ChatInterface per agent framework, sharing the same model.
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 Consolidated AI Agent Space", elem_id="main-title")
    gr.Markdown(f"Currently using **{MODEL_ID}** via Together AI Provider.")
    with gr.Tabs():
        with gr.Tab("🏗️ LlamaIndex (Workflow)"):
            # chat_llama is async; Gradio awaits coroutine handlers natively.
            gr.ChatInterface(
                fn=chat_llama,
                # BUGFIX: corrected 'Franch' -> 'French' in the example prompt.
                examples=["What is AI?", "What is the weather in Tokyo?", "What time is it in Japan?", "Translate hello to French", "Convert kg to g"]
            )
        with gr.Tab("💻 smolagents (CodeAgent)"):
            gr.ChatInterface(
                fn=chat_smol,
                examples=["Search for the latest AI news", "How is the weather in Paris?", "What time is it in Korea?", "Translate hello to Japanese", "Convert kg to g"]
            )
if __name__ == "__main__":
    # Gradio 6.0: Move theme/css to launch()
    # NOTE(review): theme/css passed to launch() per the author's Gradio 6.0
    # note above — confirm against the installed Gradio version's API.
    demo.launch(
        theme=gr.themes.Soft(),
        css="#main-title { text-align: center; margin-bottom: 20px; }"
    )