Hugging Face Space app: a consolidated agent demo (LlamaIndex workflow + smolagents CodeAgent) served with a Gradio UI.
| import os | |
| import gradio as gr | |
| import datetime | |
| import pytz | |
| import asyncio | |
| import requests | |
| # Framework 1: LlamaIndex | |
| from llama_index.core.agent.workflow import AgentWorkflow | |
| from llama_index.core.tools import FunctionTool | |
| from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI | |
| # Framework 2: smolagents | |
| from smolagents import CodeAgent, DuckDuckGoSearchTool, tool, InferenceClientModel | |
# 0. SHARED CONFIG
# Hugging Face access token, read from the environment (None when unset).
HF_TOKEN = os.getenv("HF_TOKEN")
# Chat model served via the HF Inference API; shared by both agent frameworks below.
MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
def get_coordinates(location: str):
    """Resolve a place name to (latitude, longitude) via the Open-Meteo geocoding API.

    Args:
        location: Free-form city name, e.g. "Paris" or "London, UK".

    Returns:
        A ``(lat, lon)`` tuple, or ``(None, None)`` when the location is
        unknown or the HTTP request fails.
    """
    try:
        # Let requests URL-encode the query (spaces, commas, unicode) instead of
        # interpolating it into the URL by hand; also bound the request time so a
        # stalled geocoding call can't hang the agent.
        geo_res = requests.get(
            "https://geocoding-api.open-meteo.com/v1/search",
            params={"name": location, "count": 1, "language": "en", "format": "json"},
            timeout=10,
        ).json()
    except requests.RequestException:
        return None, None
    if not geo_res.get("results"):
        return None, None
    res = geo_res["results"][0]
    return res["latitude"], res["longitude"]
# ==========================================
# PART 1: LLAMAINDEX AGENT
# ==========================================
# LLM client for LlamaIndex, backed by the HF Inference API ("together" provider).
li_llm = HuggingFaceInferenceAPI(model_name=MODEL_ID, token=HF_TOKEN, provider="together")
def get_tokyo_time() -> str:
    """Returns the current time in Tokyo, Japan.

    Returns:
        A sentence containing the current Tokyo wall-clock time as ``HH:MM:SS``.
    """
    # zoneinfo is stdlib (Python 3.9+) and behaves identically to pytz for a
    # simple "now in this timezone" query, with no localize() pitfalls.
    from zoneinfo import ZoneInfo

    now = datetime.datetime.now(ZoneInfo("Asia/Tokyo"))
    return f"The current time in Tokyo is {now.strftime('%H:%M:%S')}"
def get_weather(location: str) -> str:
    """
    Get the real-time weather for a specific location.

    Args:
        location: The city name to check weather for.

    Returns:
        A human-readable temperature/humidity summary, or an error message
        when the location cannot be geocoded.
    """
    lat, lon = get_coordinates(location)
    if lat is None:
        return f"Could not find coordinates for {location}."
    # BUGFIX: the original URL contained "¤t=" — mojibake of "&current=" — so
    # the API never received the requested fields. Pass them via params instead,
    # and bound the request time.
    res = requests.get(
        "https://api.open-meteo.com/v1/forecast",
        params={
            "latitude": lat,
            "longitude": lon,
            "current": "temperature_2m,relative_humidity_2m,weather_code",
        },
        timeout=10,
    ).json()
    current = res.get("current", {})
    temp = current.get("temperature_2m")
    hum = current.get("relative_humidity_2m")
    return f"LlamaIndex: In {location}, it is currently {temp}°C with {hum}% humidity."
# Wrap the plain functions as LlamaIndex tools; their names and docstrings
# become the tool schema shown to the LLM.
li_tools = [
    FunctionTool.from_defaults(fn=get_tokyo_time),
    FunctionTool.from_defaults(fn=get_weather)  # Added weather tool here
]
# Agent workflow that lets the LLM choose and invoke the tools above.
li_agent = AgentWorkflow.from_tools_or_functions(li_tools, llm=li_llm)
async def chat_llama(message, history):
    """Gradio chat callback: route one user message through the LlamaIndex agent.

    Args:
        message: The user's latest chat message.
        history: Prior chat turns supplied by Gradio (unused by the agent run).

    Returns:
        The agent's answer as a string, or a formatted error message.
    """
    try:
        answer = await li_agent.run(user_msg=message)
    except Exception as exc:
        # Surface agent failures in the chat window rather than crashing the UI.
        return f"❌ LlamaIndex Error: {str(exc)}"
    return str(answer)
# ==========================================
# PART 2: SMOLAGENTS
# ==========================================
# Same model/provider as Part 1, wrapped in the smolagents client interface.
smol_model = InferenceClientModel(model_id=MODEL_ID, token=HF_TOKEN, provider="together")
@tool  # BUGFIX: `tool` is imported but was never applied; CodeAgent expects decorated tools.
def weather_tool(location: str) -> str:
    """
    Get the current real-time weather for a location.

    Args:
        location: The city and country, e.g., 'London, UK'.
    """
    lat, lon = get_coordinates(location)
    if lat is None:
        return f"Sorry, I couldn't find the location: {location}"
    # BUGFIX: the original URL contained "¤t=" — mojibake of "&current=" — so the
    # API ignored the field selection. Use params and a bounded timeout instead.
    res = requests.get(
        "https://api.open-meteo.com/v1/forecast",
        params={
            "latitude": lat,
            "longitude": lon,
            "current": "temperature_2m,wind_speed_10m",
        },
        timeout=10,
    ).json()
    current = res.get("current", {})
    temp = current.get("temperature_2m")
    wind = current.get("wind_speed_10m")
    return f"smolagents: The current temperature in {location} is {temp}°C with a wind speed of {wind} km/h."
# CodeAgent plans by writing and executing Python that calls its tools.
smol_agent = CodeAgent(
    model=smol_model,
    tools=[weather_tool, DuckDuckGoSearchTool()],  # custom weather lookup + web search
    add_base_tools=True  # also expose smolagents' built-in default toolbox
)
def chat_smol(message, history):
    """Gradio chat callback: run one user message through the smolagents CodeAgent.

    Args:
        message: The user's latest chat message.
        history: Prior chat turns supplied by Gradio (not forwarded to the agent).

    Returns:
        The agent's answer as a string, or a formatted error message.
    """
    try:
        answer = smol_agent.run(message)
    except Exception as exc:
        # Report failures inline in the chat instead of raising into Gradio.
        return f"❌ Smolagents Error: {str(exc)}"
    return str(answer)
# ==========================================
# PART 3: GRADIO 6.0 UI
# ==========================================
# Two-tab chat UI: one tab per agent framework, both backed by MODEL_ID.
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 Consolidated AI Agent Space", elem_id="main-title")  # targeted by the css passed to launch()
    gr.Markdown(f"Currently using **{MODEL_ID}** via Together AI Provider.")
    with gr.Tabs():
        with gr.Tab("🏗️ LlamaIndex (Workflow)"):
            # chat_llama is async; Gradio awaits coroutine callbacks.
            gr.ChatInterface(
                fn=chat_llama,
                examples=["What's the weather in Tokyo?", "What time is it in Japan?"]
            )
        with gr.Tab("💻 smolagents (CodeAgent)"):
            gr.ChatInterface(
                fn=chat_smol,
                examples=["Search for the latest AI news", "How is the weather in Paris?"]
            )
if __name__ == "__main__":
    # Gradio 6.0: Move theme/css to launch()
    # NOTE(review): theme=/css= as launch() kwargs relies on the Gradio 6 API
    # claimed above; on older Gradio these belong to gr.Blocks() — confirm the
    # pinned gradio version in the Space requirements.
    demo.launch(
        theme=gr.themes.Soft(),
        css="#main-title { text-align: center; margin-bottom: 20px; }"
    )