Wayne0102 committed on
Commit
052135c
·
verified ·
1 Parent(s): 7e4f688

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +99 -70
app.py CHANGED
@@ -3,103 +3,132 @@ import gradio as gr
3
  import datetime
4
  import pytz
5
  import asyncio
6
- from gradio import ChatMessage
7
 
8
- # Frameworks
9
  from llama_index.core.agent.workflow import AgentWorkflow
10
  from llama_index.core.tools import FunctionTool
11
  from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
 
 
12
  from smolagents import CodeAgent, DuckDuckGoSearchTool, tool, InferenceClientModel
13
 
14
- # 0. CONFIG
15
  HF_TOKEN = os.getenv("HF_TOKEN")
16
  MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
17
 
 
 
 
 
 
 
 
 
18
  # ==========================================
19
- # AGENT SETUP
20
  # ==========================================
21
  li_llm = HuggingFaceInferenceAPI(model_name=MODEL_ID, token=HF_TOKEN, provider="together")
22
 
 
 
 
 
 
23
def get_weather(location: str) -> str:
    """Return a canned weather report for *location* (stub: always sunny and 22°C)."""
    report = f"The weather in {location} is currently sunny and 22°C."
    return report
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
 
27
# Wire the weather tool into a LlamaIndex agent workflow, then build the
# smolagents CodeAgent backed by the same hosted model with web search.
li_agent = AgentWorkflow.from_tools_or_functions(
    [FunctionTool.from_defaults(fn=get_weather)], llm=li_llm
)

smol_model = InferenceClientModel(model_id=MODEL_ID, token=HF_TOKEN, provider="together")
smol_agent = CodeAgent(model=smol_model, tools=[DuckDuckGoSearchTool()])
 
 
 
 
 
 
34
 
35
  # ==========================================
36
- # UI LOGIC
37
  # ==========================================
38
async def chat_llama(message, history):
    """Forward the user message to the LlamaIndex workflow and return its answer as text."""
    return str(await li_agent.run(user_msg=message))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
 
42
def chat_smol(message, history):
    """Stream the smolagents run: first a 'thinking' status bubble, then the answer."""
    pending = ChatMessage(
        role="assistant",
        content="Agent is searching and executing Python code...",
        metadata={"title": "🔍 Step 1: Thinking", "status": "pending"},
    )
    yield [ChatMessage(role="user", content=message), pending]
    try:
        answer = smol_agent.run(message)
        yield [
            ChatMessage(role="user", content=message),
            ChatMessage(role="assistant", content=str(answer)),
        ]
    except Exception as e:
        yield [ChatMessage(role="assistant", content=f"Error: {str(e)}")]
 
61
  # ==========================================
62
- # GRADIO 6.0 STABLE UI
63
  # ==========================================
64
with gr.Blocks(fill_height=True) as demo:
    # Sidebar: framework status and usage hint.
    with gr.Sidebar():
        gr.Markdown("# ⚙️ Control Panel")
        gr.HTML("<hr style='border: 0.5px solid #e5e7eb;'>")  # Manual divider

        gr.Markdown("### 🤖 Framework Status")
        for status_line in ("- **LlamaIndex**: Ready", "- **smolagents**: Ready"):
            gr.Markdown(status_line)

        gr.HTML("<hr style='border: 0.5px solid #e5e7eb;'>")
        gr.Info("Use LlamaIndex for structured logic and smolagents for web searches.")

    # Main content: one tab per agent framework.
    with gr.Column():
        gr.Markdown(f"# 🤖 Multi-Agent Hub")

        with gr.Tabs():
            with gr.Tab("🏗️ LlamaIndex (Workflow)"):
                gr.ChatInterface(
                    fn=chat_llama,
                    examples=["How is the weather in Tokyo?"],
                )

            with gr.Tab("💻 smolagents (CodeAgent)"):
                code_chat = gr.Chatbot(
                    label="Code Agent",
                    type="messages",
                    avatar_images=(None, "https://raw.githubusercontent.com/huggingface/smolagents/main/docs/source/imgs/smolagents_logo.png"),
                )
                with gr.Row():
                    user_box = gr.Textbox(placeholder="Ask me to search or run code...", show_label=False, scale=4)
                    send_btn = gr.Button("Send", variant="primary", scale=1)

                # Run the agent into the chatbot, then clear the textbox.
                send_btn.click(chat_smol, [user_box, code_chat], [code_chat])
                send_btn.click(lambda: "", None, [user_box])

if __name__ == "__main__":
    demo.launch(theme=gr.themes.Soft())
 
 
 
 
 
3
  import datetime
4
  import pytz
5
  import asyncio
6
+ import requests
7
 
8
+ # Framework 1: LlamaIndex
9
  from llama_index.core.agent.workflow import AgentWorkflow
10
  from llama_index.core.tools import FunctionTool
11
  from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
12
+
13
+ # Framework 2: smolagents
14
  from smolagents import CodeAgent, DuckDuckGoSearchTool, tool, InferenceClientModel
15
 
16
+ # 0. SHARED CONFIG
17
  HF_TOKEN = os.getenv("HF_TOKEN")
18
  MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
19
 
20
def get_coordinates(location: str):
    """Resolve *location* to (latitude, longitude) via the Open-Meteo geocoding API.

    Args:
        location: Free-text city name to geocode.
    Returns:
        (lat, lon) as floats, or (None, None) when no match is found.
    """
    # params= lets requests URL-encode the city name (the original f-string URL
    # broke for multi-word locations); timeout prevents an indefinite hang —
    # requests has no default timeout.
    geo_res = requests.get(
        "https://geocoding-api.open-meteo.com/v1/search",
        params={"name": location, "count": 1, "language": "en", "format": "json"},
        timeout=10,
    ).json()
    if not geo_res.get("results"):
        return None, None
    res = geo_res["results"][0]
    return res["latitude"], res["longitude"]
27
+
28
  # ==========================================
29
+ # PART 1: LLAMAINDEX AGENT
30
  # ==========================================
31
  li_llm = HuggingFaceInferenceAPI(model_name=MODEL_ID, token=HF_TOKEN, provider="together")
32
 
33
def get_tokyo_time() -> str:
    """Returns the current time in Tokyo, Japan.

    Uses the stdlib zoneinfo module (PEP 615) instead of the third-party
    pytz dependency the rest of the file no longer needs for this lookup.
    """
    from zoneinfo import ZoneInfo  # local import keeps the tool self-contained

    now = datetime.datetime.now(ZoneInfo("Asia/Tokyo"))
    return f"The current time in Tokyo is {now.strftime('%H:%M:%S')}"
37
+
38
def get_weather(location: str) -> str:
    """
    Get the real-time weather for a specific location.
    Args:
        location: The city name to check weather for.
    Returns:
        A sentence with current temperature and humidity, or an error message
        when the location cannot be geocoded.
    """
    lat, lon = get_coordinates(location)
    if lat is None:
        return f"Could not find coordinates for {location}."

    # params= handles URL-encoding; timeout guards against the agent hanging
    # forever on a slow API (requests has no default timeout).
    res = requests.get(
        "https://api.open-meteo.com/v1/forecast",
        params={
            "latitude": lat,
            "longitude": lon,
            "current": "temperature_2m,relative_humidity_2m,weather_code",
        },
        timeout=10,
    ).json()

    # .get() chains keep a malformed API response from raising KeyError;
    # temp/hum fall back to None and still render a readable sentence.
    current = res.get("current", {})
    temp = current.get("temperature_2m")
    hum = current.get("relative_humidity_2m")

    return f"LlamaIndex: In {location}, it is currently {temp}°C with {hum}% humidity."
56
 
57
# Register both local tools with the LlamaIndex agent workflow.
li_tools = [
    FunctionTool.from_defaults(fn=tool_fn)
    for tool_fn in (get_tokyo_time, get_weather)
]

li_agent = AgentWorkflow.from_tools_or_functions(li_tools, llm=li_llm)
63
+
64
async def chat_llama(message, history):
    """Gradio chat handler: run the LlamaIndex workflow, surfacing errors as text."""
    try:
        result = await li_agent.run(user_msg=message)
    except Exception as e:
        return f"❌ LlamaIndex Error: {str(e)}"
    return str(result)
70
 
71
  # ==========================================
72
+ # PART 2: SMOLAGENTS
73
  # ==========================================
74
smol_model = InferenceClientModel(model_id=MODEL_ID, token=HF_TOKEN, provider="together")

@tool
def weather_tool(location: str) -> str:
    """
    Get the current real-time weather for a location.
    Args:
        location: The city and country, e.g., 'London, UK'.
    """
    lat, lon = get_coordinates(location)
    if lat is None:
        return f"Sorry, I couldn't find the location: {location}"

    # params= handles URL-encoding of lat/lon; timeout prevents the tool from
    # hanging the agent run indefinitely (requests has no default timeout).
    res = requests.get(
        "https://api.open-meteo.com/v1/forecast",
        params={
            "latitude": lat,
            "longitude": lon,
            "current": "temperature_2m,wind_speed_10m",
        },
        timeout=10,
    ).json()

    # Defensive .get() chain: a malformed response yields None values instead
    # of raising inside the agent loop.
    current = res.get("current", {})
    temp = current.get("temperature_2m")
    wind = current.get("wind_speed_10m")

    return f"smolagents: The current temperature in {location} is {temp}°C with a wind speed of {wind} km/h."

# CodeAgent gets the weather tool, web search, and the smolagents base toolbox.
smol_agent = CodeAgent(
    model=smol_model,
    tools=[weather_tool, DuckDuckGoSearchTool()],
    add_base_tools=True
)
101
 
102
def chat_smol(message, history):
    """Gradio chat handler: run the smolagents CodeAgent, surfacing errors as text."""
    try:
        return str(smol_agent.run(message))
    except Exception as e:
        return f"❌ Smolagents Error: {str(e)}"
108
 
109
  # ==========================================
110
+ # PART 3: GRADIO 6.0 UI
111
  # ==========================================
112
# (tab label, chat handler, example prompts) — one entry per framework tab.
_TABS = (
    ("🏗️ LlamaIndex (Workflow)", chat_llama,
     ["What's the weather in Tokyo?", "What time is it in Japan?"]),
    ("💻 smolagents (CodeAgent)", chat_smol,
     ["Search for the latest AI news", "How is the weather in Paris?"]),
)

with gr.Blocks() as demo:
    gr.Markdown("# 🤖 Consolidated AI Agent Space", elem_id="main-title")
    gr.Markdown(f"Currently using **{MODEL_ID}** via Together AI Provider.")

    with gr.Tabs():
        for _title, _fn, _examples in _TABS:
            with gr.Tab(_title):
                gr.ChatInterface(fn=_fn, examples=_examples)

if __name__ == "__main__":
    # Gradio 6.0: Move theme/css to launch()
    demo.launch(
        theme=gr.themes.Soft(),
        css="#main-title { text-align: center; margin-bottom: 20px; }",
    )