Jovynne committed on
Commit
e67b2df
·
verified ·
1 Parent(s): 2427ca2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +73 -30
app.py CHANGED
@@ -3,6 +3,7 @@ import gradio as gr
3
  import datetime
4
  import pytz
5
  import asyncio
 
6
 
7
  # Framework 1: LlamaIndex
8
  from llama_index.core.agent.workflow import AgentWorkflow
@@ -14,57 +15,88 @@ from smolagents import CodeAgent, DuckDuckGoSearchTool, tool, InferenceClientMod
14
 
15
  # 0. SHARED CONFIG
16
  HF_TOKEN = os.getenv("HF_TOKEN")
17
- # 7B is the sweet spot for free serverless inference in 2026
18
  MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
19
 
 
 
 
 
 
 
 
 
20
  # ==========================================
21
  # PART 1: LLAMAINDEX AGENT
22
  # ==========================================
23
- li_llm = HuggingFaceInferenceAPI(
24
- model_name=MODEL_ID,
25
- token=HF_TOKEN,
26
- provider="together"
27
- )
28
 
29
  def get_tokyo_time() -> str:
30
  """Returns the current time in Tokyo, Japan."""
31
  tz = pytz.timezone('Asia/Tokyo')
32
  return f"The current time in Tokyo is {datetime.datetime.now(tz).strftime('%H:%M:%S')}"
33
 
34
- li_tools = [FunctionTool.from_defaults(fn=get_tokyo_time)]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
 
36
- li_agent = AgentWorkflow.from_tools_or_functions(
37
- li_tools,
38
- llm=li_llm,
39
- )
 
 
40
 
41
  async def chat_llama(message, history):
42
  try:
43
  result = await li_agent.run(user_msg=message)
44
  return str(result)
45
  except Exception as e:
46
- return f"LlamaIndex Error: {str(e)}"
47
 
48
  # ==========================================
49
  # PART 2: SMOLAGENTS
50
  # ==========================================
51
- smol_model = InferenceClientModel(
52
- model_id=MODEL_ID,
53
- token=HF_TOKEN,
54
- provider="together"
55
- )
56
 
57
  @tool
58
  def weather_tool(location: str) -> str:
59
- """Get the current weather for a location.
 
60
  Args:
61
- location: The city name.
62
  """
63
- return f"The weather in {location} is currently sunny and 22°C."
 
 
 
 
 
 
 
 
 
 
 
64
 
65
  smol_agent = CodeAgent(
66
  model=smol_model,
67
- tools=[weather_tool, DuckDuckGoSearchTool()]
 
68
  )
69
 
70
  def chat_smol(message, history):
@@ -72,20 +104,31 @@ def chat_smol(message, history):
72
  response = smol_agent.run(message)
73
  return str(response)
74
  except Exception as e:
75
- return f"Smolagents Error: {str(e)}"
76
 
77
  # ==========================================
78
- # PART 3: UNIFIED GRADIO UI
79
  # ==========================================
80
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
81
- gr.Markdown("# 🤖 Consolidated AI Agent Space")
82
  gr.Markdown(f"Currently using **{MODEL_ID}** via Together AI Provider.")
83
 
84
- with gr.Tab("LlamaIndex (Workflow)"):
85
- gr.ChatInterface(fn=chat_llama)
 
 
 
 
86
 
87
- with gr.Tab("smolagents (CodeAgent)"):
88
- gr.ChatInterface(fn=chat_smol)
 
 
 
89
 
90
  if __name__ == "__main__":
91
- demo.launch()
 
 
 
 
 
3
  import datetime
4
  import pytz
5
  import asyncio
6
+ import requests
7
 
8
  # Framework 1: LlamaIndex
9
  from llama_index.core.agent.workflow import AgentWorkflow
 
15
 
16
  # 0. SHARED CONFIG
17
  HF_TOKEN = os.getenv("HF_TOKEN")
 
18
  MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
19
 
20
def get_coordinates(location: str):
    """Resolve a place name to (latitude, longitude) via the Open-Meteo geocoding API.

    Args:
        location: Free-text place name, e.g. "Tokyo" or "London, UK".

    Returns:
        A (lat, lon) tuple of floats, or (None, None) when the location
        cannot be resolved or the geocoding request fails.
    """
    geo_url = "https://geocoding-api.open-meteo.com/v1/search"
    try:
        # Let requests URL-encode the name (spaces/commas in "London, UK"
        # would otherwise corrupt the query string), and bound the call with
        # a timeout so a slow API cannot hang the agent indefinitely.
        geo_res = requests.get(
            geo_url,
            params={"name": location, "count": 1, "language": "en", "format": "json"},
            timeout=10,
        ).json()
    except requests.RequestException:
        return None, None
    if not geo_res.get("results"):
        return None, None
    res = geo_res["results"][0]
    return res["latitude"], res["longitude"]
27
+
28
  # ==========================================
29
  # PART 1: LLAMAINDEX AGENT
30
  # ==========================================
31
# Shared LLM endpoint for the LlamaIndex workflow agent.
li_llm = HuggingFaceInferenceAPI(
    model_name=MODEL_ID,
    token=HF_TOKEN,
    provider="together",
)
 
 
 
 
32
 
33
def get_tokyo_time() -> str:
    """Return the current wall-clock time in Tokyo as a formatted sentence."""
    # zoneinfo (stdlib, Python 3.9+) replaces the legacy third-party pytz
    # dependency; imported locally so the module import block is untouched.
    from zoneinfo import ZoneInfo

    now = datetime.datetime.now(ZoneInfo("Asia/Tokyo"))
    return f"The current time in Tokyo is {now.strftime('%H:%M:%S')}"
37
 
38
def get_weather(location: str) -> str:
    """
    Get the real-time weather for a specific location.

    Args:
        location: The city name to check weather for.
    """
    lat, lon = get_coordinates(location)
    if lat is None:
        return f"Could not find coordinates for {location}."

    weather_url = "https://api.open-meteo.com/v1/forecast"
    try:
        # params= handles URL encoding; timeout keeps the agent responsive
        # if the weather API stalls.
        res = requests.get(
            weather_url,
            params={
                "latitude": lat,
                "longitude": lon,
                "current": "temperature_2m,relative_humidity_2m,weather_code",
            },
            timeout=10,
        ).json()
    except requests.RequestException as e:
        return f"Weather service unavailable for {location}: {e}"

    current = res.get("current", {})
    temp = current.get("temperature_2m")
    hum = current.get("relative_humidity_2m")
    # Avoid reporting "None°C" when the API response is missing data.
    if temp is None:
        return f"Weather data for {location} is currently unavailable."

    return f"LlamaIndex: In {location}, it is currently {temp}°C with {hum}% humidity."
56
 
57
# Register both local functions as callable tools for the workflow agent
# (time lookup and real-time weather).
li_tools = [FunctionTool.from_defaults(fn=f) for f in (get_tokyo_time, get_weather)]

li_agent = AgentWorkflow.from_tools_or_functions(li_tools, llm=li_llm)
63
 
64
async def chat_llama(message, history):
    """Gradio chat callback backed by the LlamaIndex workflow agent."""
    try:
        return str(await li_agent.run(user_msg=message))
    except Exception as e:
        # Surface agent failures to the chat UI instead of crashing the app.
        return f"LlamaIndex Error: {str(e)}"
70
 
71
  # ==========================================
72
  # PART 2: SMOLAGENTS
73
  # ==========================================
74
# Shared LLM endpoint for the smolagents CodeAgent.
smol_model = InferenceClientModel(
    model_id=MODEL_ID,
    token=HF_TOKEN,
    provider="together",
)
 
 
 
 
75
 
76
@tool
def weather_tool(location: str) -> str:
    """
    Get the current real-time weather for a location.

    Args:
        location: The city and country, e.g., 'London, UK'.
    """
    lat, lon = get_coordinates(location)
    if lat is None:
        return f"Sorry, I couldn't find the location: {location}"

    weather_url = "https://api.open-meteo.com/v1/forecast"
    try:
        # params= handles URL encoding; timeout keeps the agent responsive
        # if the weather API stalls.
        res = requests.get(
            weather_url,
            params={
                "latitude": lat,
                "longitude": lon,
                "current": "temperature_2m,wind_speed_10m",
            },
            timeout=10,
        ).json()
    except requests.RequestException as e:
        return f"Weather service unavailable for {location}: {e}"

    current = res.get("current", {})
    temp = current.get("temperature_2m")
    wind = current.get("wind_speed_10m")
    # Avoid reporting "None°C" when the API response is missing data.
    if temp is None:
        return f"Weather data for {location} is currently unavailable."

    return f"smolagents: The current temperature in {location} is {temp}°C with a wind speed of {wind} km/h."
95
 
96
# CodeAgent with the custom weather tool plus DuckDuckGo web search.
# NOTE(review): add_base_tools=True also injects smolagents' default toolbox,
# which includes its own web-search tool — confirm this does not clash with
# the explicit DuckDuckGoSearchTool instance.
smol_agent = CodeAgent(
    tools=[weather_tool, DuckDuckGoSearchTool()],
    model=smol_model,
    add_base_tools=True,
)
101
 
102
def chat_smol(message, history):
    """Gradio chat callback backed by the smolagents CodeAgent."""
    try:
        return str(smol_agent.run(message))
    except Exception as e:
        # Surface agent failures to the chat UI instead of crashing the app.
        return f"Smolagents Error: {str(e)}"
108
 
109
# ==========================================
# PART 3: GRADIO 6.0 UI
# ==========================================
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 Consolidated AI Agent Space", elem_id="main-title")
    gr.Markdown(f"Currently using **{MODEL_ID}** via Together AI Provider.")

    with gr.Tabs():
        with gr.Tab("🏗️ LlamaIndex (Workflow)"):
            gr.ChatInterface(
                fn=chat_llama,
                examples=["What's the weather in Tokyo?", "What time is it in Japan?"],
            )

        with gr.Tab("💻 smolagents (CodeAgent)"):
            gr.ChatInterface(
                fn=chat_smol,
                examples=["Search for the latest AI news", "How is the weather in Paris?"],
            )

if __name__ == "__main__":
    # NOTE(review): theme/css are passed to launch() here per the commit's
    # "Gradio 6.0" comment — on older Gradio versions these are gr.Blocks()
    # constructor arguments; confirm the installed version accepts them.
    demo.launch(
        theme=gr.themes.Soft(),
        css="#main-title { text-align: center; margin-bottom: 20px; }",
    )