Jovynne committed on
Commit
510e2df
·
verified ·
1 Parent(s): d0f5a39

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +79 -65
app.py CHANGED
@@ -1,91 +1,105 @@
1
  import os
2
- import gradio as gr
3
  import datetime
4
  import pytz
5
- import asyncio
6
-
7
- # Framework 1: LlamaIndex
8
- from llama_index.core.agent.workflow import AgentWorkflow
 
 
9
  from llama_index.core.tools import FunctionTool
10
  from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
11
 
12
- # Framework 2: smolagents
13
- from smolagents import CodeAgent, DuckDuckGoSearchTool, tool, InferenceClientModel
14
-
15
- # 0. SHARED CONFIG
16
- HF_TOKEN = os.getenv("HF_TOKEN")
17
-
18
- # ==========================================
19
- # PART 1: LLAMAINDEX AGENT
20
- # ==========================================
21
- li_llm = HuggingFaceInferenceAPI(
22
- model_name="Qwen/Qwen2.5-7B-Instruct",
23
- token=HF_TOKEN,
24
- )
25
-
26
- def get_tokyo_time() -> str:
27
- """Returns the current time in Tokyo, Japan."""
28
- tz = pytz.timezone('Asia/Tokyo')
29
- return f"The current time in Tokyo is {datetime.datetime.now(tz).strftime('%H:%M:%S')}"
30
 
31
- li_tools = [FunctionTool.from_defaults(fn=get_tokyo_time)]
32
-
33
- # Using positional argument for tools list to avoid TypeError
34
- li_agent = AgentWorkflow.from_tools_or_functions(
35
- li_tools,
36
- llm=li_llm,
37
- )
38
-
39
- async def chat_llama(message, history):
40
  try:
41
- # AgentWorkflow requires async execution
42
- result = await li_agent.run(user_msg=message)
43
- return str(result)
44
  except Exception as e:
45
- return f"LlamaIndex Error: {str(e)}"
46
 
47
- # ==========================================
48
- # PART 2: SMOLAGENTS
49
- # ==========================================
50
- smol_model = InferenceClientModel(
51
- model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
52
- token=HF_TOKEN
53
- )
 
 
 
 
 
 
54
 
55
  @tool
56
  def weather_tool(location: str) -> str:
57
- """Get the current weather for a location.
 
58
  Args:
59
- location: The city name.
60
  """
61
- return f"The weather in {location} is currently sunny and 22°C."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
 
63
- # Tool initialization happens here; ensure ddgs is in requirements.txt
64
  smol_agent = CodeAgent(
65
  model=smol_model,
66
- tools=[weather_tool, DuckDuckGoSearchTool()]
 
67
  )
68
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
  def chat_smol(message, history):
70
- try:
71
- # CodeAgent .run() is synchronous
72
- response = smol_agent.run(message)
73
- return str(response)
74
- except Exception as e:
75
- return f"Smolagents Error: {str(e)}"
76
 
77
- # ==========================================
78
- # PART 3: UNIFIED GRADIO UI
79
- # ==========================================
80
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
81
- gr.Markdown("# 🤖 Dual-Agent Testing Space")
82
- gr.Markdown("Comparing **Event-Driven Workflows** (LlamaIndex) vs **Code Execution** (smolagents).")
83
-
84
- with gr.Tab("LlamaIndex (Workflow)"):
85
- gr.ChatInterface(fn=chat_llama)
86
-
87
- with gr.Tab("smolagents (CodeAgent)"):
88
- gr.ChatInterface(fn=chat_smol)
89
 
90
  if __name__ == "__main__":
91
  demo.launch()
 
1
  import os
 
2
  import datetime
3
  import pytz
4
+ import math
5
+ import requests
6
+ import gradio as gr
7
+ from smolagents import CodeAgent, DuckDuckGoSearchTool, tool, InferenceClientModel
8
+ from deep_translator import GoogleTranslator
9
+ from llama_index.core.agent import ReActAgent
10
  from llama_index.core.tools import FunctionTool
11
  from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
12
 
13
+ # ============ 1. SMOLAGENTS TOOLS ============
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
@tool
def time_tool(timezone: str = "UTC") -> str:
    """
    Get current time in a specific timezone.

    Args:
        timezone: The timezone to check (e.g., 'Asia/Tokyo').

    Returns:
        A formatted timestamp string, or an error message for an
        unknown timezone name.
    """
    # Only an invalid timezone name is an expected failure here; catching
    # it narrowly (instead of a blanket Exception) lets genuine programming
    # errors still surface during development.
    try:
        tz = pytz.timezone(timezone)
    except pytz.exceptions.UnknownTimeZoneError as e:
        return f"Error: {str(e)}"
    now = datetime.datetime.now(tz)
    return f"⏰ Time in {timezone}: {now.strftime('%Y-%m-%d %H:%M:%S')}"
28
 
29
@tool
def translator_tool(text: str, target_language: str) -> str:
    """
    Translates text into a specified language.

    Args:
        text: The text or phrase to translate.
        target_language: The destination language (e.g., 'french', 'german').

    Returns:
        The translated text with a label, or an error message if the
        translation service failed.
    """
    try:
        translated = GoogleTranslator(source='auto', target=target_language).translate(text)
    except Exception as e:
        # Report service failures back to the agent as plain text.
        return f"Translation error: {str(e)}"
    return f"Translated to {target_language.title()}: {translated}"
42
 
43
@tool
def weather_tool(location: str) -> str:
    """
    Get the current weather for any location worldwide.

    Args:
        location: The name of the city or place (e.g., 'London' or 'Tokyo').

    Returns:
        A one-line temperature report, "Location not found." when the
        geocoder has no match, or an error message on request failure.
    """
    try:
        # params= lets requests URL-encode the place name (spaces, accents,
        # '&', ...); a timeout keeps a slow API from hanging the agent run.
        geo_res = requests.get(
            "https://geocoding-api.open-meteo.com/v1/search",
            params={"name": location, "count": 1, "language": "en", "format": "json"},
            timeout=10,
        ).json()
        if not geo_res.get('results'):
            return "Location not found."
        data = geo_res['results'][0]
        w_res = requests.get(
            "https://api.open-meteo.com/v1/forecast",
            params={
                "latitude": data['latitude'],
                "longitude": data['longitude'],
                "current": "temperature_2m",
            },
            timeout=10,
        ).json()
        return f"🌤️ {data['name']}: {w_res['current']['temperature_2m']}°C"
    except Exception as e:
        return f"Error: {str(e)}"
60
+
61
# ============ AGENT CONFIGURATION ============

# Falls back to an empty string when the HF_TOKEN env var is unset.
HF_TOKEN = os.getenv("HF_TOKEN", "")

smol_model = InferenceClientModel(
    model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
    token=HF_TOKEN,
)

# Code-executing agent: web search plus the three custom tools above,
# capped at five reasoning steps per query.
smol_agent = CodeAgent(
    model=smol_model,
    tools=[DuckDuckGoSearchTool(), time_tool, translator_tool, weather_tool],
    max_steps=5,
)
71
 
72
# LlamaIndex Setup
llama_llm = HuggingFaceInferenceAPI(
    model_name="Qwen/Qwen2.5-7B-Instruct",
    token=HF_TOKEN,
    task="conversational"
)

def _get_tokyo_time() -> str:
    """Return the current date and time in Tokyo, Japan."""
    return f"Tokyo: {datetime.datetime.now(pytz.timezone('Asia/Tokyo'))}"

def _multiply(a: float, b: float) -> float:
    """Multiply two numbers and return the product."""
    return a * b

# FunctionTool derives each tool's description and argument schema from the
# function's docstring and annotations; the previous bare lambdas supplied
# neither, leaving the ReAct agent without usable tool metadata.
llama_agent = ReActAgent.from_tools(
    [
        FunctionTool.from_defaults(fn=_get_tokyo_time, name="get_tokyo_time"),
        FunctionTool.from_defaults(fn=_multiply, name="multiply"),
    ],
    llm=llama_llm,
    verbose=True
)
87
+
88
+ # ============ GRADIO UI ============
89
+
90
def chat_smol(message, history):
    """Gradio chat handler for the smolagents CodeAgent tab.

    Args:
        message: The user's latest message.
        history: Prior chat turns (unused; the agent keeps no UI history).

    Returns:
        The agent's answer as a string, or an error description.
    """
    try:
        return str(smol_agent.run(message))
    except Exception as e:
        # Surface failures (rate limits, tool errors) as chat text instead
        # of letting the exception bubble up as an opaque Gradio error.
        return f"Smolagents Error: {str(e)}"
92
+
93
def chat_llama(message, history):
    """Gradio chat handler for the LlamaIndex ReAct tab.

    Args:
        message: The user's latest message.
        history: Prior chat turns (unused; the agent keeps its own memory).

    Returns:
        The agent's answer as a string, or an error description.
    """
    try:
        response = llama_agent.chat(message)
        return str(response)
    except Exception as e:
        # Return failures as chat text rather than crashing the handler.
        return f"LlamaIndex Error: {str(e)}"
 
96
 
 
 
 
97
# One tab per framework, both backed by a plain ChatInterface.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤖 Multi-Framework AI Agent Space")
    for tab_title, handler in (
        ("Smolagents (Alfred)", chat_smol),
        ("LlamaIndex (ReAct)", chat_llama),
    ):
        with gr.Tab(tab_title):
            gr.ChatInterface(handler)
 
 
 
103
 
104
# Start the Gradio server only when this file is executed as a script.
if __name__ == "__main__":
    demo.launch()